text stringlengths 0 1.05M | meta dict |
|---|---|
# CSS classes shared by every A/T-status badge; a green/red modifier is added
# per check in generate_A_T_classes.
AT_DEFAULT_CLASSES = ['at-status', 'tooltipped', 'tooltipped-nw']

# Tooltip template filled in by generate_A_T_hint.
# Fix: the original two-string concatenation produced "at the 3`end" (no space
# before "end"); the hint now reads "... at the 3` end ...".
AT_HINT_TEMPLATE = ('Last {last_nucliotides_count} bases at the 3` '
                    'end {dont}include {residue_length} A or T residues')
def generate_A_T_hint(checkStatus, type):
    """Build the tooltip text for one A/T-composition check.

    checkStatus -- True when the primer passed the check; controls whether
                   the hint reads "include" or "don`t include".
    type        -- one of 'at5', 'at3' or 'at2' (keeps the original
                   parameter name, even though it shadows the builtin,
                   so keyword callers are unaffected).

    Raises ValueError for an unknown type (a subclass of Exception, so
    callers catching the original bare Exception still work).
    """
    # type -> (number of 3`-end bases inspected, expected A/T residue count)
    params = {
        'at5': ('5', '2-3'),
        'at3': ('3', '1-2'),
        'at2': ('2', '1'),
    }
    if type not in params:
        # was: raise Exception(...) -- ValueError is the precise category
        raise ValueError('unsupported type in generate_A_T_hint')
    last_nucliotides_count, residue_length = params[type]
    dont = '' if checkStatus else 'don`t '
    return AT_HINT_TEMPLATE.format(
        last_nucliotides_count=last_nucliotides_count,
        dont=dont,
        residue_length=residue_length
    )
def generate_A_T_classes(checkStatus):
    """Return the space-separated CSS class string for one A/T badge.

    checkStatus -- True selects the green modifier class, False the red one.
    """
    status_class = 'at-status_green' if checkStatus else 'at-status_red'
    return ' '.join(AT_DEFAULT_CLASSES + [status_class])
def generate_A_T_status(checkStatus, type):
    """Bundle the CSS classes and tooltip hint for one A/T check.

    Returns a dict with 'classes' (class string) and 'hint' (tooltip text).
    """
    classes = generate_A_T_classes(checkStatus)
    hint = generate_A_T_hint(checkStatus, type)
    return {'classes': classes, 'hint': hint}
def map_to_view(primers):
    """Convert raw primer dicts into view-model dicts.

    Each entry carries the primer's sequence, length, melting temperature
    and the three A/T status badges (at5, at3, at2 -- in that order).
    """
    view = []
    for primer in primers:
        entry = {
            'sequence': primer['sequence'],
            'length': primer['length'],
            'tm': primer['tm'],
            'at_statuses': [
                generate_A_T_status(primer[check], check)
                for check in ('at5', 'at3', 'at2')
            ],
        }
        view.append(entry)
    return view
def build_primers_view(primers):
    """Build the full view model: forward and reverse primer lists."""
    view = {}
    for direction in ('forwards', 'reverses'):
        view[direction] = map_to_view(primers[direction])
    return view
| {
"repo_name": "ujenjt/miprimer",
"path": "view.py",
"copies": "1",
"size": "1841",
"license": "mit",
"hash": 3959264833405796400,
"line_mean": 25.6811594203,
"line_max": 93,
"alpha_frac": 0.5817490494,
"autogenerated": false,
"ratio": 3.1906412478336224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9271619406083452,
"avg_score": 0.0001541782300339192,
"num_lines": 69
} |
# ATDM Assignment 1
# Jens Raaby, May 2013
import sampling
def main():
    """Build the 7-node example Bayesian network from the assignment and
    demonstrate joint probability, ancestral sampling and Markov blanket
    sampling on it (Python 2 script)."""
    bn_given = sampling.BN()
    # Root nodes: the table maps (True,) to p(node = True).
    bn_given.add_node("x1",[],{(True,): 0.3})
    bn_given.add_node("x2",[],{(True,): 0.2})
    bn_given.add_node("x3",[],{(True,): 0.5})
    # Conditional tables: keys are tuples of parent truth values, in the same
    # order as the parents list; values are p(node = True | parents).
    bn_given.add_node("x4",["x1","x2","x3"],{(False,False,False): 0.05,
                                             (False,True, False): 0.7,
                                             (True, False,False): 0.3,
                                             (True, True, False): 0.9,
                                             (False,False,True): 0.5,
                                             (False,True, True): 0.75,
                                             (True, False,True): 0.7,
                                             (True, True, True): 0.95})
    bn_given.add_node("x5",["x1","x3"],{(False,False):0.05,
                                        (False,True): 0.8,
                                        (True,False): 0.07,
                                        (True,True): 0.8})
    bn_given.add_node("x6",["x4"],{(False,):0.2,(True,): 0.7})
    bn_given.add_node("x7",["x4","x5"],{(False,False):0.1,
                                        (False,True): 0.3,
                                        (True,False): 0.3,
                                        (True,True): 0.7})
    # print_graph.BNtoPNG(bn_given,'given.png')
    print bn_given.joint_probability()
    # Try out the joint sample and take an ancestral sample
    bn_given.print_joint_probability()
    bn_given.print_ancestral_sample()
    bn_given.reset_sampling()
    print "\n/////////////////\n"
    # Try markov blanket sampling:
    print "Markov Blanket Sampling X5"
    # Assign every node in x5's Markov blanket (parents x1,x3; child x7;
    # co-parent x4), then sample x5 given that blanket.
    blanket_nodes = {"x1":False, "x3": True, "x4": False, "x7": True}
    v = bn_given.markov_blanket_sampling("x5",blanket_nodes)
    print "Sampled %s" % v
    print "\n/////////////////\n"


if __name__ == '__main__':
    main()
| {
"repo_name": "jensraaby/AdvancedTopicsDataModelling",
"path": "BayesianNetworks/run.py",
"copies": "1",
"size": "2131",
"license": "mit",
"hash": -1457039912658071800,
"line_mean": 35.7413793103,
"line_max": 72,
"alpha_frac": 0.3749413421,
"autogenerated": false,
"ratio": 3.7918149466192173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9379082752140863,
"avg_score": 0.05753470731567094,
"num_lines": 58
} |
# ATDM Bayesian networks
# Jens Raaby, May 2013
"""
This file contains the code for Bayesian Networks with Binary Random Variables
Ancestral sampling and markov blanket sampling are implemented
"""
# We use the built in random generator and ordered dictionary libraries
import random
from collections import OrderedDict
class BN:
    """ A simple Bayesian network class over binary random variables.

    Stores nodes in a dictionary (str name -> BinaryRandomVariable).
    Edges are encoded in the nodes themselves: each node keeps references
    to its parent nodes and the *names* of its children.
    Python 2 code (print statements, xrange).
    """
    def __init__(self):
        # This dictionary keeps nodes, ordered by insertion
        self._nodes = OrderedDict()
        # Ancestral ordering (name -> 1-based index); populated lazily by
        # validate(), None until then.
        self.ordering = None

    def add_node(self, name, parents, pt):
        """Creates a random variable and inserts as a node.

        name    -- unique node name
        parents -- list of names of already-inserted parent nodes
        pt      -- probability table: tuple of parent truth values -> p(True)
        """
        if name in self._nodes:
            raise Exception("A random variable already exists with the name (%s)" % name)
        # we validate the parents, while adding references to list 'ps'
        # ps must be a list so the order is maintained
        ps = list()
        for p in parents:
            if p not in self._nodes:
                raise Exception("Error adding %s: Parent %s does not exist" % (name,p))
            else:
                ps.append(self._nodes[p])
        # create the node
        n = BinaryRandomVariable(name,ps,pt)
        # we add child references to the parents
        for p in ps:
            p.add_child(name)
        # insert the node
        self._nodes[name] = n

    def get_binary_random_variable(self, name):
        """Returns the node for a given random variable"""
        return self._nodes[name]

    def observe_node(self,node,value):
        """Sets the value of the given node"""
        self._nodes[node].observe(value)

    def observe_nodes(self,node_values={}):
        """Sets several values to the given observed values.
        Input is a dict from node name to observation (True or False).
        (The mutable default is only iterated, never mutated, so it is
        harmless here.)"""
        for n,v in node_values.items():
            self.observe_node(n,v)

    def sample_node(self,node):
        """Samples the given node using a random number"""
        return self._nodes[node].sample()

    def joint_probability(self):
        """
        Compute joint probability of all nodes.
        This is done using the equation:
        p(\mathbf{X}) = \prod_{i=1}^{N} p(\mathbf{X}_i | pa_i)

        NOTE(review): for non-root nodes this always conditions on *all
        parents being True* rather than on observed/sampled states --
        confirm this matches the intended assignment semantics.
        """
        if self.ordering is None:
            self.validate()
        px = 1.0
        # we iterate over the nodes in ancestral order
        for k in self.ordering:
            node = self._nodes[k]
            if node.is_root():
                # no ancestors involved
                px *= node.p()
            else:
                # generate the probability conditions for all parents being true:
                conditions = tuple([True for i in xrange(len(node.parents))])
                # get the probability of this node given all parents sampled True
                px *= node.p(conditions)
        return px

    def print_joint_probability(self):
        """Computes and prints the joint probability for the network"""
        jp = self.joint_probability()
        print "Joint probability: \n\tp(%s) = %s" % (','.join([n for n in self._nodes.keys()]), jp)
        return jp

    def ancestral_sample(self, selected=None):
        """Assigns values to all variables using ancestral sampling
        If selected contains a list of nodes, then only their assignments will be returned.

        NOTE(review): if a name in selected is never reached (e.g. a typo),
        the loop finishes without a return and the method yields None.
        """
        if self.ordering is None:
            self.validate()
        if selected is None:
            for k in self.ordering:
                node = self._nodes[k]
                node.sample()
            # sample() returns the cached value for already-sampled nodes,
            # so this comprehension just collects the assignments.
            return [(name,n.sample()) for (name,n) in self._nodes.items() ]
        else:
            # if only interested in a subset of nodes,
            # stop sampling process when they have been sampled
            remaining = list(selected)
            for k in self.ordering:
                node = self._nodes[k]
                node.sample()
                if k in remaining:
                    remaining.remove(k)
                # check if further sampling needed
                if len(remaining) == 0:
                    return [(name, self.sample_node(name)) for name in selected ]

    def print_ancestral_sample(self):
        """Take one ancestral sample and pretty-print it."""
        sampling = self.ancestral_sample()
        print "Ancestral sample: \n \t %s" % '\n\t '.join(["%s: %s"%(n,v) for (n,v) in sampling])
        return sampling

    def print_multi_sample(self,N):
        """
        Performs N ancestral samples and computes the frequencies of each state.
        The results are printed (along with proportions of the total)
        """
        stats = {}
        for n in xrange(N):
            sample = self.ancestral_sample()
            # setdefault initialises unseen states to 0 before incrementing
            # (the returned `count` itself is unused).
            count = stats.setdefault(tuple(sample),0)
            stats[tuple(sample)] += 1
            # observations must be cleared so the next sample is independent
            self.reset_sampling()
        # sort the frequency table, most common state first
        stats = OrderedDict(sorted(stats.items(), key=lambda x: x[1],reverse=True))
        print "Frequencies after %s samples: \n\t" %N, "\t".join(["%80s: %4s (%4s)\n" % (sample,stats[sample],stats[sample]/float(N)) for sample in stats.keys() ])
        return stats

    def markov_blanket(self,node):
        """
        Identifies the markov blanket of a given variable.
        This is the set of the parents, the children and the co-parents
        of the children.
        """
        if self.ordering is None:
            self.validate()
        n = self._nodes[node] # raises exception for missing node
        mb = set()
        for p in n.parents:
            mb.add(p.name)
        for c in n.children:
            mb.add(c)
            # co-parents: the other parents of this child (excluding node)
            cps = ([p.name for p in self._nodes[c].parents])
            for cp in cps:
                if not cp == node:
                    mb.add(cp)
        return mb

    def markov_blanket_sampling(self,node,assignments):
        """Generates a sample for a given variable given the
        markov blanket assignments.
        Takes the variable name, and the assignments to variables.
        The blanket variables should be assigned, but this isn't checked
        """
        n = self._nodes[node]
        mb = self.markov_blanket(node)
        # set the interest node to true for the sampling
        n.observe(True)
        print n,n._set  # debug output
        # Set the assignments
        self.observe_nodes(assignments)
        # numerator: p(node=True | parents) * prod over children of
        # p(child | its parents), under the current observations
        numerator = (n.get_probability())
        for p in n.children:
            # print p,": %s" % self.get_node(p).get_probability()
            numerator *= self.get_node(p).get_probability()
        # print numerator
        # set the variable to false
        n.observe(False)
        # NOTE(review): p_false is computed but never used below.
        p_false = 1 - n.get_probability()
        # print "Prob false %s" % p_false
        p_not = n.get_probability() * self.product([self.get_node(p).get_probability() for p in n.children])
        # set the variable to true
        n.observe(True)
        # print "Prob true %s" % n.get_probability()
        p = n.get_probability() * self.product([self.get_node(p).get_probability() for p in n.children])
        # normalise over the two possible states of the node
        denominator = p_not + p
        p_n_given_mb = (numerator/denominator)
        print "p(%s=True | %s) = %s" % (node,assignments, p_n_given_mb)
        # draw the sample: uniform number against the posterior p(True)
        rp = random.uniform(0.0,1.0)
        val = rp < p_n_given_mb
        n.observe(val)
        return val

    def rejection_sampling(self,evidence={},N=100):
        """
        If any variables are observed, then any samples
        which do not agree are ignored until we find one that does
        Note that if no variables are observed this is equivalent to
        ancestral_sample()
        evidence is the dictionary of assignments (can be empty)
        N is the number of samples to generate
        """
        print "Rejection sampling given evidence: \n\t"
        print "\n".join(["%20s: %10s" % (v,a) for v,a in evidence.items()])
        e_nodes = evidence.keys()
        stats = {}
        N_orig = N
        N_attempts = 0
        while N>0:
            failed = False
            self.reset_sampling()
            s = self.ancestral_sample()
            # verify against evidence:
            for n in e_nodes:
                # (the samples dict is rebuilt per evidence node; it could
                # be hoisted out of this loop)
                samples = dict((x,y) for x, y in s)
                if not samples[n] == evidence[n]:
                    failed = True
            if not failed:
                count = stats.setdefault(tuple(s),0)
                stats[tuple(s)] += 1
                N -= 1
            N_attempts +=1
        stats = OrderedDict(sorted(stats.items(), key=lambda x: x[1],reverse=True))
        print "Rejected %s samples out of %s" %(N_attempts-N_orig,N_attempts)
        print "Sample frequencies after %s samples: \n\t" %N_orig, "\t".join(["%80s: %4s (%4s)\n" % (sample,stats[sample],stats[sample]/float(N_orig)) for sample in stats.keys() ])
        return stats

    def reset_sampling(self):
        """
        Removes all observations from the network
        """
        for (name,rv) in self._nodes.items():
            rv.reset()

    def reset_node(self,node):
        """Clears any observation/sample on a single node."""
        self._nodes[node].reset()

    def print_ancestral_order(self):
        """Prints the name -> order-index mapping (None before validate())."""
        print "Ancestral ordering: \n\t", self.ordering

    # Utility functions
    def number_nodes(self):
        """Returns the number of nodes in the network."""
        return len(self._nodes)

    def validate(self):
        """
        Sets the ancestral order for all the nodes
        """
        if not self.ordering is None:
            return
        print "Setting ancestral order for network"
        self.ordering = {}
        nextID = 1
        roots = []
        # roots (no parents) are ordered first, in insertion order
        for i in self._nodes:
            if self._nodes[i].is_root():
                roots.append(i)
                self.ordering[i] = nextID
                self._nodes[i].set_order(nextID)
                nextID += 1
        # order the next level of nodes
        self.order_nodes(roots,nextID)

    def order_nodes(self,parents,nextID):
        """ Recursive method for setting ancestral order for
        all the descendant nodes for the given parents"""
        nextlevel = []
        for n in parents:
            for c in self._nodes[n].children:
                # only assign once for each node
                if not c in nextlevel:
                    nextlevel.append(c)
        # order the nextlevel:
        for p in nextlevel:
            self.ordering[p] = nextID
            self._nodes[p].set_order(nextID)
            nextID += 1
        # recursive call:
        # NOTE(review): recursion stops when exactly one node remains at a
        # level, so that node's descendants would never be ordered -- this
        # looks like it should be len(nextlevel) > 0; confirm.
        if len(nextlevel) > 1:
            self.order_nodes(nextlevel,nextID)

    def node_sampled(self,node):
        """
        Return true if the given node is sampled or observed
        """
        return not self._nodes[node]._set is None

    def get_node(self,node):
        """Returns the BinaryRandomVariable for the given node name."""
        return self._nodes[node]

    def product(self,numbers):
        """
        Multiplies a sequence of numbers together (empty product is 1.0).
        """
        r = 1.0
        for n in numbers:
            r *= n
        return r

    def __str__(self):
        return "BN (%s nodes)" % self.number_nodes()
class BinaryRandomVariable:
    """ A BinaryRandomVariable is a random variable that can take 2 states:
    - True or False - with some probability p(True)
    The variable can have N parents, in which case the probability table (PT)
    must have size 2^N. That is, you must enumerate the probability of this variable
    being true, given all the combinations of the parent variables

    The current observed/sampled state lives in _set (None = unsampled).
    """
    def __init__(self, name, parents=list(), pt={}):
        # (The mutable defaults are stored but never mutated by this class,
        # so they are not a shared-state hazard here.)
        self.name = name
        self.parents = parents
        # number of children can vary, so make it an array
        self.children = []
        # verify the PT dimensions
        # Since this is a binary variable there are 2^N (N = num. parents)
        if not (len(pt) == 2**len(parents) ):
            raise Exception("Wrong size probability table for %s parents, should be %s" % (len(parents),2**len(parents)))
        # store the probability table, set the initial state to indicate unsampled
        self.pt = pt
        self._set = None

    def is_root(self):
        """True when this variable has no parents."""
        return len(self.parents) == 0

    def is_leaf(self):
        """True when this variable has no children."""
        return len(self.children) == 0

    def set_order(self,order_index):
        """Stores the topographical order of this node locally
        We assume that all probability tuples are ordered using this order field.
        It's mainly for bookkeeping that we store this
        """
        self._order = order_index

    def observe(self,value):
        """
        Observes the value of this node with the given value.
        """
        if not isinstance(value,bool):
            raise Exception("Binary variables can only be observed as True or False")
        self._set = value

    def p(self,conditions={}):
        """
        Get the probability of this event, given conditions
        (if this is a root node, then just returns p(true))
        The conditions should be a tuple of truth values ordered by parents.
        Note the order of the conditions is assumed to be the same as the order
        used when creating the random variable.

        NOTE(review): calling this on a non-root without conditions uses the
        default {} as a pt key, which would raise TypeError (unhashable) --
        callers are expected to always pass a tuple here.
        """
        if self.is_root():
            return self.pt[(True,)]
        else:
            # we do still have a risk that the parents were supplied out of order
            return self.pt[conditions]

    def sample(self):
        """Take a sample for this node. Generated using PRNG and ancestral sampling"""
        # generate a random probability
        rp = random.uniform(0.0,1.0)
        # if the node was already sampled, we just return that value
        if not self._set is None:
            return self._set
        if self.is_root():
            self._set = rp < self.pt[(True,)]
            return self._set
        # when there are parents: sample each parent first (recursively),
        # in the order they were supplied at construction time
        samples = [None for p in self.parents]
        for i in xrange(len(self.parents)):
            samples[i] = self.parents[i].sample()
        # look up the probability based on the parents samples
        conditions = tuple(samples)
        self._set = rp < self.pt[conditions]
        return self._set

    def get_probability(self):
        """
        Similar to sample(), but just returns probability based on parents and current set state
        (note: this *does* sample any unsampled parents as a side effect)
        """
        if self.is_root():
            return self.pt[(True,)]
        # when there are parents:
        samples = [None for p in self.parents]
        for i in xrange(len(self.parents)):
            samples[i] = self.parents[i].sample()
        # look up the probability based on the parents samples
        conditions = tuple(samples)
        # if this variable is set, then return the prob:
        if not self._set is None:
            if self._set:
                return self.pt[conditions]
            else:
                # p(False | parents) is the complement of the stored p(True)
                return 1-self.pt[conditions]
        # otherwise just the prob that it is true given the ancestors:
        return self.pt[conditions]

    def reset(self):
        """Clear any sampled observation"""
        self._set = None

    def parents_orders(self):
        """Get the ancestral ordering for the parents.
        Not currently used, but might be useful in future"""
        return [p._order for p in self.parents]

    def add_child(self, child):
        """Adds a child to the node. Never adds duplicates"""
        if not child in set(self.children):
            self.children.append(child)

    def __str__(self):
        if self.is_root():
            return "Root: p(%s) = %s" % (self.name,self.pt[(True,)])
        return "Node: p(%s | %s)" % (self.name, [p.name for p in self.parents])
| {
"repo_name": "jensraaby/AdvancedTopicsDataModelling",
"path": "BayesianNetworks/sampling.py",
"copies": "1",
"size": "16214",
"license": "mit",
"hash": -8626689307434455000,
"line_mean": 32.8496868476,
"line_max": 180,
"alpha_frac": 0.5459479462,
"autogenerated": false,
"ratio": 4.345751809166443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391699755366444,
"avg_score": null,
"num_lines": null
} |
# ATDM Bayesian networks
# Jens Raaby, May 2013
# This script runs some examples using the popular wet grass network
import sampling
def main():
# We create the network used in the Sampling and MCMC lecture slides
bn = sampling.BN()
bn.add_node("Cloudy",[],{(True,): 0.5})
bn.add_node("Sprinkler",["Cloudy"],{(True,): 0.1, (False,): 0.5})
bn.add_node("Rain",["Cloudy"],{(False,):(0.2),(True,): (0.8)})
bn.add_node("WetGrass",["Sprinkler","Rain"],{(False,False):0.01, # otherwise we can get divide by zero
(False,True): 0.9,
(True,False): 0.9,
(True,True): 0.99})
bn.validate()
# Try out the joint sample and take an ancestral sample
bn.print_joint_probability()
bn.print_ancestral_sample()
bn.reset_sampling()
print "Ancestral sampling for only Cloudy and Sprinkler: \n%s" % bn.ancestral_sample(["Cloudy","Sprinkler"])
print "\n/////////////////\n"
# Perform lots of samples, printing the results as a frequency table
bn.print_multi_sample(10000)
print "\n/////////////////\n"
# Get the markov blanket for Rain
mb = bn.markov_blanket("Rain")
print "The markov blanket for Rain is \n\t%s" % mb
# Try markov blanket sampling:
blanket_nodes = {"Cloudy":False, "WetGrass": True, "Sprinkler": False}
bn.markov_blanket_sampling("Rain",blanket_nodes)
print "\n/////////////////\n"
obs = {"Cloudy":True, "Rain": False}
# try rejection sampling:
bn.rejection_sampling(blanket_nodes,500)
if __name__ == '__main__':
main()
| {
"repo_name": "jensraaby/AdvancedTopicsDataModelling",
"path": "BayesianNetworks/wetgrass.py",
"copies": "1",
"size": "1721",
"license": "mit",
"hash": 8525560771570223000,
"line_mean": 34.8541666667,
"line_max": 112,
"alpha_frac": 0.553747821,
"autogenerated": false,
"ratio": 3.435129740518962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.925872030155954,
"avg_score": 0.04603145199188433,
"num_lines": 48
} |
"""A Team Cowboy API client."""
import hashlib
import httplib
import json
import random
import sys
import time
import urllib
class TeamCowboyException(Exception):
    """A Team Cowboy API error.

    Raised by TeamCowboy._send when the parsed response body contains an
    'error' key; the exception argument is that response body dict.
    """
    pass
class TeamCowboy(object):
    """A Team Cowboy API client (Python 2: httplib/urllib/sys.maxint).

    Every public method delegates to _send, which adds the API key,
    timestamp, nonce and a SHA-1 request signature before dispatching.
    """

    def __init__(self, public_key, private_key):
        """Creates a new client.

        Args:
          public_key: The API public key.
          private_key: The API private key.
        """
        self._public_key = public_key
        self._private_key = private_key
        self._host = 'api.teamcowboy.com'
        self._path = '/v1/'

    def auth_get_user_token(self, username, password):
        """Gets an authentication token for the given user.

        Args:
          username: The username.
          password: The password.

        Returns:
          An authentication token for the given user.
        """
        params = {
            'username': username,
            'password': password
        }
        # use_https=True so the credentials are not sent in the clear
        return self._send('POST', 'Auth_GetUserToken', params, True)

    def test_get_request(self, test_param=''):
        """Sends a GET request to the test endpoint.

        Args:
          test_param: An optional string to send.

        Returns:
          A dict containing a 'helloWorld' attribute and, optionally, a 'testParam'
          attribute.
        """
        params = {'testParam': test_param}
        return self._send('GET', 'Test_GetRequest', params)

    def test_post_request(self, test_param=''):
        """Sends a POST request to the test endpoint.

        Args:
          test_param: An optional string to send.

        Returns:
          A dict containing a 'helloWorld' attribute and, optionally, a 'testParam'
          attribute.
        """
        params = {'testParam': test_param}
        return self._send('POST', 'Test_PostRequest', params)

    def user_get_teams(self, user_token, dashboard_teams_only=False):
        """Lists the teams for the user identified by user_token.

        Args:
          user_token: Token dict from auth_get_user_token (its 'token'
            entry is used).
          dashboard_teams_only: Whether to restrict to dashboard teams.
        """
        params = {
            'userToken': user_token['token'],
            'dashboardTeamsOnly': '1' if dashboard_teams_only else '0'
        }
        return self._send('GET', 'User_GetTeams', params)

    def team_get_events(
        self, user_token, team_id, season_id=None, filter_type='future',
        start_date_time=None, end_date_time=None, offset=0, qty=10,
        include_rsvp_info=False):
        """Gets a list of events for a given team.

        Args:
          user_token: The user's auth token.
          team_id: The team ID.
          season_id: The season ID.
          filter_type: The search filter type.
          start_date_time: The start date from when to search.
          end_date_time: The end date until when to search.
          offset: The event offset.
          qty: The number of games to fetch.
          include_rsvp_info: Whether to include RSVP info.

        Returns:
          A list of events matching the given criteria.
        """
        # Optional values are stringified; absent ones become empty strings
        # so they still participate in the request signature consistently.
        params = {
            'userToken': user_token['token'],
            'teamId': str(team_id),
            'seasonId': str(season_id) if season_id else '',
            'includeRSVPInfo': 'true' if include_rsvp_info else '',
            'filter': filter_type,
            'startDateTime': start_date_time if start_date_time else '',
            'endDateTime': end_date_time if end_date_time else '',
            'offset': str(offset),
            'qty': str(qty)
        }
        return self._send('GET', 'Team_GetEvents', params)

    def _send(self, http_method, tc_method, params, use_https=False):
        """Prepares and sends a request to Team Cowboy.

        Args:
          http_method: The HTTP method to use.
          tc_method: The Team Cowboy method name.
          params: The method parameters.
          use_https: Whether to use HTTPS.

        Returns:
          A dict with response data.

        Raises:
          TeamCowboyException: When an error is returned.
        """
        # Common request fields; all of them are covered by the signature.
        # (Note: this mutates the caller's params dict.)
        params.update({
            'api_key': self._public_key,
            'method': tc_method,
            'timestamp': str(int(time.time())),
            'nonce': str(self._generate_nonce())
        })
        params['sig'] = self._generate_signature(params, http_method)
        data = urllib.urlencode(params)
        if use_https:
            http = httplib.HTTPSConnection(self._host)
        else:
            http = httplib.HTTPConnection(self._host)
        if http_method == 'POST':
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            http.request(http_method, self._path, data, headers)
        else:
            # GET: parameters go in the query string instead of the body
            http.request(http_method, '%s?%s' % (self._path, data))
        response = http.getresponse()
        result = json.loads(response.read())
        http.close()
        body = result['body']
        if 'error' in body:
            raise TeamCowboyException(body)
        return body

    def _generate_nonce(self):
        """Generates a one-time-use number.

        Team Cowboy allows any value, so long as it is unique (it is unclear how
        uniqueness is defined).

        Returns:
          A one-time-use number.
        """
        # random (not secrets) suffices here: the nonce is an anti-replay
        # marker, not a secret.
        return random.randint(10000000, sys.maxint)

    def _generate_signature(self, params, method):
        """Generates a request signature.

        Args:
          params: A dict of request parameters.
          method: The HTTP method of the request.

        Returns:
          A signature for the HTTP request.
        """
        # Signature scheme: url-encode and lower-case each name=value pair,
        # sort by name, then SHA-1 the private key joined with the request
        # descriptor fields (pipe-separated).
        encoded = [
            '%s=%s' % (urllib.quote(i).lower(), urllib.quote(params[i]).lower())
            for i in sorted(params)]
        s = '%s|%s|%s|%s|%s|%s' % (
            self._private_key, method, params['method'], params['timestamp'],
            params['nonce'], '&'.join(encoded))
        return hashlib.sha1(s).hexdigest().lower()
| {
"repo_name": "kjiwa/sportszone-exporter",
"path": "teamcowboy.py",
"copies": "1",
"size": "5274",
"license": "mit",
"hash": 2379251952279876000,
"line_mean": 26.3264248705,
"line_max": 79,
"alpha_frac": 0.6224876754,
"autogenerated": false,
"ratio": 3.6778242677824267,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4800311943182427,
"avg_score": null,
"num_lines": null
} |
# A tee for TCP, similar to `socal -v`.
#
# | server
# client ---|
# | stdout
import socket
from select import select
import sys
import logging
import socks
class TcpTee:
def __init__(self, source_port, destination, proxy = None):
self.destination = destination
self.teesock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.teesock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.teesock.bind(('127.0.0.1', source_port))
self.teesock.listen(200)
socks.set_default_proxy(proxy['type'], proxy['server'], proxy['port'])
self.use_proxy = True
# Linked client/server sockets in both directions
self.channel = {}
def run(self):
while 1:
inputready, outputready, exceptready = select([self.teesock] + self.channel.keys(), [], [])
for s in inputready:
if s == self.teesock:
self.on_accept()
break
data = s.recv(4096)
if not data:
self.on_close(s)
break
self.on_recv(s, data)
def on_accept(self):
clientsock, clientaddr = self.teesock.accept()
if self.use_proxy:
serversock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
else:
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serversock.connect(self.destination)
except Exception:
print 'Could not connect to server %s. Closing connection to client %s' % (self.destination, clientaddr)
clientsock.close()
else:
print "%r has connected, connected to server %s" % (clientaddr, self.destination)
self.channel[clientsock] = serversock
self.channel[serversock] = clientsock
def on_close(self, sock):
print "%s has disconnected" % str(sock.getpeername())
othersock = self.channel[sock]
sock.close()
othersock.close()
del self.channel[sock]
del self.channel[othersock]
def on_recv(self, sock, data):
#print data
self.channel[sock].send(data)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("listen_port", help="The port this process will listen on.", type=int)
    parser.add_argument("server_host", help="The remote host to connect to.")
    parser.add_argument("server_port", help="The remote port to connect to.", type=int)
    args = parser.parse_args()
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    # NOTE(review): the SOCKS proxy location is hard-coded; consider exposing
    # it as command-line options.
    proxy = {
        "type": socks.SOCKS5,
        "server": "127.0.0.1",
        "port": 8000
    }
    # listen_port/server_port are already ints (argparse type=int); the extra
    # int() casts are redundant but harmless.
    tee = TcpTee(int(args.listen_port), (args.server_host, int(args.server_port)), proxy)
    try:
        tee.run()
    except KeyboardInterrupt:
        logging.info("Ctrl C - Good Bye")
        sys.exit(1)
"repo_name": "the729/sync-over-the-wall",
"path": "tcptee_proxy.py",
"copies": "1",
"size": "2986",
"license": "mit",
"hash": -7445304767545405000,
"line_mean": 31.1182795699,
"line_max": 116,
"alpha_frac": 0.5880776959,
"autogenerated": false,
"ratio": 3.7845373891001266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98562629913597,
"avg_score": 0.0032704187280852863,
"num_lines": 93
} |
"""A teeny Flask app to demonstrate how Grift can be used
Install requirements:
pip install -r requirements.txt
Start the server:
python app.py
Start the server in DEBUG mode:
DEBUG=1 python app.py
Once the server is running, try:
curl http://localhost:5000/varz
curl http://localhost:5000/check/grift
"""
import logging
import flask
from config import app_config
app = flask.Flask(__name__)

# Set the debug level and configure logging from the app_config object.
app.debug = app_config.DEBUG
app.logger.setLevel(app_config.LOG_LEVEL)

# NOT IMPLEMENTED:
# - Use a Postgres database, using app_config.DATABASE_URL as the
#   database url
# - Use another API with app_config.API_TOKEN and app_config.API_URL
@app.route('/check/<name>')
def check(name):
    """Report that the given thing is awesome."""
    message = '{} is awesome!!\n'.format(name)
    app.logger.info('Checking if %s is awesome', name)
    return message
@app.route('/varz')
def varz():
    """Expose the non-sensitive subset of the app settings as JSON."""
    safe_settings = app_config.varz
    return flask.json.jsonify(safe_settings)
if __name__ == '__main__':
    # Run the app on the configured port
    app.run(port=app_config.PORT)
| {
"repo_name": "cranti/grift",
"path": "examples/flask_app/app.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": -5103685061938468000,
"line_mean": 20.7254901961,
"line_max": 68,
"alpha_frac": 0.7003610108,
"autogenerated": false,
"ratio": 3.307462686567164,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4507823697367164,
"avg_score": null,
"num_lines": null
} |
""" A template Class for top-level experimental runs. """
import os
from numpy.random import RandomState
from multiprocessing import Pool
from simfMRI.io import write_hdf, get_model_names
from simfMRI.analysis.plot import hist_t
from simfMRI.mapreduce import create_chunks, reduce_chunks
from simfMRI.misc import process_prng
class Run():
    """ A template for an experimental run.

    Usage: fill in the attributes set to None in __init__ (BaseClass, nrun,
    TR, ISI, model_conf, savedir, ntrial, and optionally ncore), then call
    go() and save_results().
    Python 2 code: __call__ and _singleloop use tuple parameter unpacking.
    """
    def __init__(self):
        # ----
        # An instance of simfMRI.examples.* Class (or similar)
        # should go here.
        self.BaseClass = None ## = BaseClass()

        # ----
        # User Globals
        self.nrun = None
        self.TR = None
        self.ISI = None
        self.model_conf = None
        self.savedir = None
        self.ntrial = None

        # --
        # Optional Globals
        self.ncore = None

        # ----
        # Misc
        self.prngs = None ## A list of RandomState() instances
                          ## setup by the go() attr

    def __call__(self, (names, prng)):
        # Single-tuple signature so Pool.map(self, ...) can hand each worker
        # one (names, prng) pair.
        return self._singleloop((names, prng))

    def _single(self, name, prng):
        """ Using the BaseClass attribute run a simulation exp named
        <name> using the given prng. Returns a dictionary of results. """
        print("Experiment {0}.".format(name))
        exp = self.BaseClass(self.ntrial, TR=self.TR, ISI=self.ISI, prng=prng)
        exp.populate_models(self.model_conf)
        return exp.run(name)

    def _singleloop(self, (names, prng)):
        """ Loop over <names> and run an Exp for each. Each Exp() uses
        prng, a RandomState(). Returns a list of results dictionaries. """
        return [self._single(name, prng) for name in names]

    def go(self, parallel=False):
        """ Run an experimental run, results are stored the
        results attribute. """
        if parallel:
            # ----
            # Setup chunks and seeds: one independent RandomState per chunk
            # (seeds offset by 10).
            self.run_chunks = create_chunks(self.nrun, self.ncore)
            self.prngs = [process_prng(ii+10) for ii in range(
                    len(self.run_chunks))]

            # ----
            # Create a pool, and use it,
            # and store the results
            pool = Pool(self.ncore)
            results_in_chunks = pool.map(self, zip(self.run_chunks, self.prngs))
                ## Calling self here works via __call__

            self.results = reduce_chunks(results_in_chunks)
        else:
            # Run an experimental Run, and save to
            # self.results
            self.prngs = [process_prng(42), ]
            self.results = self._singleloop((range(self.nrun), self.prngs[0]))
                ## Calling self here works via __call__

    def save_results(self, name):
        """ Save results as <name> in the dir specified in the
        savedir attribute. """
        # ----
        # Create savedir if needed; OSError here usually means the directory
        # already exists -- any real problem surfaces in write_hdf below.
        try:
            os.mkdir(self.savedir)
        except OSError:
            pass

        print("Writing results to disk.")
        savepath = os.path.join(self.savedir, name+".hdf5")
        write_hdf(self.results, savepath)
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "runclass.py",
"copies": "1",
"size": "3264",
"license": "bsd-2-clause",
"hash": 2605480870608707600,
"line_mean": 29.5140186916,
"line_max": 80,
"alpha_frac": 0.5367647059,
"autogenerated": false,
"ratio": 4.147395171537484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184159877437484,
"avg_score": null,
"num_lines": null
} |
""" A template for creating commands.
author: Brian Schrader
since: 2016-01-13
"""
import copy, collections
from .tokens import Input, Output, FileToken, PathToken, CommentToken
from .command import Command
class Ticker(object):
    """A counter that wraps around once it reaches maxlen."""

    def __init__(self, maxlen, value=0):
        self.maxlen = maxlen
        self.value = value

    def tick(self, n=1):
        """Advance the counter by n, subtracting maxlen once on overflow."""
        total = self.value + n
        if total >= self.maxlen:
            total -= self.maxlen
        self.value = total
class CommandTemplate(Command):
    """A template command that eval() expands into one or more concrete
    Command instances, one per combination of its inputs.
    """

    def __init__(self, alias, parts=None, dependencies=None):
        """
        alias        -- template name; expanded commands are '<alias>.<i>'.
        parts        -- command parts (tokens, strings, nested input groups).
        dependencies -- CommandTemplates this template depends on.

        Fix: the original used mutable default arguments ([]), which are
        shared across instances; default to fresh lists instead.
        """
        self.alias = alias
        self.parts = parts if parts is not None else []
        self._dependencies = dependencies if dependencies is not None else []

    def __repr__(self):
        return '<CommandTemplate: {}, {} part(s), {} dep(s)>'.format(self.alias,
            len(self.parts), len(self._dependencies))

    @property
    def depends_on(self):
        """ Returns a list of command template aliases that the given command
        template depends on.
        """
        return [dep.alias for dep in self._dependencies]

    @property
    def file_parts(self):
        """ Returns a list of the file tokens in the list of parts. """
        return _search_for_files(self.parts)

    def eval(self):
        """ Returns a list of Command objects that can be evaluated as their
        string values. Each command will track it's preliminary dependencies,
        but these values should not be depended on for running commands.
        """
        max_size = _get_max_size(self.parts)
        parts_list = _grow([[]], max_size-1)
        counter = Ticker(max_size)
        # consume a copy so self.parts is left intact
        parts = self.parts[:]
        while len(parts) > 0:
            parts_list, counter = _get_parts_list(parts,
                parts_list, counter)
        commands = []
        for i, parts in enumerate(parts_list):
            alias = self._get_alias(i+1)
            # deep-copy so each Command owns independent token objects
            new_parts = copy.deepcopy(parts)
            commands.append(Command(alias=alias, parts=new_parts))
        return commands

    def _get_alias(self, index):
        """ Given an index, return the string alias for that command. """
        return '{}.{}'.format(self.alias, index)
def _get_parts_list(to_go, so_far=None, ticker=None):
    """ Iterates over to_go, building the list of parts. To provide
    items for the beginning, use so_far.

    Pops one part off the front of `to_go` (recursing into groups and
    magic inputs) and returns the updated `(so_far, ticker)` pair.
    """
    # Bug fix: the original default `so_far=[[]]` was a mutable default
    # argument, shared across every call that omitted it — parts from one
    # template evaluation accumulated into the next. Create a fresh one
    # per call instead.
    if so_far is None:
        so_far = [[]]
    try:
        part = to_go.pop(0)
    except IndexError:
        # Nothing left to consume.
        return so_far, ticker

    # Lists of input groups
    if isinstance(part, list) and any(isinstance(e, list) for e in part):
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
            ticker.tick()
    # Input Group
    elif isinstance(part, list) and any(isinstance(e, Input) for e in part):
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
    # Magic Inputs
    elif isinstance(part, Input) and part.is_magic:
        inputs = part.eval()
        while len(inputs) > 0:
            so_far, ticker = _get_parts_list(inputs, so_far, ticker)
            ticker.tick()
    # Normal inputs
    elif isinstance(part, Input) and not part.is_magic:
        so_far[ticker.value].append(part)
    # Everything else
    else:
        so_far = _append(so_far, part)
    return so_far, ticker
def _get_max_size(parts, size=1):
    """ Given a list of parts, find the maximum number of commands
    contained in it.
    """
    # The widest input group is simply the longest nested list of parts.
    group_lengths = [len(part) for part in parts if isinstance(part, list)]
    largest_group = max(group_lengths) if group_lengths else 0
    # Scale by the widest magic input found anywhere in the parts.
    return largest_group * _get_magic_size(parts)
def _get_magic_size(parts, size=1):
    """ Find how many files the widest magic input expands to.

    Returns immediately when a magic input wider than `size` is found;
    otherwise returns the widest size discovered in nested lists.
    """
    for item in parts:
        if isinstance(item, Input) and item.is_magic:
            width = len(item.eval())
            if width > size:
                return width
        elif isinstance(item, list):
            # Recurse into groups, carrying the best width seen so far.
            size = _get_magic_size(item, size)
    return size
def _append(so_far, item):
""" Appends an item to all items in a list of lists. """
for sub_list in so_far:
sub_list.append(item)
return so_far
def _grow(list_of_lists, num_new):
""" Given a list of lists, and a number of new lists to add, copy the
content of the first list into the new ones, and add them to the list
of lists.
"""
first = list_of_lists[0]
for i in range(num_new):
list_of_lists.append(copy.deepcopy(first))
return list_of_lists
def _search_for_files(parts):
    """ Return a flat list of every FileToken nested anywhere in parts. """
    found = []
    for part in parts:
        if isinstance(part, FileToken):
            found.append(part)
        elif isinstance(part, list):
            # Depth-first recursion preserves left-to-right order.
            found.extend(_search_for_files(part))
    return found
| {
"repo_name": "Sonictherocketman/metapipe",
"path": "metapipe/models/command_template.py",
"copies": "2",
"size": "4897",
"license": "mit",
"hash": -2550317245633691000,
"line_mean": 28.6787878788,
"line_max": 80,
"alpha_frac": 0.5942413723,
"autogenerated": false,
"ratio": 3.7756360832690823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006653886209441765,
"num_lines": 165
} |
""" A template that evaluates to muliple jobs and places them back on the queue.
author: Brian Schrader
since: 2016-02-19
"""
from .job import Job
class JobTemplate(Job):
    """ A job that, when submitted, expands its command template into
    multiple concrete jobs and pushes them onto the queue.

    :param alias: the unique name for this job.
    :param command_template: the template expanded into commands.
    :param depends_on: aliases of the jobs this one depends on.
    :param queue: the job queue the expanded jobs are pushed onto.
    :param job_class: the concrete Job subclass to instantiate.
    """

    def __init__(self, alias, command_template, depends_on, queue, job_class):
        super(JobTemplate, self).__init__(alias, command_template, depends_on)
        self.command_template = command_template
        self.queue = queue
        self.job_class = job_class
        self.jobs = []

    def __repr__(self):
        return '<JobTemplate: {}>'.format(self.alias)

    def submit(self):
        """ Expand the template into jobs and push each onto the queue. """
        jobs = self._get_jobs_from_template(self.command_template,
                self.job_class)
        # A plain loop (not a list comprehension) since push() is called
        # purely for its side effect.
        for job in jobs:
            self.queue.push(job)
        self.jobs = jobs

    def is_running(self):
        """ True while any of the expanded jobs is still running. """
        if len(self.jobs) > 0:
            return any(job.is_running() for job in self.jobs)
        return False

    def is_queued(self):
        """ The template itself is never queued; only its jobs are. """
        return False

    def is_complete(self):
        """ True once every expanded job has completed. """
        if len(self.jobs) > 0:
            return all(job.is_complete() for job in self.jobs)
        return False

    def is_error(self):
        """ True when all of the expanded jobs are in an error state. """
        if len(self.jobs) > 0:
            return all(job.is_error() for job in self.jobs)
        return False

    def is_fail(self):
        """ True when the job has exceeded its maximum retry count.

        Bug fix: the original evaluated the comparison but never
        returned it, so is_fail() always returned None (falsy, but
        accidentally so).
        """
        return self.attempts > self.MAX_RETRY

    def _get_jobs_from_template(self, template, job_class):
        """ Given a template, a job class, construct jobs from
        the given template.
        """
        jobs = []
        for command in template.eval():
            alias = command.alias
            # Keep only dependency aliases that match a queued job.
            depends_on = [job.alias
                    for job in self.queue.all_jobs
                    for deps in command.depends_on
                    if deps == job.alias]
            # Resolve file tokens against already-expanded (non-template)
            # jobs in the queue.
            command.update_dependent_files([job.command
                    for job in self.queue.all_jobs
                    if not isinstance(job, JobTemplate)])
            job = job_class(alias, command, depends_on)
            jobs.append(job)
        return jobs
| {
"repo_name": "TorkamaniLab/metapipe",
"path": "metapipe/models/job_template.py",
"copies": "2",
"size": "1959",
"license": "mit",
"hash": 945301795400278100,
"line_mean": 29.1384615385,
"line_max": 82,
"alpha_frac": 0.5758039816,
"autogenerated": false,
"ratio": 3.933734939759036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01731592947931037,
"num_lines": 65
} |
# A template the for the .h file
PPF_h_template = """
#ifndef {RefUpper:s}_H
#define {RefUpper:s}_H
class {Ref:s}Class : public Fluid{{
public:
{Ref:s}Class();
~{Ref:s}Class(){{}};
double psatL(double);
double psatV(double);
double rhosatL(double);
double rhosatV(double);
}};
#endif
"""
PPF_cpp_template = """
#if defined(_MSC_VER)
#define _CRTDBG_MAP_ALLOC
#define _CRT_SECURE_NO_WARNINGS
#include <stdlib.h>
#include <crtdbg.h>
#else
#include <stdlib.h>
#endif
#include "math.h"
#include "stdio.h"
#include <string.h>
#include "CoolProp.h"
#include "FluidClass.h"
#include "{Ref:s}.h"
{Ref:s}Class::{Ref:s}Class()
{{
// Constants for the ideal-gas contribution
static double a[]={{{acoeffs:s}}};
static double b[]={{{bcoeffs:s}}};
// Constants for the residual contribution
static double N[]={{{Ncoeffs:s}}};
static double t[]={{{tcoeffs:s}}};
static double d[]={{{dcoeffs:s}}};
static double l[]={{{Lcoeffs:s}}};
// Other fluid parameters
params.molemass = {molemass:g}; //[kg/kmol]
params.Ttriple = {Ttriple:g}; //[K]
params.accentricfactor = {accentric:g}; //[-]
params.R_u = 8.314472;
isPure = false;
// Critical parameters
crit.rho = {rhocrit:g};
crit.p = PressureUnit({pcrit:g},UNIT_KPA);
crit.T = {Tcrit:g};
crit.v = 1.0/crit.rho;
phirlist.push_back(new phir_power(N,d,t,l,1,{N_phir:d}-1,{N_phir:d}));
phi0list.push_back(new phi0_lead(0, 0));
phi0list.push_back(new phi0_logtau(-1.0));
phi0list.push_back(new phi0_cp0_poly(a[1],b[1],crit.T,298.15));
phi0list.push_back(new phi0_Planck_Einstein(a,b,2,{N_cp0:d},{N_cp0:d}+1));
// Adjust to the IIR reference state (h=200 kJ/kg, s = 1 kJ/kg for sat. liq at 0C)
params.HSReferenceState = "IIR";
// Limits of EOS
limits.Tmin = params.Ttriple;
name.assign("{Ref:s}");
}}
{pL:s}
{pV:s}
{rhoL:s}
{rhoV:s}
"""
| {
"repo_name": "henningjp/CoolProp",
"path": "dev/pseudo-pure/templates.py",
"copies": "2",
"size": "1950",
"license": "mit",
"hash": 2423575279145354000,
"line_mean": 23.375,
"line_max": 86,
"alpha_frac": 0.6041025641,
"autogenerated": false,
"ratio": 2.5523560209424083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9155740194237811,
"avg_score": 0.00014367816091954023,
"num_lines": 80
} |
# A template to be stored as .ycm_extra_conf.py either in the directory containing
# the sources to be edited or in one of the parent directories of them.
import os
import ycm_core
# If there is one, use the compilation database in .compdb/compile_commands.json
# For more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
compilation_database_folder = '.compdb'
# Otherwise these flags will apply
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-fexceptions',
# set to 'c99' or 'c11' for a C project
'-std=c++11',
'-x', 'c++',
# C
#'-std=c11',
#'-x', 'c',
'-I', 'include/'
]
#################################################
# No need to change anything below here, normally
#################################################
# TODO:
# - move the parsing of the compilation database into a separate python module
# Use the compilation database only if it actually exists next to this
# file. Bug fix: the original concatenated the folder name onto the
# *file* path (os.path.abspath(__file__) + "/" + folder) instead of the
# file's directory, so the check could never succeed and the database
# was always None.
if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               compilation_database_folder)):
    database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
    database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """ Absolute path of the directory containing this config file. """
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
    """ Rewrite relative include/search paths in `flags` as absolute
    paths anchored at `working_directory`. Returns a new list.
    """
    if not working_directory:
        return list( flags )

    path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
    absolute_flags = []
    expect_path = False

    for flag in flags:
        rewritten = flag

        if expect_path:
            expect_path = False
            if not flag.startswith( '/' ):
                rewritten = os.path.join( working_directory, flag )

        for prefix in path_flags:
            if flag == prefix:
                # Path comes as the *next* flag: mark it for rewriting.
                expect_path = True
                break
            if flag.startswith( prefix ):
                # Path glued onto the flag, e.g. '-Iinclude'.
                relative_part = flag[ len( prefix ): ]
                rewritten = prefix + os.path.join( working_directory,
                                                   relative_part )
                break

        if rewritten:
            absolute_flags.append( rewritten )
    return absolute_flags
def IsHeaderFile( filename ):
    """ True when `filename` has a C/C++ header extension. """
    _, ext = os.path.splitext( filename )
    return ext in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
    """ Look up compile flags for `filename` in the compilation database.

    The compilation_commands.json generated by CMake has no entries for
    header files, so for a header we try each sibling source file (same
    basename with a source extension) and borrow its flags. Returns None
    when no usable entry is found.
    """
    if not IsHeaderFile( filename ):
        return database.GetCompilationInfoForFile( filename )

    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if not os.path.exists( candidate ):
            continue
        info = database.GetCompilationInfoForFile( candidate )
        if info.compiler_flags_:
            return info
    return None
def FlagsForFile( filename, **kwargs ):
    """ Entry point called by YouCompleteMe to obtain flags for a file. """
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT
        # return a python list, but a "list-like" StringVec object.
        info = GetCompilationInfoForFile( filename )
        if not info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            info.compiler_flags_,
            info.compiler_working_dir_ )
    else:
        # No database: fall back to the static flags, resolved relative
        # to this script's directory.
        final_flags = MakeRelativePathsInFlagsAbsolute(
            flags, DirectoryOfThisScript() )
    return { 'flags': final_flags, 'do_cache': True }
| {
"repo_name": "MauroCalderara/LinAlg",
"path": ".ycm_extra_conf.py",
"copies": "1",
"size": "3470",
"license": "bsd-3-clause",
"hash": 8560277842327933000,
"line_mean": 28.6581196581,
"line_max": 85,
"alpha_frac": 0.6550432277,
"autogenerated": false,
"ratio": 3.7112299465240643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48662731742240645,
"avg_score": null,
"num_lines": null
} |
"""A Temporal Linkage implementation used in a DNC.
This Temporal Linkage is implemented as defined in the DNC architecture
in DeepMind's Nature paper:
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Author: Austin Derrow-Pinion
"""
import collections
import sonnet as snt
import tensorflow as tf
# State carried between timesteps: the N x N linkage matrix `L_t` and
# the length-N precedence weights `p_t` from the DNC paper.
TemporalLinkageState = collections.namedtuple('TemporalLinkageState', (
    'linkage_matrix', 'precedence_weights'))
class TemporalLinkage(snt.RNNCore):
    """A Temporal Linkage matrix that keeps track of the order of writes.

    This allows the DNC to iterate forward or backward in sequences of data
    written to external memory. This is very important for the DNC to be able
    to accomplish many tasks. For example, write a sequence of instructions to
    memory to later be executed in order.
    """

    def __init__(self,
                 memory_size=16,
                 name='temporal_linkage'):
        """Initialize a Temporal Linkage matrix used in a DNC.

        Args:
            memory_size: The number of memory slots in the external memory.
                Written as `N` in the DNC paper. Default value is 16.
            name: The name of the module (default 'temporal_linkage').
        """
        super(TemporalLinkage, self).__init__(name=name)
        self._memory_size = memory_size
        # Per-example state shapes: an N x N linkage matrix and a
        # length-N precedence-weight vector (batch dimension is added by
        # the surrounding RNN machinery).
        self._state_size = TemporalLinkageState(
            linkage_matrix=tf.TensorShape([self._memory_size,
                                           self._memory_size]),
            precedence_weights=tf.TensorShape([self._memory_size]))

    def _build(self, write_weightings, prev_state):
        """Compute one timestep of computation for the Temporal Linkage.

        Using the write weightings, `w_t^w`, this updates both the temporal
        linkage matrix, `L_t`, and the precedence weights, `p_t` for the next
        timestep.

        Args:
            write_weightings: A Tensor of shape `[batch_size, memory_size]`
                containing the weights to write with. Represented as `w_t^w`
                in the DNC paper for time `t`. If `w_t^w[i]` is 0 then nothing
                is written to memory regardless of the other parameters.
                Therefore it can be used to protect the external memory from
                unwanted modifications.
            prev_state: An instance of `TemporalLinkageState` containing the
                previous state of this Temporal Linkage.

        Returns:
            A `TemporalLinkageState` holding the updated linkage matrix
            and precedence weights.
        """
        # The linkage update consumes the *previous* precedence weights,
        # so it must run before the precedence weights are refreshed.
        new_linkage = self.updated_temporal_linkage_matrix(
            write_weightings, prev_state.precedence_weights,
            prev_state.linkage_matrix)
        new_precedence_weights = self.updated_precedence_weights(
            write_weightings, prev_state.precedence_weights)
        return TemporalLinkageState(linkage_matrix=new_linkage,
                                    precedence_weights=new_precedence_weights)

    def directional_weights(self, linkage, prev_read_weights):
        """Compute the forward and backward weighting for the DNC read heads.

        Given the temporal linkage matrix, `L_t`, and the previous timestep's
        read weightings, `w_{t-1}^{r,i}`, forward and backward weightings can
        be calculated as:
            f_t^i = L_t * w_{t-1}^{r,i}
            b_t^i = TRANSPOSE(L_t) * w_{t-1}^{r,i}

        Args:
            linkage: A Tensor of shape `[batch_size, memory_size, memory_size]`
                containing the values in the temporal linkage matrix.
                Represented in the DNC paper as `L_t` were `L_t[i, j]` is the
                degree to which slot `i` in the external memory matrix was the
                location written to after location `j`.
            prev_read_weights: A Tensor of shape
                `[batch_size, num_reads, memory_size]` containing the previous
                read weights. This is written in the DNC paper as
                `w_{t-1}^{r,i}` for time `t-1` for read head `i`.

        Returns:
            A tuple `(f_t^i, b_t^i)`. `f_t^i` is a Tensor of shape
            `[batch_size, num_reads, memory_size]` containing the values for
            forward weighting on every read head. `b_t^i` is a Tensor of shape
            `[batch_size, num_reads, memory_size]` containing the values for
            backward weighting on every read head.
        """
        # Multiply against the transposed read weights, then transpose
        # back so the num_reads axis precedes memory_size again.
        f_t = tf.matmul(linkage, prev_read_weights, transpose_b=True)
        b_t = tf.matmul(linkage, prev_read_weights, transpose_a=True,
                        transpose_b=True)
        # NOTE(review): tf.matrix_transpose is the TF1 name (renamed to
        # tf.linalg.matrix_transpose in TF2) — relevant if ever ported.
        return (tf.matrix_transpose(f_t), tf.matrix_transpose(b_t))

    def updated_temporal_linkage_matrix(self,
                                        write_weightings,
                                        precedence_weights,
                                        linkage):
        """Compute the next timestep values for the temporal linkage matrix.

        The temporal linkage matrix, represented in the DNC paper as `L_t`
        is defined by the following recurrence relation:
            L_0[i, j] = 0, for all i, j
            L_t[i, i] = 0, for all i
            L_t[i, j] = (1 - w_t^w[i] - w_t^w[j]) * L_{t - 1}[i, j]
                        + w_t^w[i] * p_{t - 1}[j]

        Args:
            write_weightings: A Tensor of shape `[batch_size, memory_size]`
                containing the weights to write with. Represented as `w_t^w`
                in the DNC paper for time `t`. If `w_t^w[i]` is 0 then nothing
                is written to memory regardless of the other parameters.
                Therefore it can be used to protect the external memory from
                unwanted modifications.
            precedence_weights: A Tensor of shape `[batch_size, memory_size]`.
                Represented in the DNC paper as `p_t` for time `t`. The value
                `p_t[i]` represents the degree to which location `i` was the
                last slot in external memory to be written to.
            linkage: A Tensor of shape `[batch_size, memory_size, memory_size]`
                containing the values in the temporal linkage matrix.
                Represented in the DNC paper as `L_t` were `L_t[i, j]` is the
                degree to which slot `i` in the external memory matrix was the
                location written to after location `j`.

        Returns:
            A Tensor of shape `[batch_size, memory_size, memory_size]`
            containing the next timestep values for the temporal linkage
            matrix.
        """
        # Broadcasting implements the elementwise recurrence: axis 2
        # carries the `i` index, axis 1 the `j` index.
        w_t_i = tf.expand_dims(write_weightings, 2)
        w_t_j = tf.expand_dims(write_weightings, 1)
        p_t_j = tf.expand_dims(precedence_weights, 1)
        weight_differences = 1 - w_t_i - w_t_j
        weighted_precedence = w_t_i * p_t_j
        new_linkage = weight_differences * linkage + weighted_precedence
        # Enforce L_t[i, i] = 0 by zeroing the diagonal. Requires a
        # statically-known batch size (get_shape()[0].value).
        batch_size = linkage.get_shape()[0].value
        updated_linkage = tf.matrix_set_diag(
            new_linkage,
            tf.zeros([batch_size, self._memory_size], dtype=linkage.dtype))
        return updated_linkage

    def updated_precedence_weights(self, write_weightings, precedence_weights):
        """Compute the next timestep value for the precedence weights.

        `p_t` is defined in the DNC paper as the following recurrence
        relationship where `w_t^w` is the write weightings at time `t`:
            p_0 = (p_{0,0}, p_{0,1}, ..., p_{0, N}) = (0, 0, ..., 0)
            p_t = (1 - SUM_i(w_t^w[i])) * p_{t-1} + w_t^w

        Args:
            write_weightings: A Tensor of shape `[batch_size, memory_size]`
                containing the weights to write with. Represented as `w_t^w`
                in the DNC paper for time `t`. If `w_t^w[i]` is 0 then nothing
                is written to memory regardless of the other parameters.
                Therefore it can be used to protect the external memory from
                unwanted modifications.
            precedence_weights: A Tensor of shape `[batch_size, memory_size]`.
                Represented in the DNC paper as `p_t` for time `t`. The value
                `p_t[i]` represents the degree to which location `i` was the
                last slot in external memory to be written to.

        Returns:
            A Tensor of shape `[batch_size, memory_size]` containing the next
            timestep values for the precedence weights as defined by the
            recurrence relation for `p_t`.
        """
        # A Tensor of shape `[batch_size, 1]`
        subtracted_write_weights = 1 - tf.reduce_sum(write_weightings,
                                                     axis=1,
                                                     keep_dims=True)
        # A Tensor of shape `[batch_size, memory_size]`
        p_t = subtracted_write_weights * precedence_weights + write_weightings
        return p_t

    @property
    def state_size(self):
        """Return a description of the state size."""
        return self._state_size
| {
"repo_name": "derrowap/DNC-TensorFlow",
"path": "src/dnc/temporal_linkage.py",
"copies": "1",
"size": "8980",
"license": "mit",
"hash": -4269342627658478000,
"line_mean": 47.8043478261,
"line_max": 79,
"alpha_frac": 0.588752784,
"autogenerated": false,
"ratio": 4.076259645937358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 184
} |
'''A temporary module to save random Github repos to the database.'''
from wtfhack.base.models import *
import random
from github import Github
# Languages sampled when generating repos; each name is passed verbatim
# to the Github legacy search API.
LANGUAGES = ['Android',
             'PHP',
             'Clojure',
             'Haskell',
             'Java',
             'Javascript',
             'Coffeescript',
             'Objective-C',
             'Python',
             'Ruby',
             'Scala',
             ]
def get_repos(language, query='popular'):
    '''Returns 100 most popular or random repos.

    Arguments:
    language (str) - The language to search for
    query (str) - Search from either 'popular' repos or
                  'random'.
    '''
    client = Github()
    search = client.legacy_search_repos(language)
    if query == 'popular':
        page_number = 0
    elif query == 'random':
        # Sample a page well past the front of the ranking.
        page_number = random.randint(2, 150)
    else:
        raise ValueError("query must be either 'popular' or 'random'")
    return search.get_page(page_number)
def gen_repos(n=5):
    '''Saves n repos for each language to the database.

    Returns True once every language has been processed.
    '''
    # NOTE: Python 2 print statement below — this module targets py2.
    for language in LANGUAGES:
        print "Generating {n} repos written in {lang}".format(n=n, lang=language)
        # Language rows are keyed by lowercased name.
        language_obj, created_lang = Language.objects.get_or_create(name=language.lower())
        repos = get_repos(language, query='random')
        for i in range(n):
            if len(repos) > 0:
                # Pop a random repo so the same one isn't saved twice.
                repo = repos.pop(random.randint(0, len(repos) - 1))
                repo_obj, created_repo = Repo.objects.get_or_create(full_name=repo.full_name,
                                                                    description=repo.description,
                                                                    language=language_obj)
    return True
| {
"repo_name": "sloria/wtfhack",
"path": "wtfhack/base/genrepos.py",
"copies": "1",
"size": "1738",
"license": "bsd-3-clause",
"hash": -7940944990533169000,
"line_mean": 31.1851851852,
"line_max": 93,
"alpha_frac": 0.5448791715,
"autogenerated": false,
"ratio": 4.108747044917258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153626216417257,
"avg_score": null,
"num_lines": null
} |
"""A temporary plugin for debugging tests
This is useful for finding tests that do not cleanup after themselves.
Usage:
- Uncomment 'corehq.tests.noseplugins.debug.DebugPlugin' in testsettings.py
- Customize DebugPlugin below to inspect state.
Tips:
- Write to `sys.__stdout__` to bypass stdout and logging collector.
- `afterContext` is run at test collection time, not after teardown.
- Plugin interface:
https://nose.readthedocs.org/en/latest/plugins/interface.html
"""
import sys
from nose.plugins import Plugin
class DebugPlugin(Plugin):
    """Temporary debugging plugin"""
    name = "debug"
    enabled = True

    def options(self, parser, env):
        """Avoid adding a ``--with`` option for this plugin."""

    def configure(self, options, conf):
        """Do not call super (always enabled)"""

    # def prepareTestCase(self, case):
    #     from custom.ewsghana.models import FacilityInCharge
    #     def audit(result):
    #         try:
    #             case.test(result)
    #         finally:
    #             sys.__stdout__.write("{}: {}\n".format(
    #                 case.test,
    #                 [f.id for f in FacilityInCharge.objects.all()],
    #             ))
    #     return audit

    def stopContext(self, context):
        """Report leftover 'user1' User rows after a test context exits.

        Useful for finding tests that do not clean up after themselves.
        """
        # Import lazily so the plugin can load before django settings are
        # configured.
        from django.contrib.auth.models import User
        num = User.objects.filter(username='user1').count()
        if num:
            # Write to sys.__stdout__ to bypass nose's stdout capture and
            # the logging collector.
            sys.__stdout__.write("\n{} {}\n".format(num, context))

    # def wantFunction(self, func):
    #     """Do not want 'test' functions with required args"""
    #     import inspect
    #     if "test" in func.__name__ and getattr(func, '__test__', True):
    #         spec = inspect.getargspec(func)
    #         return len(spec.args) <= len(spec.defaults or [])
    #     return None
| {
"repo_name": "qedsoftware/commcare-hq",
"path": "corehq/tests/noseplugins/debug.py",
"copies": "2",
"size": "1781",
"license": "bsd-3-clause",
"hash": -2115986498254101000,
"line_mean": 30.2456140351,
"line_max": 75,
"alpha_frac": 0.6103312746,
"autogenerated": false,
"ratio": 3.8971553610503284,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5507486635650328,
"avg_score": null,
"num_lines": null
} |
""" A temporary test program
This is the details.
"""
# import collections
# import functools
# c = collections.defaultdict(str)
# c["a"] = "a"
# print(len(c))
# if c["a"] == "":
# print("OK")
# s = "\taa bb"
# if s.startswith("\t"):
# p = s.split(" ")
# print(p)
# l1 = [1, 2, 3, 1, 4, 5]
# for i in li:
# if i == 1:
# li.remove(i)
# for idx, val in enumerate(l1):
# if val == 1 or val == 4:
# del l1[idx]
# print(l1)
# s = " "
# if s.isspace():
# print('"%s" is space' % s)
# else:
# print('"%s" is not space' % s)
# print("%d %d".rjust(20) % (2, 5))
# class Info:
# """ A class for information
# Descriptions.
# """
# def __init__(self):
# self.name = ""
# self.age = 0
# self.addr = ""
# def print_name(self):
# print(self.name)
# print(globals().keys())
# info1 = Info()
# info1.name = "Kevin"
# info1.print_name()
# print(vars(info1))
# help("tmp")
# help("__main__")
# from ctypes import *
# msvcrt = cdll.msvcrt
# msvcrt.printf(b"Hello, World\n")
# msvcrt.printf(bytes("Hello\n\n".encode("utf-8")))
# with open("foo.txt", "w", encoding="utf-8") as f:
# f.write("阿斯顿\n")
# s = "a\xac\u1234\u20ac\U00008000"
# f.write(s)
# with open("foo.txt", "r", encoding="utf-8") as f:
# s = f.read()
# print(s) # Error on Windows Chinese for '\xac' can not convert to 'gbk'.
# with open("foo.txt", "wb") as f:
# f.write(bytes([0x41, 0x61, 0x0A]))
# f.write(bytes([0x41, 0x61]))
# with open("foo.txt", "rb") as f:
# lines = [line.decode("utf-8").rstrip('\n') for line in f]
# print(lines)
# a = [1, 2]
# b = [1, 2]
# for x, y in zip(a, b):
# print(x, y)
# s = "asdadddsd"
# sps = s.split(" ", 1)
# sps.append("")
# print(sps[1])
# ret = functools.reduce(lambda a, b: a * b, [1, 2, 3, 4], 1)
# print(ret)
# li = [1, 2, 3, 4]
# li.extend([5, 6])
# print(li)
| {
"repo_name": "quchunguang/test",
"path": "testpy3/tmp.py",
"copies": "1",
"size": "1927",
"license": "mit",
"hash": -1689538153275200800,
"line_mean": 17.1226415094,
"line_max": 75,
"alpha_frac": 0.5096304008,
"autogenerated": false,
"ratio": 2.3570552147239265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33666856155239266,
"avg_score": null,
"num_lines": null
} |
# A temporary wrapper to connect to the HLC LLVM binaries.
# Currently, connect to commandline interface.
from __future__ import print_function, absolute_import
import sys
from subprocess import check_call, check_output
import subprocess
import tempfile
import os
import re
from collections import namedtuple
from numba import config
from numba.roc.hsadrv import devices
from .common import AMDGCNModule
from .config import ROCM_BC_PATH
from . import TRIPLE
from datetime import datetime
from contextlib import contextmanager
from numba import utils
from numba.roc.hsadrv.error import HsaSupportError
_real_check_call = check_call
NOISY_CMDLINE = False
@contextmanager
def error_pipe():
    """Yield the stderr destination for tool invocations.

    With NOISY_CMDLINE, stderr is merged into stdout; otherwise it is
    discarded (DEVNULL on py3, an explicitly-opened os.devnull on py2).
    """
    if NOISY_CMDLINE:
        yield subprocess.STDOUT
        return
    if utils.IS_PY3:
        yield subprocess.DEVNULL
    else:
        with open(os.devnull, 'wb') as devnull:
            yield devnull
def check_call(*args, **kwargs):
    """Run subprocess.check_call with stderr routed through error_pipe().

    This is so that time is stamped against invocation such that
    correlations can be looked for against messages in the sys and
    kernel logs.
    """
    try:
        with error_pipe() as stderr:
            if NOISY_CMDLINE:
                timestamp = datetime.now().strftime("%b %d %H:%M:%S")
                print(timestamp, file=sys.stdout)
                print('CMD: ' + ';'.join(args), file=sys.stdout)
            result = _real_check_call(*args, stderr=stderr, **kwargs)
    except subprocess.CalledProcessError as e:
        print(e)
        raise(e)
    return result
class CmdLine(object):
    """Wrapper around the ROCm LLVM command-line tools.

    Lazily builds command templates for `opt`, `llc`, `llvm-link` and
    `ld.lld`, targeting the GPU agent of the current HSA device context.
    The templates are built lazily because the target CPU name is only
    known once a device context exists.
    """

    def _initialize(self):
        """Resolve the GPU target and build the command templates once."""
        if not self.initialized:
            dev_ctx = devices.get_context()
            target_cpu = dev_ctx.agent.name.decode('UTF-8')
            self.target_cpu = "-mcpu %s" % target_cpu

            self.CMD_OPT = ' '.join([
                self.opt,
                "-O3",
                self.triple_flag,
                self.target_cpu,
                "-disable-simplify-libcalls",
                "-verify",
                "-S",
                "-o {fout}",
                "{fin}"])
            self.CMD_VERIFY = ' '.join([
                self.opt,
                "-verify",
                self.triple_flag,
                self.target_cpu,
                "-S",
                "-o {fout}",
                "{fin}"])
            self.CMD_GEN_HSAIL = ' '.join([self.llc,
                                           "-O2",
                                           self.triple_flag,
                                           self.target_cpu,
                                           "-filetype=asm",
                                           "-o {fout}",
                                           "{fin}"])
            self.CMD_GEN_BRIG = ' '.join([self.llc,
                                          "-O2",
                                          self.triple_flag,
                                          self.target_cpu,
                                          "-filetype=obj",
                                          "-o {fout}",
                                          "{fin}"])
            self.CMD_LINK_BUILTINS = ' '.join([
                self.llvm_link,
                "-S",
                "-o {fout}",
                "{fin}",
                "{lib}"])
            self.CMD_LINK_LIBS = ' '.join([self.llvm_link,
                                           "-S",
                                           "-o {fout}",
                                           "{fin}"])
            self.CMD_LINK_BRIG = ' '.join([self.ld_lld,
                                           "-shared",
                                           "-o {fout}",
                                           "{fin}"])
            # Bug fix: the flag was never set, so every call re-queried
            # the device context and rebuilt all the templates.
            self.initialized = True

    def __init__(self):
        self._binary_path = os.environ.get('HSAILBIN', None)

        def _setup_path(tool):
            # Prefer the HSAILBIN override; otherwise look for the tool
            # next to the Python installation (sys.prefix/bin).
            if self._binary_path is not None:
                return os.path.join(self._binary_path, tool)
            else:
                binpath = os.path.join(sys.prefix, 'bin', tool)
                return binpath

        self._triple = TRIPLE
        self.opt = _setup_path("opt")
        self.llc = _setup_path("llc")
        self.llvm_link = _setup_path("llvm-link")
        self.ld_lld = _setup_path("ld.lld")
        self.triple_flag = "-mtriple %s" % self._triple
        self.initialized = False

    def check_tooling(self):
        """Smoke-test that `llc` exists and can target amdgcn.

        Raises HsaSupportError when the tooling is missing or lacks the
        amdgcn backend.
        """
        # make sure the llc can actually target amdgcn, ideally all tooling
        # should be checked but most don't print anything useful and so
        # compilation for AMDGCN would have to be tested instead. This is a
        # smoke test like check.
        try:
            if not os.path.isfile(self.llc):
                raise HsaSupportError('llc not found')
            output = check_output([self.llc, '--version'],
                                  universal_newlines=True)
            olines = [x.strip() for x in output.splitlines()]
            tgtidx = olines.index('Registered Targets:')
            targets = olines[tgtidx + 1:]
            for tgt in targets:
                if 'amdgcn' in tgt:
                    break
            else:
                msg = 'Command line tooling does not support "amdgcn" target'
                raise HsaSupportError(msg)
        except BaseException as e:
            raise

    def verify(self, ipath, opath):
        """Run `opt -verify` on ipath, writing LLVM IR to opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_VERIFY.format(fout=opath, fin=ipath), shell=True)

    def optimize(self, ipath, opath):
        """Run `opt -O3` on ipath, writing optimized IR to opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_OPT.format(fout=opath, fin=ipath), shell=True)

    def generate_hsail(self, ipath, opath):
        """Compile the IR at ipath to HSAIL assembly at opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_HSAIL.format(fout=opath, fin=ipath), shell=True)

    def generate_brig(self, ipath, opath):
        """Compile the IR at ipath to a BRIG object file at opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_BRIG.format(fout=opath, fin=ipath), shell=True)

    def link_libs(self, ipath, libpaths, opath):
        """Link ipath together with each library in libpaths into opath."""
        if not self.initialized:
            self._initialize()
        cmdline = self.CMD_LINK_LIBS.format(fout=opath, fin=ipath)
        # Bug fix: each library must be separated from the preceding
        # argument by a space; the original joined the list directly onto
        # the command string, fusing the first library onto {fin}.
        cmdline += ''.join([' {0}'.format(lib) for lib in libpaths])
        check_call(cmdline, shell=True)

    def link_brig(self, ipath, opath):
        """Link a BRIG object into a shared object with ld.lld."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_LINK_BRIG.format(fout=opath, fin=ipath), shell=True)
class Module(AMDGCNModule):
    """Accumulates LLVM IR modules and drives the HLC command-line tools
    to produce HSAIL text and a linked BRIG binary for the AMDGCN target.
    """

    def __init__(self):
        """
        Setup
        """
        # All intermediate artifacts live in one private temp directory.
        self._tmpdir = tempfile.mkdtemp()
        self._tempfiles = []
        self._linkfiles = []
        self._cmd = CmdLine()
        AMDGCNModule.__init__(self)

    def __del__(self):
        # NOTE(review): the early return disables cleanup on garbage
        # collection, leaving the temp files behind (presumably left in
        # place deliberately for debugging) — confirm before removing.
        return
        self.close()

    def close(self):
        # Remove all temporary files
        for afile in self._tempfiles:
            os.unlink(afile)
        # Remove directory
        os.rmdir(self._tmpdir)

    def _create_temp_file(self, name, mode='wb'):
        # Open (and track for cleanup) a new file under the temp dir.
        path = self._track_temp_file(name)
        fobj = open(path, mode=mode)
        return fobj, path

    def _track_temp_file(self, name):
        # Register a uniquely-numbered path under the temp dir so that
        # close() can delete it later.
        path = os.path.join(self._tmpdir,
                            "{0}-{1}".format(len(self._tempfiles), name))
        self._tempfiles.append(path)
        return path

    def load_llvm(self, llvmir):
        """
        Load LLVM with HSAIL SPIR spec
        """
        # Preprocess LLVM IR
        llvmir = self._preprocess(llvmir)

        # Create temp file to store the input file
        tmp_llvm_ir, fin = self._create_temp_file("dump-llvm-ir")
        with tmp_llvm_ir:
            tmp_llvm_ir.write(llvmir.encode('ascii'))

        # Create temp file for optimization
        fout = self._track_temp_file("verified-llvm-ir")
        self._cmd.verify(ipath=fin, opath=fout)

        if config.DUMP_OPTIMIZED:
            with open(fout, 'rb') as fin_opt:
                print(fin_opt.read().decode('ascii'))

        self._linkfiles.append(fout)

    def link_builtins(self, ipath, opath):
        # progressively link in all the bitcodes
        for bc in self.bitcodes:
            # Intermediate links get a name derived from the bitcode
            # path; only the last link writes to the requested opath.
            if bc != self.bitcodes[-1]:
                tmp_opath = opath + bc.replace('/', '_').replace('.','_')
            else:
                tmp_opath = opath
            lib = os.path.join(ROCM_BC_PATH, bc)
            cmd = self._cmd.CMD_LINK_BUILTINS.format(fout=tmp_opath, fin=ipath, lib=lib)
            check_call(cmd, shell=True)
            # Each link's output feeds the next as its input.
            ipath = tmp_opath

    def generateGCN(self):
        """
        Generate GCN from a module and also return the HSAIL code.
        """
        assert not self._finalized, "Module already has GCN generated"

        # Link dependencies libraries
        llvmfile = self._linkfiles[0]
        pre_builtin_path = self._track_temp_file("link-dep")
        libpaths = self._linkfiles[1:]
        self._cmd.link_libs(ipath=llvmfile, libpaths=libpaths,
                            opath=pre_builtin_path)

        # Link library with the builtin modules
        linked_path = self._track_temp_file("linked-path")
        self.link_builtins(ipath=pre_builtin_path, opath=linked_path)

        # Optimize
        opt_path = self._track_temp_file("optimized-llvm-ir")
        self._cmd.optimize(ipath=linked_path, opath=opt_path)

        if config.DUMP_OPTIMIZED:
            with open(opt_path, 'rb') as fin:
                print(fin.read().decode('ascii'))

        # Compile the llvm to HSAIL
        hsail_path = self._track_temp_file("create-hsail")
        self._cmd.generate_hsail(ipath=opt_path, opath=hsail_path)

        # Compile the llvm to BRIG
        brig_path = self._track_temp_file("create-brig")
        self._cmd.generate_brig(ipath=opt_path, opath=brig_path)

        # link
        end_brig_path = self._track_temp_file("linked-brig")
        self._cmd.link_brig(ipath = brig_path, opath=end_brig_path)

        self._finalized = True

        # Read HSAIL
        with open(hsail_path, 'rb') as fin:
            hsail = fin.read().decode('ascii')

        # Read BRIG
        with open(end_brig_path, 'rb') as fin:
            brig = fin.read()

        if config.DUMP_ASSEMBLY:
            print(hsail)

        return namedtuple('FinalizerResult', ['hsail', 'brig'])(hsail, brig)
| {
"repo_name": "cpcloud/numba",
"path": "numba/roc/hlc/hlc.py",
"copies": "2",
"size": "10064",
"license": "bsd-2-clause",
"hash": 2596910983498962400,
"line_mean": 31.7817589577,
"line_max": 88,
"alpha_frac": 0.5308028617,
"autogenerated": false,
"ratio": 3.8426880488736157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027730700234691362,
"num_lines": 307
} |
# A temporary wrapper to connect to the HLC LLVM binaries.
# Currently, connect to commandline interface.
import sys
from subprocess import check_call, check_output
import subprocess
import tempfile
import os
import re
from collections import namedtuple
from numba.roc.hsadrv import devices
from .common import AMDGCNModule
from .config import ROCM_BC_PATH
from numba.roc.hlc import TRIPLE
from datetime import datetime
from contextlib import contextmanager
from numba.core import utils, config
from numba.roc.hsadrv.error import HsaSupportError
_real_check_call = check_call
NOISY_CMDLINE = False
@contextmanager
def error_pipe():
    """Yield the stderr destination for tool subprocesses: merged into
    stdout when NOISY_CMDLINE is set, otherwise discarded."""
    target = subprocess.STDOUT if NOISY_CMDLINE else subprocess.DEVNULL
    yield target
def check_call(*args, **kwargs):
    """Wrap subprocess.check_call, optionally echoing the command.

    This is so that time is stamped against invocation such that
    correlations can be looked for against messages in the sys and
    kernel logs.  stderr routing is controlled by error_pipe().
    """
    try:
        with error_pipe() as stderr:
            if NOISY_CMDLINE:
                print(datetime.now().strftime("%b %d %H:%M:%S"),
                      file=sys.stdout)
                print('CMD: ' + ';'.join(args), file=sys.stdout)
            ret = _real_check_call(*args, stderr=stderr, **kwargs)
    except subprocess.CalledProcessError as e:
        print(e)
        # Bare `raise` re-raises with the original traceback intact;
        # the previous `raise(e)` started a fresh traceback from here.
        raise
    return ret
class CmdLine(object):
    """Wrapper around the HLC command-line tools (opt/llc/llvm-link/ld.lld).

    Tool paths are resolved eagerly in __init__; the device-dependent
    command templates are built lazily by _initialize() because they need
    an HSA device context to query the target CPU name.
    """

    def _initialize(self):
        """Build the command templates for the current device (runs once)."""
        if not self.initialized:
            dev_ctx = devices.get_context()
            target_cpu = dev_ctx.agent.name.decode('UTF-8')
            self.target_cpu = "-mcpu %s" % target_cpu
            self.CMD_OPT = ' '.join([
                self.opt,
                "-O3",
                self.triple_flag,
                self.target_cpu,
                "-disable-simplify-libcalls",
                "-verify",
                "-S",
                "-o {fout}",
                "{fin}"])
            self.CMD_VERIFY = ' '.join([
                self.opt,
                "-verify",
                self.triple_flag,
                self.target_cpu,
                "-S",
                "-o {fout}",
                "{fin}"])
            self.CMD_GEN_HSAIL = ' '.join([self.llc,
                                           "-O2",
                                           self.triple_flag,
                                           self.target_cpu,
                                           "-filetype=asm",
                                           "-o {fout}",
                                           "{fin}"])
            self.CMD_GEN_BRIG = ' '.join([self.llc,
                                          "-O2",
                                          self.triple_flag,
                                          self.target_cpu,
                                          "-filetype=obj",
                                          "-o {fout}",
                                          "{fin}"])
            self.CMD_LINK_BUILTINS = ' '.join([
                self.llvm_link,
                "-S",
                "-o {fout}",
                "{fin}",
                "{lib}"])
            self.CMD_LINK_LIBS = ' '.join([self.llvm_link,
                                           "-S",
                                           "-o {fout}",
                                           "{fin}"])
            self.CMD_LINK_BRIG = ' '.join([self.ld_lld,
                                           "-shared",
                                           "-o {fout}",
                                           "{fin}"])
            # Fix: mark initialization as done so the device context and
            # command templates are not rebuilt on every tool invocation
            # (the flag was previously never set).
            self.initialized = True

    def __init__(self):
        # $HSAILBIN overrides where the LLVM/HLC tools are looked up.
        self._binary_path = os.environ.get('HSAILBIN', None)

        def _setup_path(tool):
            # Prefer $HSAILBIN, otherwise the interpreter's bin directory.
            if self._binary_path is not None:
                return os.path.join(self._binary_path, tool)
            else:
                binpath = os.path.join(sys.prefix, 'bin', tool)
                return binpath

        self._triple = TRIPLE
        self.opt = _setup_path("opt")
        self.llc = _setup_path("llc")
        self.llvm_link = _setup_path("llvm-link")
        self.ld_lld = _setup_path("ld.lld")
        self.triple_flag = "-mtriple %s" % self._triple
        self.initialized = False

    def check_tooling(self):
        """Smoke-test that llc exists and can target amdgcn.

        Make sure the llc can actually target amdgcn; ideally all tooling
        should be checked but most don't print anything useful and so
        compilation for AMDGCN would have to be tested instead.
        (A no-op try/except that only re-raised has been removed.)
        """
        if not os.path.isfile(self.llc):
            raise HsaSupportError('llc not found')
        output = check_output([self.llc, '--version'],
                              universal_newlines=True)
        olines = [x.strip() for x in output.splitlines()]
        tgtidx = olines.index('Registered Targets:')
        targets = olines[tgtidx + 1:]
        for tgt in targets:
            if 'amdgcn' in tgt:
                break
        else:
            msg = 'Command line tooling does not support "amdgcn" target'
            raise HsaSupportError(msg)

    def verify(self, ipath, opath):
        """Run `opt -verify` on ipath, writing the verified IR to opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_VERIFY.format(fout=opath, fin=ipath), shell=True)

    def optimize(self, ipath, opath):
        """Run `opt -O3` on ipath, writing optimized IR to opath."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_OPT.format(fout=opath, fin=ipath), shell=True)

    def generate_hsail(self, ipath, opath):
        """Compile LLVM IR to textual HSAIL assembly."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_HSAIL.format(fout=opath, fin=ipath), shell=True)

    def generate_brig(self, ipath, opath):
        """Compile LLVM IR to a BRIG object file."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_GEN_BRIG.format(fout=opath, fin=ipath), shell=True)

    def link_libs(self, ipath, libpaths, opath):
        """Link ipath with the LLVM libraries in libpaths into opath."""
        if not self.initialized:
            self._initialize()
        # Fix: join the base command and every library path with spaces --
        # the original appended the first library directly onto the input
        # filename, producing e.g. "...input.lllib1.ll".
        cmdline = ' '.join([self.CMD_LINK_LIBS.format(fout=opath, fin=ipath)]
                           + list(libpaths))
        check_call(cmdline, shell=True)

    def link_brig(self, ipath, opath):
        """Link a BRIG object into a shared object with ld.lld."""
        if not self.initialized:
            self._initialize()
        check_call(self.CMD_LINK_BRIG.format(fout=opath, fin=ipath), shell=True)
class Module(AMDGCNModule):
    """Accumulates LLVM IR files in a private temp directory and finalizes
    them to HSAIL/BRIG through the CmdLine tool wrapper."""

    def __init__(self):
        """
        Setup: create a private temporary directory and the tool wrapper.
        """
        self._tmpdir = tempfile.mkdtemp()
        self._tempfiles = []
        self._linkfiles = []
        self._cmd = CmdLine()
        AMDGCNModule.__init__(self)

    def __del__(self):
        # Fix: the original body was a bare `return` followed by an
        # unreachable `self.close()`, so the temporary directory leaked.
        self.close()

    def close(self):
        """Remove all temporary files, then the temporary directory.

        Cleanup is best effort: a tracked path may not exist if the tool
        that should have produced it failed, and close() can run both
        explicitly and from __del__.
        """
        # Remove all temporary files
        for afile in self._tempfiles:
            try:
                os.unlink(afile)
            except OSError:
                pass
        # Remove directory
        try:
            os.rmdir(self._tmpdir)
        except OSError:
            pass

    def _create_temp_file(self, name, mode='wb'):
        """Create and open a tracked temp file; returns (fileobj, path)."""
        path = self._track_temp_file(name)
        fobj = open(path, mode=mode)
        return fobj, path

    def _track_temp_file(self, name):
        """Reserve a unique path in the temp dir and remember it for cleanup."""
        path = os.path.join(self._tmpdir,
                            "{0}-{1}".format(len(self._tempfiles), name))
        self._tempfiles.append(path)
        return path

    def load_llvm(self, llvmir):
        """
        Load LLVM with HSAIL SPIR spec: preprocess, verify, and queue the
        verified IR for linking.
        """
        # Preprocess LLVM IR
        llvmir = self._preprocess(llvmir)
        # Create temp file to store the input file
        tmp_llvm_ir, fin = self._create_temp_file("dump-llvm-ir")
        with tmp_llvm_ir:
            tmp_llvm_ir.write(llvmir.encode('ascii'))
        # Verify into a fresh temp file
        fout = self._track_temp_file("verified-llvm-ir")
        self._cmd.verify(ipath=fin, opath=fout)
        if config.DUMP_OPTIMIZED:
            with open(fout, 'rb') as fin_opt:
                print(fin_opt.read().decode('ascii'))
        self._linkfiles.append(fout)

    def link_builtins(self, ipath, opath):
        """Progressively link every builtin bitcode into ipath, writing the
        final result to opath (intermediates get derived names)."""
        for bc in self.bitcodes:
            if bc != self.bitcodes[-1]:
                tmp_opath = opath + bc.replace('/', '_').replace('.', '_')
            else:
                tmp_opath = opath
            lib = os.path.join(ROCM_BC_PATH, bc)
            cmd = self._cmd.CMD_LINK_BUILTINS.format(fout=tmp_opath, fin=ipath, lib=lib)
            check_call(cmd, shell=True)
            # Each round's output feeds the next link.
            ipath = tmp_opath

    def generateGCN(self):
        """
        Generate GCN from a module and also return the HSAIL code.

        Returns a FinalizerResult namedtuple with `hsail` (str) and
        `brig` (bytes).  May only be called once per module.
        """
        assert not self._finalized, "Module already has GCN generated"
        # Link dependencies libraries
        llvmfile = self._linkfiles[0]
        pre_builtin_path = self._track_temp_file("link-dep")
        libpaths = self._linkfiles[1:]
        self._cmd.link_libs(ipath=llvmfile, libpaths=libpaths,
                            opath=pre_builtin_path)
        # Link library with the builtin modules
        linked_path = self._track_temp_file("linked-path")
        self.link_builtins(ipath=pre_builtin_path, opath=linked_path)
        # Optimize
        opt_path = self._track_temp_file("optimized-llvm-ir")
        self._cmd.optimize(ipath=linked_path, opath=opt_path)
        if config.DUMP_OPTIMIZED:
            with open(opt_path, 'rb') as fin:
                print(fin.read().decode('ascii'))
        # Compile the llvm to HSAIL
        hsail_path = self._track_temp_file("create-hsail")
        self._cmd.generate_hsail(ipath=opt_path, opath=hsail_path)
        # Compile the llvm to BRIG
        brig_path = self._track_temp_file("create-brig")
        self._cmd.generate_brig(ipath=opt_path, opath=brig_path)
        # Link the BRIG object into a shared object
        end_brig_path = self._track_temp_file("linked-brig")
        self._cmd.link_brig(ipath=brig_path, opath=end_brig_path)
        self._finalized = True
        # Read HSAIL
        with open(hsail_path, 'rb') as fin:
            hsail = fin.read().decode('ascii')
        # Read BRIG
        with open(end_brig_path, 'rb') as fin:
            brig = fin.read()
        if config.DUMP_ASSEMBLY:
            print(hsail)
        return namedtuple('FinalizerResult', ['hsail', 'brig'])(hsail, brig)
| {
"repo_name": "sklam/numba",
"path": "numba/roc/hlc/hlc.py",
"copies": "4",
"size": "9883",
"license": "bsd-2-clause",
"hash": -1925057852013914600,
"line_mean": 31.8338870432,
"line_max": 88,
"alpha_frac": 0.5301021957,
"autogenerated": false,
"ratio": 3.8321054672353627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6362207662935362,
"avg_score": null,
"num_lines": null
} |
""" A temporay library to help eRSA reporting until Nectar reporting APIs are availabe
"""
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client
from novaclient import client as nova_client
AUTH_URL = 'https://keystone.rc.nectar.org.au:5000/v3'
NOVA_VERSION = 2
def to_dict(object, attrs):
    """Build a dictionary of the requested attributes present on *object*.

    Attribute names containing a colon (e.g. 'OS-FLV-EXT-DATA:ephemeral')
    are stored under the part after the colon, to match the output of the
    command-line tools.  Missing attributes are silently skipped.
    """
    output = {}
    for attr in attrs:
        if not hasattr(object, attr):
            continue
        key = attr.split(":")[1] if ":" in attr else attr
        output[key] = getattr(object, attr)
    return output
def create_session(username, password, project_name):
    """Create an authenticated keystone session for the given project,
    using the 'default' domain for both the user and the project."""
    password_auth = v3.Password(
        auth_url=AUTH_URL,
        username=username,
        password=password,
        project_name=project_name,
        project_domain_name='default',
        user_domain_name='default')
    return session.Session(auth=password_auth)
def get_domain(name):
    """Extract an organisational domain from an email address.

    Australian educational addresses (*.edu.au) keep the last three
    labels, US educational addresses (*.edu) the last two, and any other
    address keeps its full domain part.  Returns None when *name* is not
    an email address.
    """
    if "@" not in name:
        return None
    domain_name = name.split("@")[1]
    if domain_name.endswith(".edu.au"):
        keep = 3
    elif domain_name.endswith(".edu"):
        keep = 2
    else:
        return domain_name
    return ".".join(domain_name.split(".")[-keep:])
class Keystone(object):
    """Thin wrapper around the keystone v3 client for reporting queries."""

    # id: openstack_id in models
    USER_ATTRS = ["default_project_id", "email", "name", "id"]
    TENANT_ATTRS = ["allocation_id", "name", "description", "enabled", "id"]
    # Nectar also has domain_id, expires, status, parent_id

    def __init__(self, username, password, project_name):
        sess = create_session(username, password, project_name)
        self.client = client.Client(session=sess)
        # Cache role name -> id for quick lookups (e.g. TenantManager).
        roles = self.client.roles.list()
        self.role_dict = {role.name: role.id for role in roles}

    def get_user(self, user_id):
        """Get a user's USER_ATTRS plus a 'domain' guessed from their email
        (empty string when no usable email is present)."""
        meta = to_dict(self.client.users.get(user_id), self.USER_ATTRS)
        if 'email' in meta and meta['email'] and len(meta['email'].strip()):
            meta['domain'] = get_domain(meta['email'].strip())
        else:
            meta['domain'] = ''
        return meta

    def get_role_id(self, name="TenantManager"):
        """Map a role name to its id.

        Known ids on Nectar: TenantManager 14, Member 2.
        """
        return self.role_dict[name]

    # Here tenant == project
    def get_tenants(self):
        """List all projects with the TENANT_ATTRS fields."""
        projects = self.client.projects.list()
        return [to_dict(project, self.TENANT_ATTRS) for project in projects]

    def get_tenant(self, project_id):
        """Get a tenant(project) information"""
        return to_dict(self.client.projects.get(project_id), self.TENANT_ATTRS)

    def get_managers(self, project_id=None):
        """Map project id -> list of TenantManager users.

        Fix: *project_id* used to be silently ignored (and then shadowed
        by the loop variable).  When given, it now restricts the query to
        that project, consistent with _get_tenant_mananger_ids; the
        default (None) keeps the original list-everything behavior.
        """
        manager_role_id = self.get_role_id()
        if project_id is None:
            assignments = self.client.role_assignments.list(role=manager_role_id)
        else:
            assignments = self.client.role_assignments.list(project=project_id,
                                                            role=manager_role_id)
        managers = {}
        for assignment in assignments:
            assigned_project = assignment.scope['project']['id']
            user = self.get_user(assignment.user['id'])
            # The default project id is not meaningful in this report.
            del user['default_project_id']
            managers.setdefault(assigned_project, []).append(user)
        return managers

    def _get_tenant_mananger_ids(self, project_id):
        """Get ids of the TenantManager users of a tenant(project)."""
        manager_role = self.get_role_id()
        assignments = self.client.role_assignments.list(project=project_id, role=manager_role)
        manager_ids = [assignment.user["id"] for assignment in assignments]
        return manager_ids

    def get_tenant_manangers(self, project_id):
        """Get the full user records of a tenant's managers."""
        manager_ids = self._get_tenant_mananger_ids(project_id)
        managers = []
        for manager_id in manager_ids:
            managers.append(self.get_user(manager_id))
        return managers

    def get_tenant_domains(self, project_id):
        """Get domain (guessed from manager's email) of a tenant(project)."""
        managers = self.get_tenant_manangers(project_id)
        return [manager['domain'] for manager in managers]
class Nova(object):
    """Thin wrapper around the nova client for flavor and quota queries."""

    FLAVOR_ATTRS = ["id", "name", "ram", "vcpus", "disk", "swap", "rxtx_factor",
                    "OS-FLV-EXT-DATA:ephemeral", "OS-FLV-DISABLED:disabled"]

    def __init__(self, username, password, project_name):
        sess = create_session(username, password, project_name)
        self.client = nova_client.Client(NOVA_VERSION, session=sess)

    def get_flavors(self):
        """Get instance flavors in a list of dicts"""
        return [to_dict(flavor, self.FLAVOR_ATTRS)
                for flavor in self.client.flavors.list()]

    def get_quota(self, project_id):
        """Get quota dict of a project"""
        return self.client.quotas.get(project_id).to_dict()
if __name__ == '__main__':
    # Ad-hoc demo/exploration script.  Note: the exit(0) calls below are
    # deliberate short-circuits -- everything after the first exit(0) is
    # unreachable scratch code kept for reference.
    from argparse import ArgumentParser
    import random
    parser = ArgumentParser(description="Demo script of queries on Nectar Keystone")
    parser.add_argument('-u', '--username', required=True)
    parser.add_argument('-p', '--password', required=True)
    args = parser.parse_args()
    # Nova demo: list flavors and one project's quota.
    nclient = Nova(args.username, args.password, 'Admin')
    print(nclient.get_flavors())
    print(nclient.get_quota('6119cd97a3bd478d809845aeacc6ea12'))
    exit(0)
    # --- unreachable below this line ---
    kclient = Keystone(args.username, args.password, 'Admin')
    # test_get_tenant(kclient, '8d6f8f0aa02048fbb2fbd7daad94fd61')
    # test_get_tenant(kclient, 'c00ec4654f9d4f1da70b63d8649c6718')
    # test_get_user(kclient, 'e001f58e113c41c3a3a153846bab69a3')
    # exit(0)
    results = kclient.get_managers()
    print(results)
    import json
    with open('nectar_tenant_managers.json', 'w') as jf:
        json.dump(results, jf)
    exit(0)
    tenants = kclient.get_tenants()
    # Sample a few random tenants and show their managers.
    selected = random.sample(range(len(tenants)), 5)
    print("You may see managers if a tenant is not a personal tenant.\n")
    for select in selected:
    # for select in range(len(tenants)):
        print(tenants[select])
        managers = kclient.get_tenant_manangers(tenants[select]['id'])
        for manager in managers:
            print(manager)
| {
"repo_name": "eResearchSA/reporting-unified",
"path": "nectar.py",
"copies": "1",
"size": "6817",
"license": "apache-2.0",
"hash": -6430687438402267000,
"line_mean": 35.2606382979,
"line_max": 94,
"alpha_frac": 0.6186005574,
"autogenerated": false,
"ratio": 3.7332968236582693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4851897381058269,
"avg_score": null,
"num_lines": null
} |
""" ATEMview Location Window """
from PyQt5 import QtCore, QtWidgets
import pyqtgraph as pg
import numpy as np
from .ATEMWidget import ATEMWidget
from .colormaps import jetCM, jetBrush
class LocWidget(ATEMWidget):
    """Map view of sounding locations; can color the points by misfit or
    by observed/predicted data values for the selected time channel."""

    def __init__(self, parent):
        super(LocWidget, self).__init__(parent)
        self.parent = parent
        self.init_ui()
        self.showData = False        # color the scatter by self.data when True
        self.data = None             # values currently used for coloring
        self.tInd = -1               # selected time-channel index
        self.x = None
        self.y = None
        self.minVal = 1.
        self.maxVal = 1.
        self.cbFormatStr = '{:.2f}'  # colorbar tick label format
        self.show()

    def init_ui(self):
        """Build the map plot, selection crosshair, colorbar and controls."""
        # Make the background white
        palette = self.palette()
        palette.setColor(self.backgroundRole(), QtCore.Qt.white)
        self.setPalette(palette)

        self.plotWidget = pg.PlotWidget(enableMenu=False)
        self.plotWidget.setLabel('left', 'Easting', units='m')
        self.plotWidget.setLabel('bottom', 'Northing', units='m')
        self.plotWidget.showGrid(x=True, y=True)
        self.plotWidget.getViewBox().setAspectLocked()

        self.scatter = pg.ScatterPlotItem(pen=None, pxMode=True)
        self.plotWidget.addItem(self.scatter)

        # Crosshair marking the currently selected location.
        self.selectedLocVline = pg.InfiniteLine(angle=90,
                                                movable=False,
                                                pen={'color':'k',
                                                     'width':2,
                                                     'style':QtCore.Qt.DotLine})
        self.plotWidget.addItem(self.selectedLocVline, ignoreBounds=True)
        self.selectedLocHline = pg.InfiniteLine(angle=0,
                                                movable=False,
                                                pen={'color':'k',
                                                     'width':2,
                                                     'style':QtCore.Qt.DotLine})
        self.plotWidget.addItem(self.selectedLocHline, ignoreBounds=True)
        self.plotWidget.scene().sigMouseClicked.connect(self.clickEvent)

        # Colorbar: a static jet gradient image in a mouse-disabled plot.
        self.colorbarWidget = pg.PlotWidget(enableMenu=False)
        self.colorbarWidget.setMaximumWidth(100)
        self.colorbarWidget.getViewBox().setMouseEnabled(False, False)
        self.colorbarWidget.setXRange(0, 20, padding=0)
        self.colorbarWidget.setYRange(0, 256, padding=0)
        self.colorbarWidget.getAxis('bottom').setPen(None)
        self.colorbarWidget.getAxis('left').setPen(None)
        self.colorbarWidget.setVisible(False)

        self.cbMinLabel = QtWidgets.QLabel()
        self.cbMinLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.cbMinLabel.setText('0.00')
        self.cbMaxLabel = QtWidgets.QLabel()
        self.cbMaxLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.cbMaxLabel.setText('1.00')

        self.colorbar = pg.ImageItem()
        cbData = np.arange(0, 256)[:, np.newaxis].repeat(20, axis=1).T
        self.colorbar.setImage(jetCM[cbData])
        self.colorbarWidget.addItem(self.colorbar)

        self.misfitCheckBox = QtWidgets.QCheckBox('Show Misfit')
        self.misfitCheckBox.toggled.connect(self.toggleMisfit)

        # Quantity selector for the colored display.
        self.selectCombo = QtWidgets.QComboBox()
        self.selectCombo.addItem("Misfit (time)")
        self.selectCombo.addItem("Misfit (total)")
        self.selectCombo.addItem("Observed")
        self.selectCombo.addItem("Predicted")
        self.selectCombo.activated[str].connect(self.changeCombo)
        self.selectCombo.setVisible(False)

        self.titleLabel = QtWidgets.QLabel(self.selectCombo.currentText())
        self.titleLabel.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        self.titleLabel.setVisible(False)

        # Sliders selecting the color limits as percentages of the data range.
        self.maxCvalSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.maxCvalSlider.setMaximum(100)
        self.maxCvalSlider.setValue(100)
        self.maxCvalSlider.valueChanged.connect(self.setClim)
        self.maxCvalSlider.setVisible(False)

        self.minCvalSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.minCvalSlider.setMaximum(100)
        self.minCvalSlider.setValue(0)
        self.minCvalSlider.valueChanged.connect(self.updatePlot)
        self.minCvalSlider.setVisible(False)

        cbvLayout = QtWidgets.QVBoxLayout()
        cbvLayout.addWidget(self.cbMaxLabel)
        cbvLayout.addWidget(self.colorbarWidget)
        cbvLayout.addWidget(self.cbMinLabel)

        hLayout = QtWidgets.QHBoxLayout()
        hLayout.addWidget(self.plotWidget)
        hLayout.addLayout(cbvLayout)

        vLayout = QtWidgets.QVBoxLayout(self)
        hMisLayout = QtWidgets.QHBoxLayout()
        hMisLayout.addWidget(self.misfitCheckBox)
        hMisLayout.addWidget(self.selectCombo)
        vLayout.addLayout(hMisLayout)
        vLayout.addWidget(self.titleLabel)
        vLayout.addLayout(hLayout)
        vLayout.addWidget(self.maxCvalSlider)
        vLayout.addWidget(self.minCvalSlider)

    def clickEvent(self, event):
        """Emit a 'closestLoc' selection for clicks inside the plot area."""
        if self.plotWidget.sceneBoundingRect().contains(event.scenePos()):
            mousePoint = self.plotWidget.getViewBox().mapSceneToView(event.scenePos())
            signal = {'name':'closestLoc',
                      'x':mousePoint.x(),
                      'y':mousePoint.y()}
            self.ChangeSelectionSignal.emit(signal)
        else:
            pass

    @QtCore.pyqtSlot(bool)
    def toggleMisfit(self, show):
        """ Callback that gets fired 'Show Misfit' box is toggled """
        if self.data is not None:
            if show:
                self.colorbarWidget.setVisible(True)
                self.maxCvalSlider.setVisible(True)
                self.minCvalSlider.setVisible(True)
                self.selectCombo.setVisible(True)
                self.titleLabel.setVisible(True)
                self.showData = True
            else:
                self.colorbarWidget.setVisible(False)
                self.maxCvalSlider.setVisible(False)
                self.minCvalSlider.setVisible(False)
                self.selectCombo.setVisible(False)
                self.titleLabel.setVisible(False)
                # Fix: clear the flag so updatePlot() stops coloring the
                # scatter once the box is unchecked (it was left True,
                # keeping the colored display after hiding the controls).
                self.showData = False
            self.updatePlot()

    @QtCore.pyqtSlot(str)
    def changeCombo(self, text):
        """Switch the displayed quantity and refresh data and labels."""
        # Fixed-point labels for misfit ratios, scientific for field data.
        fmt = {"Misfit (time)": "{:.2f}",
               "Misfit (total)": "{:.2f}",
               "Observed": "{:.2e}",
               "Predicted": "{:.2e}"}
        self.cbFormatStr = fmt.get(self.selectCombo.currentText(), self.cbFormatStr)
        self.titleLabel.setText(text)
        self.setData()
        self.updatePlot()

    def updatePlot(self):
        """Redraw the scatter, colored by self.data when display is enabled."""
        # Fix: use logical `and` instead of bitwise `&` (same result for
        # bools, but `and` is the intended short-circuit test).
        if self.showData and (self.data is not None):
            clMin, clMax = self.getClim()
            self.cbMaxLabel.setText(self.cbFormatStr.format(clMax))
            self.cbMinLabel.setText(self.cbFormatStr.format(clMin))
            # Map values onto the 256-entry jet palette.
            bins = np.linspace(clMin, clMax, 255)
            di = np.digitize(self.data, bins)
            self.scatter.setData(self.x, self.y, pen=None,
                                 brush=jetBrush[di], symbolSize=10.)
        else:
            self.scatter.setData(self.x, self.y, pen=None, brush='k', symbolSize=10.)

    def setAll(self, x, y):
        """Plot all locations uncolored and frame them with a 100 m margin."""
        self.scatter.setData(x, y, pen=None, brush='k', symbolSize=10.)
        self.plotWidget.setXRange(x.min()-100., x.max()+100.)
        self.plotWidget.setYRange(y.min()-100., y.max()+100.)

    def setLocation(self, loc):
        """Move the crosshair to the first location in *loc*."""
        xl = loc.iloc[0].x
        yl = loc.iloc[0].y
        self.selectedLocVline.setPos(xl)
        self.selectedLocHline.setPos(yl)

    def setTime(self, data_times):
        """ Set the displayed misfit data for a new time channel. """
        self.tInd = data_times.tInd.iloc[0]
        # Total misfit is time-independent, so skip the refresh in that mode.
        if self.selectCombo.currentText() != "Misfit (total)":
            self.setData()
        self.updatePlot()

    def setData(self):
        """Pull the values for the current display mode into data/x/y."""
        data_time = self.parent.data.getTime(self.tInd)
        self.x = data_time.x.values
        self.y = data_time.y.values
        mode = self.selectCombo.currentText()
        if mode == "Misfit (time)":
            if data_time.dBdt_Z_pred.any():
                # Normalized residual per sounding at this time channel.
                self.data = (data_time.dBdt_Z-data_time.dBdt_Z_pred).abs()/data_time.dBdt_Z_uncert
            else:
                self.data = None
        elif mode == "Misfit (total)":
            if data_time.dBdt_Z_pred.any():
                grp = self.parent.data.df.groupby('locInd')
                # Per-location normalized misfit: ||(obs-pred)/uncert||^2 / n
                l22 = lambda g: np.linalg.norm((g.dBdt_Z - g.dBdt_Z_pred)/g.dBdt_Z_uncert)**2/g.shape[0]
                grp = grp.agg(l22)[['x', 'y', 'dBdt_Z']]
                self.data = grp.dBdt_Z.values
                self.x = self.parent.data.locs.sort_index().x.values
                self.y = self.parent.data.locs.sort_index().y.values
            else:
                self.data = None
        elif mode == "Observed":
            self.data = data_time.dBdt_Z
        elif mode == "Predicted":
            self.data = data_time.dBdt_Z_pred
        else:
            self.data = None
        if self.data is not None:
            self.minVal = self.data.min()
            self.maxVal = self.data.max()

    def setClim(self):
        """Keep the min slider strictly below the max slider, then redraw."""
        lsVal = self.minCvalSlider.value()
        hsVal = self.maxCvalSlider.value()
        if lsVal >= hsVal:
            # Nudge the lower slider down; its valueChanged signal also
            # triggers updatePlot.  (A dead re-read of the slider value
            # has been removed.)
            self.minCvalSlider.setValue(hsVal-1)
        self.updatePlot()

    def getClim(self):
        """Return (min, max) color limits from the sliders' percentages
        of the current data range."""
        lsVal = self.minCvalSlider.value()
        hsVal = self.maxCvalSlider.value()
        dv = self.data.max()-self.data.min()
        clMin = self.data.min()+dv*lsVal/100.
        clMax = self.data.min()+dv*hsVal/100.
        return clMin, clMax
| {
"repo_name": "dwfmarchant/ATEMview",
"path": "ATEMview/LocWindow.py",
"copies": "1",
"size": "10027",
"license": "mit",
"hash": 3609604442642518000,
"line_mean": 39.2690763052,
"line_max": 104,
"alpha_frac": 0.5918021342,
"autogenerated": false,
"ratio": 3.755430711610487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4847232845810487,
"avg_score": null,
"num_lines": null
} |
# a tentative script to upload all existing drstree "versions" into CMIP sqlite database
# each variable, mip, experiment, model, ensemble combination add a new instance in "instance"
# for each instance there should be at least one version in "version" table
# for each version add at least one file in table "files"
from __future__ import print_function
from ARCCSSive.CMIP5.update_db_functions import insert_unique, add_bulk_items
from ARCCSSive.CMIP5.other_functions import *
#NB tmptree root dir is also defined there
from ARCCSSive.CMIP5 import DB
from ARCCSSive.CMIP5.Model import Instance, Version, VersionFile
import glob
# open local database using ARCSSive interface
conn = DB.connect()
db = conn.session
# NOTE(review): the next assignments each overwrite `kwargs`; only the last
# one is effective -- the earlier ones look like leftover experiments.
#kwargs={"institute":"BCC","model":"bcc-csm1-1-m", "experiment":"historical"}
kwargs=defaultdict(lambda: "*")
#kwargs=dict(model="IPSL-CM5A-MR", experiment="amip", mip="fx")
kwargs=dict(model="IPSL-CM5A-MR", experiment="amip", frequency="mon")
#loop through entire drstree or a subdir by using constraints **kwargs
instances=list_drstree(**kwargs)
print(instances)
#for each instance individuated add instance row
for inst in instances:
    # call file_details to retrieve experiment, variable, model etc. from filename
    # call drs_details to retrieve model, experiment, freq. & realm (become mip), variable, ensemble from drstree path
    # return dictionary
    # could i create an Instance, Version and file object instead and pass that on?
    kw_instance={}
    kw_version={}
    kw_files={}
    frequency, kw_instance = drs_details(inst)
    # mip is derived from the first netCDF file under latest/
    filename=glob.glob(inst+"/latest/*.nc")
    kw_instance['mip'] = get_mip(filename)
    #print(kw_instance)
    # make sure details list isn't empty
    if kw_instance:
        versions = list_drs_versions(inst)
        # add instance to db if not already existing
        inst_obj,new = insert_unique(db, Instance, **kw_instance)
        print(inst)
        print(inst_obj.id,new)
        #P use following two lines if tmp/tree
        #kw_version['version'] = find_version(bits[:-1], version)
        #kw_version['path'] = '/'.join(bits[:-1])
        kw_version['instance_id'] = inst_obj.id
        for v in versions:
            # add version to db if not already existing
            kw_version['version'] = v
            files = list_drs_files(inst+"/"+v)
            kw_version['path'] = tree_path("/".join([inst,v,files[0]]))
            #print(kw_version.items())
            v_obj,new = insert_unique(db, Version, **kw_version)
            print(v)
            print(v_obj.id,new)
            if v_obj.filenames==[]:
                # version has no file rows yet: bulk-insert them with md5 sums
                rows=[]
                for f in files:
                    checksum=check_hash(v_obj.path+"/"+f,'md5')
                    rows.append(dict(filename=f, md5=checksum, version_id=v_obj.id))
                add_bulk_items(db, VersionFile, rows)
            else:
                # version already has files: insert only the missing ones
                kw_files['version_id']=v_obj.id
                for f in files:
                    kw_files['filename']=f
                    kw_files['md5']=check_hash(v_obj.path+"/"+f,'md5')
                    insert_unique(db, VersionFile, **kw_files)
# need to have function to map bits of path to db instance fields!!
#model,experiment,variable,mip,ensemble
#kwargs[k]=
| {
"repo_name": "coecms/ARCCSSive",
"path": "database_updates/upload_drstree.py",
"copies": "1",
"size": "3256",
"license": "apache-2.0",
"hash": -1362004406274418000,
"line_mean": 41.8421052632,
"line_max": 114,
"alpha_frac": 0.640970516,
"autogenerated": false,
"ratio": 3.621802002224694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4762772518224694,
"avg_score": null,
"num_lines": null
} |
'''A terminal client for sillygo.
'''
from . import game
import sys
import re
import itertools as it
class Turn:
    '''Base class for a player's turn, which may be
    {Move, Pass, Resign}.
    '''
    def __init__(self, player):
        self.player = player

    def __repr__(self):
        # Aid debugging: show the concrete turn type and who played it.
        return '%s(player=%r)' % (type(self).__name__, self.player)
class Move(Turn):
    # A turn that places a stone at `position`, an (x, y) index pair.
    def __init__(self, player, position):
        super().__init__(player)
        self.position = position
class Pass(Turn):
    """A turn in which the player passes.

    The redundant __init__ override (which only delegated to
    Turn.__init__) has been removed; the inherited constructor is used.
    """
class Resign(Turn):
    """A turn in which the player resigns.

    The redundant __init__ override (which only delegated to
    Turn.__init__) has been removed; the inherited constructor is used.
    """
class TerminalPlayer:
    """Reads a player's turns from stdin, prompting on stderr."""

    MOVE_PATTERN = re.compile('([a-z]+)[^a-z0-9]*([0-9]+)')
    ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

    def __init__(self, player):
        self.player = player

    def __call__(self, board):
        """Prompt until the user enters a move (e.g. "C7"), 'pass' or
        'resign', and return the corresponding Turn object."""
        while True:
            sys.stderr.write('Your move (%s):%s\n' % (self.player.name, board))
            line = sys.stdin.readline().strip().lower()
            if line == 'pass':
                return Pass(self.player)
            elif line == 'resign':
                return Resign(self.player)
            else:
                m = self.MOVE_PATTERN.match(line)
                # Fix: the pattern accepts multi-letter columns such as
                # "aa7", which previously reached ALPHABET.index('aa') and
                # crashed with an uncaught ValueError.  Treat anything but
                # a single column letter as unrecognized input.
                if m is not None and len(m.group(1)) == 1:
                    x = self.ALPHABET.index(m.group(1))
                    y = int(m.group(2)) - 1
                    return Move(self.player, (x, y))
                else:
                    sys.stderr.write('Error! unrecognized move "%s", '
                                     'should be something like "C7", '
                                     '"pass" or "resign"\n' % line)
def play_move(board, player):
    """Repeatedly prompt *player* until a usable turn is produced.

    Returns (board, turn): Pass and Resign leave the board unchanged;
    a legal Move returns the updated board.  Illegal moves re-prompt.
    """
    while True:
        turn = player(board)
        if isinstance(turn, (Resign, Pass)):
            return board, turn
        if isinstance(turn, Move):
            new_board = board.move(player.player, turn.position)
            # board.move only yields a Board for legal moves.
            if isinstance(new_board, game.Board):
                return new_board, turn
class Result:
    # Base class for game outcomes (PointsResult, Resignation).
    pass
class PointsResult(Result):
    """Outcome of a game decided by counting points, with a handicap
    added to white's score."""

    def __init__(self, black_points, white_points, handicap):
        self.black_points = black_points
        self.white_points = white_points
        self.handicap = handicap

    @property
    def winner(self):
        """Black wins only if its score reaches white's plus the handicap."""
        black_wins = self.black_points >= self.white_points + self.handicap
        return game.State.black if black_wins else game.State.white
class Resignation(Result):
    """Outcome of a game that ended with one player resigning."""

    def __init__(self, resigned_player):
        self.resigned_player = resigned_player

    @property
    def winner(self):
        """The opponent of whoever resigned."""
        return self.resigned_player.enemy()
def get_result(board, scoring, handicap):
    """Score the finished *board* and wrap the totals in a PointsResult."""
    territory = board.territory(scoring)
    black = territory.score(game.State.black)
    white = territory.score(game.State.white)
    return PointsResult(black, white, handicap)
def play_game():
    """Run a 9x9 terminal game until two consecutive passes (scored with
    area scoring and a 3.5 handicap) or a resignation."""
    scoring = game.Scoring.area
    handicap = 3.5
    board = game.Board.empty(9)
    players = [TerminalPlayer(game.State.black),
               TerminalPlayer(game.State.white)]
    previous_was_pass = False
    for player in it.cycle(players):
        board, turn = play_move(board, player)
        if isinstance(turn, Resign):
            return Resignation(player.player)
        if isinstance(turn, Pass):
            if previous_was_pass:
                return get_result(board, scoring, handicap)
            previous_was_pass = True
        else:
            previous_was_pass = False
# Run a game when executed directly.
if __name__ == '__main__':
    play_game()
| {
"repo_name": "DouglasOrr/Snippets",
"path": "sillygo/sillygo/terminal.py",
"copies": "1",
"size": "3511",
"license": "mit",
"hash": -6208216835398346000,
"line_mean": 26.0076923077,
"line_max": 79,
"alpha_frac": 0.5545428653,
"autogenerated": false,
"ratio": 3.8582417582417583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9912784623541758,
"avg_score": 0,
"num_lines": 130
} |
# A test for MixerDLL
#
# By nitzel
#
# You might want to configure the dll-path in line 16
#
import mixer_dll
import TimerControl # timed callbacks
import os # OS-check
import copy
# This demo drives a Windows-only mixer DLL; bail out on other platforms.
if os.name != 'nt':
    print "Need to be run on windows"
    exit()
# Load the compiled mixer DLL (adjust this path to your build output).
mixerdll = mixer_dll.get_mixer_dll("../MixerDLL/Release/mixer.dll")
from Tkinter import *
mixerdll.initAudio();
print "Mixerdll v. "+mixerdll.version()
mixerdll.printAudioInfo()
master = None  # the Tk root window, created later
scaler = []  # one [Scale, cached_volume, session_id, session_title] per session
def sendToMC(data):
print "sending to mc"
for c in data:
print c, bin(c)
print "done"
def updateMC(id):
    # Placeholder callback -- pushing state to the device is not implemented.
    pass
def sliderChanged(id, value): # callback whenever a slider has changed
global mixerdll, scaler
value = float(value)
if scaler[id][1] != value:
scaler[id][1] = value
try:
mixerdll.setSessionVolume(id, value)
except: # catch some 0pointer erros, dont know where thy come from
print "error set", mixerdll.getErrorMessage()
pass
def volumeChanged():
    # Polling callback: resync the sliders with the mixer's current sessions.
    # Rebuilds all sliders when the session count or a session title changed,
    # then copies any externally-changed volumes onto the sliders.
    global mixerdll, scaler
    if mixerdll.getSessionCount()+1 != len(scaler):
        initSliders()
    else:
        for s in scaler:
            try:
                # NOTE(review): initSliders() replaces `scaler` while this
                # loop still iterates the old list -- looks benign because
                # the old entries are only read here, but confirm.
                if s[3]!=mixerdll.getSessionTitle(s[2]): # name changed
                    initSliders()
            except: # NOTE(review): bare except also swallows KeyboardInterrupt
                print "error volume changed getsestitle",s
    # Mirror current mixer volumes onto the sliders (and the cache s[1]).
    for s in scaler:
        try:
            vol = mixerdll.getSessionVolume(s[2])
            if s[1] != vol:
                s[1] = vol
                s[0].set(s[1])
        except: # catch some 0pointer erros, dont know where thy come from
            print "error get", mixerdll.getErrorMessage()
def initSliders():
global scaler, master
newScaler = []
# init sliders (for each audio session (master volume(-1) inclusive))
for i in range(-1,mixerdll.getSessionCount()):
print "session",i,"/",mixerdll.getSessionCount()
m = int(copy.deepcopy(str(i)))# to loose reference to i
s = [Scale(master, from_=1, to=0, resolution=0.01, command=eval("lambda v: sliderChanged("+str(i)+",v)"), label=mixerdll.getSessionTitle(i)),mixerdll.getSessionVolume(i), i, mixerdll.getSessionTitle(i)] # create (slider, volume, id) obj
print "["+str(mixerdll.getSessionTitle(i))+"]"
s[0].set(s[1]) # udpate slider
newScaler.append(s) # put obj in list
if mixerdll.getSessionTitle(i)!=None:
s[0].pack(side=LEFT) # use slider in window
for s in newScaler:
print s
for s in scaler:
s[0].destroy()
scaler = newScaler
try:
# init window
master = Tk()
master.title("Testing MixerDLL - Juppiduppi")
Label(master, text="Using MixerDLL v."+mixerdll.version()).pack()
initSliders()
# TimerControl for timed polling
timemgr = TimerControl.TimerControl(0.01, [TimerControl.Timer(volumeChanged,0.01,True,True,None)],True)
# Tkinter.mainloop
mainloop() # blocking, runs until window closes
finally: # shutdown
print "\n\nExit & Shutdown"
timemgr.stop()
mixerdll.exitAudio()
| {
"repo_name": "nitzel/MixerDLL",
"path": "PythonExamples/test.py",
"copies": "1",
"size": "2763",
"license": "unlicense",
"hash": 6618153901830990000,
"line_mean": 24.8224299065,
"line_max": 238,
"alpha_frac": 0.6894679696,
"autogenerated": false,
"ratio": 2.8513931888544892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8471811075027715,
"avg_score": 0.11381001668535458,
"num_lines": 107
} |
# A test game
from Satay.HTTPGame.Game import Map, Item, NPC, HTTPGame as GameMode
from Satay.Base import NumeratedList, Dynamic, DialogMap, Dialog, Response, Action, Condition, Event
from HTTPCommands.basic import go
objects = {
"mPuddle":Map(
name="A Puddle",
desc="You see a nice puddle here.",
imgsrc="/assets/img/puddle.png",
itemlist=NumeratedList(
iEmerald=1,
),
forward=Dynamic(
"mHeree",
iPebble="mHouse",
),
),
"mHeree":Map(
name="Heree",
desc="At the Wall.",
imgsrc="/assets/img/wall.png",
itemlist=NumeratedList(
),
backward="mPuddle",
right="mHouse",
),
"mHouse":Map(
name="A House",
desc="A ramshackle old house.",
imgsrc="/assets/img/house.png",
itemlist=NumeratedList(
iStone=3,
),
left="mHeree",
),
"iTem":Item(
name="Item",
desc="A wonderous item for you.",
),
"iPebble":Item(
name="Pebble",
desc="A small gray pebble.",
),
"iStone":Item(
name="Stone",
desc="A boring gray stone.",
),
"iSword":Item(
name="Sword",
desc="A sharp, shiny sword.",
),
"iFork":Item(
name="Fork",
desc="A shiny, silver fork. It's quite pointy!",
),
"iChickenFork":Item(
name="Chicken Stuck to Fork",
desc="Some awful chicken glued to a now dirty fork.",
),
"iEmerald":Item(
name="Emerald",
desc="A green and valuable emerald.",
),
"iChicken":Item(
name="Chicken",
desc="Some juicy, cooked chicken.",
),
}
# Engine configuration: wires the object definitions above to the HTTP game mode.
settings = {
    "start":"mPuddle",  # id of the map the player spawns in
    "title":"A Web Game",
    "author":"Andy Brennan",
    # Starting inventory (item id -> count).
    "items":NumeratedList(
        iTem=3,
        iSword=1,
        iFork=1,
    ),
    "objects":objects,   # all maps/items defined above
    "commands":[go],     # commands the player may issue over HTTP
    "variables":{
    }
}
# Start game immediately
if __name__ == "__main__":
    # Serve static assets from ./webroot when run as a script.
    GameMode.Run("./webroot", settings)
| {
"repo_name": "Valdez42/Satay",
"path": "httpgame.py",
"copies": "1",
"size": "2076",
"license": "mit",
"hash": 3462000343207408600,
"line_mean": 19.9696969697,
"line_max": 100,
"alpha_frac": 0.5207129094,
"autogenerated": false,
"ratio": 3.2236024844720497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9199939814210907,
"avg_score": 0.008875115932228693,
"num_lines": 99
} |
# A test game
from Satay.PorkStyleTextGame.Game import Map, Item, NPC, PorkStyleTextGame as GameMode
from Satay.Base import NumeratedList, Dynamic, DialogMap, Dialog, Response, Action, Condition, Event
from Commands.kill import kill, murder
from Commands.basic import look, go, get, take, drop, inventory, inv, i, save, load, quit
from Commands.talk import talk
from Commands.eat import eat
objects = {
"mPuddle":Map(
name="A Puddle",
desc="You see a nice puddle here.",
nbase="puddle",
descriptors=["wet","nice","pleasant"],
itemlist=NumeratedList(
iTem=3,
iStone=2,
nMan=1,
),
s="mHeree",
events=[
Event(
Condition.History.Happened("go").With("mPuddle"),
Action("Print")("Now leaving the puddle...")
),
],
),
"mHeree":Map(
name="Heree",
desc="At the Wall.",
nbase="heree",
descriptors=[],
n="mPuddle",
itemlist=NumeratedList(
iStone=2,
iSword=1,
),
),
"iTem":Item(
name="Item",
desc="A wonderous item for you.",
nbase="item",
descriptors=['wonderous'],
kill_msg=Dynamic(
"My item! NOESSS!!!",
iSword="You killed my item with a sword!?!?",
),
kill_newitem=Dynamic(
"iStone",
iSword="iPebble"
),
),
"iPebble":Item(
name="Pebble",
desc="A small gray pebble.",
nbase="pebble",
descriptors=["small", "gray"],
),
"iStone":Item(
name="Stone",
desc="A boring gray stone.",
nbase="stone",
descriptors=['boring','gray'],
),
"iSword":Item(
name="Sword",
desc="A sharp, shiny sword.",
nbase="sword",
descriptors=['sharp','shiny'],
),
"iFork":Item(
name="Fork",
desc="A shiny, silver fork. It's quite pointy!",
nbase="fork",
descriptors=['shiny', 'silver', 'pointy'],
events=[
Event(
Condition.History.Happened("eat").With("iFork"),
Action("Replace")("iFork","iChickenFork")
),
],
),
"iChickenFork":Item(
name="Chicken Stuck to Fork",
desc="Some awful chicken glued to a now dirty fork.",
nbase="fork",
descriptors=['dirty', 'pointy', 'awful'],
),
"iEmerald":Item(
name="Emerald",
desc="A green and valuable emerald.",
nbase="emerald",
descriptors=['green', 'valuable'],
),
"iChicken":Item(
name="Chicken",
desc="Some juicy, cooked chicken.",
nbase="chicken",
descriptors=['juicy', 'cooked'],
eat_edible=True,
eat_message=Dynamic(
"Gah! This chicken is awful.",
iFork="Ewww. It seems glued to the fork now!",
),
),
"nMan":NPC(
name="A Man",
desc="An old, aging man.",
nbase="man",
descriptors=['old','aging'],
dialog=DialogMap(
start=Dialog(
"Hey there.",
Response(
"Hey.",
'a0',
Condition.History.Happened("talk").To("nMan"),
),
Response("Sup.",'a0'),
Response(
"I killed the item with my sword.",
'e2',
Condition.History.Happened("kill").To("iTem"),
Condition.History.Happened("kill").With("iSword")
),
),
a0=Dialog(
"Want some chicken?",
Response("I like chicken.","a1"),
Response("I hate chicken.","a2"),
Response(
"That chicken sucked last time.",
"a2",
Condition.History.Happened("talk").To("nMan"),
Condition.History.Happened("eat").To("iChicken"),
),
Response(
"I already have some.",
"a2",
Condition("inventory").Contains("iChicken"),
),
),
a1=Dialog(
"Nice! So do I. Have some!",
Response("Bye", "e1"),
action=[
Action("AddToInventory")("iChicken"),
],
),
a2=Dialog(
"Aw, dang.",
Response("Bye", "e1"),
),
e2=Dialog(
"Excellent! Take this emerald...",
action=Action("AddToInventory")("iEmerald"),
end=True,
),
e1=Dialog(
"Good bye, then.",
end=True,
),
)
)
}
# Engine configuration: wires the object definitions above to the text game mode.
settings = {
    "start":"mPuddle",  # map id where play begins
    "title":"A Game",
    "author":"Andy Brennan",
    "enableScopeChecking":True,
    # Starting inventory (item id -> count).
    "items":NumeratedList(
        iTem=3,
        iSword=1,
        iFork=1,
    ),
    "objects":objects,
    "commands":[kill, murder, talk, look, go, get, take, drop, inventory, inv, i, save, load, quit, eat],
    "variables":{
    }
}
# Start game immediately
if __name__ == "__main__":
    aGame = GameMode(settings)
    aGame.Run()
| {
"repo_name": "Valdez42/Satay",
"path": "agame.py",
"copies": "1",
"size": "5394",
"license": "mit",
"hash": -5041521262312477000,
"line_mean": 26.3807106599,
"line_max": 105,
"alpha_frac": 0.4584723767,
"autogenerated": false,
"ratio": 3.756267409470752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4714739786170752,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import tensorflow as tf
N_CLASSES = 9
def _conv1d(x, W, b, is_training=None, name='conv', strides=1):
    """1-D convolution built on tf.nn.conv2d with a height-1 window.

    x is laid out as [batch, 1, width, in_channels] and W as
    [1, filter_width, in_channels, out_channels]. Adds a bias, applies
    batch normalisation only when is_training is supplied, then ReLU.
    """
    stride_spec = [1, 1, strides, 1]
    out = tf.nn.bias_add(tf.nn.conv2d(x, W, strides=stride_spec, padding='SAME'), b)
    if is_training is not None:
        out = _batch_norm(out, is_training)
    return tf.nn.relu(out, name=name)
def _conv1d_depthwise(x, W, b, is_training=None, name='conv_depthwise', strides=1):
    """Depthwise 1-D convolution: per-channel conv, bias, optional batch-norm, ReLU."""
    window_strides = [1, 1, strides, 1]
    out = tf.nn.bias_add(
        tf.nn.depthwise_conv2d(x, W, strides=window_strides, padding='SAME'), b)
    if is_training is not None:
        out = _batch_norm(out, is_training)
    return tf.nn.relu(out, name=name)
def _pool1d(x, k=2, strides=2, pool_type='max'):
    """Pool1D wrapper over the 2-D TF pooling ops (height dimension is 1).

    pool_type selects 'max' or 'average'. Fix: the original silently
    returned None for any other value; this now raises ValueError so the
    error surfaces at the call site instead of later in the graph.
    """
    pool_dict = {'ksize': [1, 1, k, 1], 'strides': [1, 1, strides, 1], 'padding': 'SAME'}
    if pool_type == 'max':
        return tf.nn.max_pool(x, **pool_dict)
    if pool_type == 'average':
        return tf.nn.avg_pool(x, **pool_dict)
    raise ValueError("unsupported pool_type: %r" % (pool_type,))
def _batch_norm(x, is_training, reuse=False):
    '''Batch normalisation with trainable scale/shift and 0.9 moving-average decay.

    updates_collections=None forces the moving-average updates to run
    in-place with the forward pass (no separate update op needed).
    See:
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/layers.py#L110 '''
    return tf.contrib.layers.batch_norm(x,
                                        is_training=is_training,
                                        decay=0.9,
                                        center=True,
                                        scale=True,
                                        activation_fn=None,
                                        updates_collections=None,
                                        reuse=reuse,
                                        trainable=True,
                                        scope='batch_norm')
def _inception_module(x, is_training, dropout_conv=1, params=None, module_position=0):
    '''One inception block (see https://arxiv.org/pdf/1409.4842v1.pdf).

    Runs four parallel paths over x ([batch, 1, width, channels]) -- a 1x1
    "gate" conv, gate + 1x3 conv, gate + 1x5 conv, and pool + gate -- and
    concatenates their outputs along the channel axis.

    params may override the per-path channel counts / strides (see
    _get_params_inception); module_position selects the default set.
    NOTE(review): dropout_conv is accepted but never used in this module.
    '''
    # Fix: the original signature used a mutable default (params={}).
    # params is only ever read, so a fresh dict here preserves behaviour.
    if params is None:
        params = {}
    #[batch, in_eight=1, in_width, in_channels]
    x_shape = x.get_shape().as_list()
    # PATH 1: gate
    with tf.variable_scope('gate') as scope:
        out_channels, strides = _get_params_inception(params, scope.name, module_position=module_position)
        std = 1.0 / np.sqrt(x_shape[3]*x_shape[2])
        kernel = tf.Variable(tf.truncated_normal([1, 1, x_shape[3], out_channels], stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels]))
        x1 = _conv1d(x, kernel, biases, is_training=is_training, strides=strides)
    # PATH 2: gate + short filter conv (1x3)
    with tf.variable_scope('short_conv') as scope:
        out_channels_gate, strides_gate, out_channels_sc, strides_sc = _get_params_inception(params,
                                                scope.name,
                                                module_position=module_position)
        # Gate
        std = 1.0 / np.sqrt(x_shape[3]*x_shape[2])
        kernel = tf.Variable(tf.truncated_normal([1, 1, x_shape[3], out_channels_gate], stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels_gate]))
        x2_gate = _conv1d(x, kernel, biases, strides=strides_gate)
        # Short conv
        std = 1.0 / np.sqrt(x2_gate.get_shape().as_list()[3]*x2_gate.get_shape().as_list()[2])
        kernel = tf.Variable(tf.truncated_normal([1, 3, x2_gate.get_shape().as_list()[3], out_channels_sc],
                                                stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels_sc]))
        x2 = _conv1d(x2_gate, kernel, biases, is_training=is_training, strides=strides_sc)
    # PATH 3: gate + long filter conv (1x5)
    with tf.variable_scope('long_conv') as scope:
        out_channels_gate, strides_gate, out_channels_lc, strides_lc = _get_params_inception(params,
                                                scope.name,
                                                module_position=module_position)
        # Gate
        std = 1.0 / np.sqrt(x_shape[3]*x_shape[2])
        kernel = tf.Variable(tf.truncated_normal([1, 1, x_shape[3], out_channels_gate],
                                                stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels_gate]))
        x3_gate = _conv1d(x, kernel, biases, strides=strides_gate)
        # Long conv
        std = 1.0 / np.sqrt(x3_gate.get_shape().as_list()[3]*x3_gate.get_shape().as_list()[2])
        kernel = tf.Variable(tf.truncated_normal([1, 5, x3_gate.get_shape().as_list()[3],
                                                out_channels_lc],
                                                stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels_lc]))
        x3 = _conv1d(x3_gate, kernel, biases, is_training=is_training, strides=strides_lc)
    # PATH 4 pooling + gate:
    with tf.variable_scope('pooling') as scope:
        k, out_channels, strides = _get_params_inception(params, scope.name, module_position=module_position)
        # Pooling
        x4_pool = _pool1d(x,k=k,strides=1)
        # Gate
        std = 1.0 / np.sqrt(x4_pool.get_shape().as_list()[3]*x4_pool.get_shape().as_list()[2])
        kernel = tf.Variable(tf.truncated_normal([1, 1, x4_pool.get_shape().as_list()[3], out_channels],
                                                stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([out_channels]))
        x4 = _conv1d(x4_pool, kernel, biases, is_training=is_training, strides=strides)
    # CONCATENATE in depth
    with tf.variable_scope('concat') as scope:
        # NOTE: tf.concat(axis, values) with axis first is the pre-1.0 argument order.
        concat = tf.concat(3,[x1,x2,x3,x4])
    return concat
def _reshape_Conv_to_FullyConnected(conv):
    '''Flatten a conv activation [batch, h, w, c] to [batch, h*w*c] for FC layers.'''
    dims = conv.get_shape().as_list()
    return tf.reshape(conv, [-1, dims[1] * dims[2] * dims[3]])
def _linear_activation(x, W, b, is_training=None, keep_prob=1, name=None):
    '''Fully-connected layer: x @ W + b, optional batch-norm, ReLU, then dropout.'''
    pre_act = tf.add(tf.matmul(x, W), b)
    if is_training is not None:
        pre_act = _batch_norm(pre_act, is_training)
    activated = tf.nn.relu(pre_act, name=name)
    return tf.nn.dropout(activated, keep_prob)
def _get_params_inception(params, path, module_position=0):
''''''
i = module_position
if i > 2: i = -1
if 'gate' in path:
out_channels = params.get('gate_out_channels', [14,21,21][i])
strides = params.get('gate_strides',1)
return out_channels, strides
if 'short_conv' in path:
out_channels_gate = params.get('short_conv_out_channels_gate', [14,21,21][i])
strides_gate = params.get('short_conv_strides_gate',1)
out_channels_sc = params.get('short_conv_out_channels_sc', [21,28,28][i])
strides_sc = params.get('short_conv_strides_sc',1)
return out_channels_gate, strides_gate, out_channels_sc, strides_sc
if 'long_conv' in path:
out_channels_gate = params.get('long_conv_out_channels_gate', [14,21,21][i])
strides_gate = params.get('long_conv_strides_gate',1)
out_channels_lc = params.get('long_conv_out_channels_lc', [21,28,28][i])
strides_lc = params.get('long_conv_strides_lc',1)
return out_channels_gate, strides_gate, out_channels_lc, strides_lc
if 'pooling' in path:
k = params.get('pooling_k', 3)
out_channels = params.get('pooling_out_channels_gate', [14,21,28][i])
strides = params.get('pooling_strides_gate',1)
return k, out_channels, strides
else:
raise ValueError('Inception path %s invalid' % path)
def inception_net(x, dropout_fc, is_training, dropout_conv=1, clip_norm=1e-1):
    '''Build the full network: 3 inception modules + 2 FC layers + linear output.

    dropout_fc / dropout_conv are keep probabilities; clip_norm bounds the
    norm of the FC weight matrices. Returns unscaled logits of width
    N_CLASSES. NOTE(review): the depthwise block below computes xdw but
    never feeds it forward (inception1 consumes x directly).
    '''
    # DEPTHWISE CONV (NOT IN USE!!!!!!!)
    with tf.variable_scope('depthwise') as scope:
        channel_multiplier = 2
        xshape = x.get_shape().as_list()
        std = 1.0 / np.sqrt(xshape[2]*xshape[3])
        kernel = tf.Variable(tf.truncated_normal([1, 1, xshape[3], channel_multiplier],
                                                stddev=std),name='kernel')
        biases = tf.Variable(tf.zeros([xshape[3]*channel_multiplier]))
        xdw = _conv1d_depthwise(x, kernel, biases, is_training=is_training, strides=1)
        xdw = tf.nn.dropout(xdw, dropout_conv)
    # INCEPTION 1
    with tf.variable_scope('inception1') as scope:
        incept1 = _inception_module(x, is_training, dropout_conv=dropout_conv, module_position=0)
    # POOLING 1
    pool1 = _pool1d(incept1, k=2, strides=2)
    pool1 = tf.nn.dropout(pool1, dropout_conv)
    # INCEPTION 2
    with tf.variable_scope('inception2') as scope:
        incept2 = _inception_module(pool1, is_training,dropout_conv=dropout_conv, module_position=1)
        incept2 = tf.nn.dropout(incept2, dropout_conv)
    # INCEPTION 3
    with tf.variable_scope('inception3') as scope:
        incept3 = _inception_module(incept2, is_training, dropout_conv=dropout_conv, module_position=2)
    # POOLING 3
    pool3 = _pool1d(incept3, k=2, strides=2)
    pool3 = tf.nn.dropout(pool3, dropout_conv)
    # FLATTEN
    flatten = _reshape_Conv_to_FullyConnected(pool3)
    # FULLY CONNECTED 1
    with tf.variable_scope('fc1') as scope:
        n1 = 140 #112
        # Weighting and activation (with dropout)
        dim = flatten.get_shape()[1].value
        #print('Number of inputs to 1st fully connected layer: %d' % dim)
        weights = tf.Variable(tf.truncated_normal([dim,n1], stddev=1e-3))
        weights_rn = tf.clip_by_norm(weights, clip_norm, axes=[0,1], name='clip1')
        biases = tf.Variable(tf.fill([n1],0.1))
        fc1 = _linear_activation(flatten, weights_rn, biases, is_training=is_training, keep_prob=dropout_fc, name=scope.name)
    # FULLY CONNECTED 2
    with tf.variable_scope('fc2') as scope:
        n2 = 70 #56
        weights = tf.Variable(tf.truncated_normal([n1,n2], stddev=1e-3))
        weights_rn = tf.clip_by_norm(weights, clip_norm, axes=[0,1], name='clip2')
        biases = tf.Variable(tf.fill([n2],0.1))
        fc2 = _linear_activation(fc1, weights_rn, biases, is_training=is_training, keep_prob=dropout_fc, name=scope.name)
    # OUTPUT: plain affine layer, no activation (logits).
    with tf.variable_scope('output') as scope:
        weights = tf.Variable(tf.truncated_normal([n2,N_CLASSES],stddev=1e-3))
        biases = tf.Variable(tf.fill([N_CLASSES],0.1))
        output = tf.add(tf.matmul(fc2, weights), biases)
    return output
    #norm = tf.sqrt(tf.reduce_sum(tf.square(weights), 1)
    #weights_renormed = weights * tf.expand_dims(clip_norm / tf.maximum(clip_norm, norms), 1)
| {
"repo_name": "seg/2016-ml-contest",
"path": "itwm/ConvNet.py",
"copies": "1",
"size": "11408",
"license": "apache-2.0",
"hash": 3764891055311675400,
"line_mean": 42.5419847328,
"line_max": 125,
"alpha_frac": 0.5626753156,
"autogenerated": false,
"ratio": 3.372154892107597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9230241207772345,
"avg_score": 0.04091779998705024,
"num_lines": 262
} |
"""A test implementation of a PEG program builder in Python.
The PEG matcher needs to be in C for speed, but the instruction stream can be
built in Python and comprezssed into a C-style structure at the last minute,
just before matching. This allows for higher-level access to the instructions,
and hence more flexibility in experimenting with and implementing operations
and optimisations.
"""
# Opcodes
# Names of the PEG VM instructions; list position defines the numeric opcode.
Opcodes = [
    "IAny", "IChar", "ISet", "ISpan",
    "IRet", "IEnd",
    "IChoice", "IJmp", "ICall", "IOpenCall",
    "ICommit", "IPartialCommit", "IBackCommit", "IFailTwice", "IFail",
    "IGiveup", "IFunc",
    "IFullCapture", "IEmptyCapture", "IEmptyCaptureIdx",
    "IOpenCapture", "ICloseCapture", "ICloseRunTime"
]
class Op(object):
    # Empty class used purely as an attribute namespace.
    pass
# Replace the class with a singleton instance so that Op.IAny == 0,
# Op.IChar == 1, ... mirror the Opcodes list above.
Op = Op()
for n, op in enumerate(Opcodes):
    setattr(Op, op, n)
class Instruction(object):
    """A single PEG VM instruction: opcode plus optional payload and jump offset."""

    def __init__(self, op, aux=None, offset=0):
        self.op = op          # index into the Opcodes table
        self.aux = aux        # opcode-specific payload (char, count, ...)
        self.offset = offset  # relative jump target where applicable

    def __str__(self):
        return "{}({},{})".format(Opcodes[self.op], self.aux, self.offset)
def Fail():
    """Program that unconditionally fails to match."""
    return [Instruction(Op.IFail)];
def Succeed():
    """Program that matches the empty string: no instructions needed."""
    return []
def Any(n):
    """Compile "match any n characters" into IAny instructions.

    n == 0 matches the empty string (no instructions). Positive counts
    are split into 255-sized chunks -- an implementation detail of the C
    matcher that may belong in the final-form conversion instead. A
    negative n is passed through as the aux of a single instruction.
    """
    if n == 0:
        return []
    if n > 0:
        ret = []
        while n > 255:
            ret.append(Instruction(Op.IAny, 255))
            n -= 255
        ret.append(Instruction(Op.IAny, n))
        return ret
    # n < 0 (fix: removed the dead, unused 'ret = []' assignment here).
    return [Instruction(Op.IAny, n)]
def Str(s):
    """Compile a literal string into one IChar instruction per character."""
    program = []
    for ch in s:
        program.append(Instruction(Op.IChar, ch))
    return program
| {
"repo_name": "moreati/ppeg",
"path": "PythonImpl.py",
"copies": "1",
"size": "1677",
"license": "mit",
"hash": -7725728969135295000,
"line_mean": 26.4237288136,
"line_max": 78,
"alpha_frac": 0.6064400716,
"autogenerated": false,
"ratio": 3.3406374501992033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394815693142688,
"avg_score": 0.010452365731303,
"num_lines": 59
} |
"""A testing suite for trie data structure."""
from trie import TrieTree
import pytest
@pytest.fixture
def empty_trie():
    """An empty TrieTree."""
    return TrieTree()
@pytest.fixture
def three_word_trie_no_overlap():
    """A trie with three words that do not overlap."""
    new_trie = TrieTree()
    new_trie.insert('cat')
    new_trie.insert('bear')
    new_trie.insert('mouse')
    return new_trie
@pytest.fixture
def three_word_trie_with_overlap():
    """A trie with three words that will overlap."""
    new_trie = TrieTree()
    new_trie.insert('cake')
    new_trie.insert('car')
    new_trie.insert('carpet')
    return new_trie
@pytest.fixture
def six_word_trie_with_some_overlap():
"""A trie with six words that will have some overlap."""
new_trie = TrieTree()
new_trie.insert('cake')
new_trie.insert('car')
new_trie.insert('carpet')
new_trie.insert('rats')
new_trie.insert('ratchet')
new_trie.insert('knife')
return new_trie
@pytest.fixture
def twelve_word_trie_with_some_overlap():
"""A trie with twelve words that will have some overlap."""
new_trie = TrieTree()
new_trie.insert('garden')
new_trie.insert('gardener')
new_trie.insert('bottle')
new_trie.insert('computer')
new_trie.insert('battle')
new_trie.insert('motorcycle')
new_trie.insert('motley')
new_trie.insert('dagger')
new_trie.insert('hatchet')
new_trie.insert('hatch')
new_trie.insert('alphabet')
new_trie.insert('Seattle')
return new_trie
def test_insert_repeat_val(three_word_trie_with_overlap):
"""Test an appropriate error is raised when duplicate value inserted."""
with pytest.raises(ValueError):
three_word_trie_with_overlap.insert('cake')
def test_empty_trie_insert(empty_trie):
"""Test insert on an empty trie."""
empty_trie.insert('pie')
empty_trie.insert('cake')
assert empty_trie.size() == 2
assert empty_trie.contains('cake') is True
assert empty_trie.contains('pie') is True
def test_insert_with_overlap(twelve_word_trie_with_some_overlap):
"""Test insert correctly handles overlap."""
twwo = twelve_word_trie_with_some_overlap
twwo.insert('competition')
twwo.insert('hacker')
assert twwo.size() == 14
assert twwo.contains('competition') is True
assert twwo.contains('hacker') is True
assert twwo.contains('hatchet') is True
assert twwo.contains('computer') is True
def test_contains_returns_true_when_true(twelve_word_trie_with_some_overlap):
"""Test the contain method works correctly when word is in TrieTree."""
twwo = twelve_word_trie_with_some_overlap
assert twwo.contains('hatch') is True
assert twwo.contains('dagger') is True
assert twwo.contains('Seattle') is True
def test_contains_returns_false_when_false(six_word_trie_with_some_overlap):
"""Test the contain method works correctly when word isn't in TrieTree."""
six = six_word_trie_with_some_overlap
assert six.contains('batch') is False
assert six.contains('baker') is False
assert six.contains('portland') is False
def test_size_of_empty_trie(empty_trie):
"""Test empty trie returns size of zero."""
assert empty_trie.size() == 0
def test_size_on_twelve_trie(twelve_word_trie_with_some_overlap):
"""Test the size function on twelve word trie returns twelve."""
assert twelve_word_trie_with_some_overlap.size() == 12
def test_remove_correctly_removes(three_word_trie_no_overlap):
"""Test that a word is correctly removed from trie."""
three_word_trie_no_overlap.remove('cat')
assert three_word_trie_no_overlap.size() == 2
assert three_word_trie_no_overlap.contains('cat') is False
def test_remove_multiple_words(twelve_word_trie_with_some_overlap):
"""Test the removal of three words from twelve word TrieTree."""
twwo = twelve_word_trie_with_some_overlap
twwo.remove('Seattle')
assert twwo.size() == 11
assert twwo.contains('Seattle') is False
twwo.remove('dagger')
assert twwo.size() == 10
assert twwo.contains('dagger') is False
twwo.remove('gardener')
assert twwo.size() == 9
assert twwo.contains('gardener') is False
assert twwo.contains('garden') is True
def test_remove_of_parent_word(twelve_word_trie_with_some_overlap):
"""Test that removing a parent word leaves the child."""
twwo = twelve_word_trie_with_some_overlap
twwo.remove('garden')
assert twwo.size() == 11
assert twwo.contains('garden') is False
assert twwo.contains('gardener') is True
def test_contains_case_sensative(twelve_word_trie_with_some_overlap):
"""Test that the tree is case sensative."""
twwo = twelve_word_trie_with_some_overlap
assert twwo.contains('seattle') is False
assert twwo.contains('Seattle') is True
twwo.insert('seattle')
assert twwo.contains('seattle') is True
assert twwo.contains('Seattle') is True
def test_remove_string_not_in_tree(six_word_trie_with_some_overlap):
"""Test removing a string not in the tree raises error."""
six = six_word_trie_with_some_overlap
with pytest.raises(ValueError):
six.remove('bananza')
def test_non_string_insert(empty_trie):
"""Test that an error is raised if you try to insert a non string."""
with pytest.raises(TypeError):
empty_trie.insert(1)
def test_non_string_remove(empty_trie):
"""Test that an error is raised if you try to remove a non string."""
with pytest.raises(TypeError):
empty_trie.remove((3, 'word', 'pickles'))
def test_non_string_contains(empty_trie):
"""Test that an error is raised if you try to check for a non string."""
with pytest.raises(TypeError):
empty_trie.contains([{}, {}, {}])
def test_depth_traversal_from_root(twelve_word_trie_with_some_overlap):
"""Test full word depth traversal of twelve word tree."""
twwo = twelve_word_trie_with_some_overlap.depth_traversal('b')
output = []
for i in range(11):
output.append(next(twwo))
expected = ['b', 'o', 't', 't', 'l', 'e', 'a', 't', 't', 'l', 'e']
for i in output:
assert i in expected
def test_depth_traversal_from_deeper_node(six_word_trie_with_some_overlap):
"""Test a traversal with a larger prefix."""
six = six_word_trie_with_some_overlap
six.insert('carpenter')
six.insert('carp')
six = six.depth_traversal('carp')
output = [next(six) for i in range(7)]
expected = ['carp', 'e', 't', 'n', 't', 'e', 'r']
for i in output:
assert i in expected
def test_depth_traversal_non_prefix(six_word_trie_with_some_overlap):
"""Test error is raised if start is not a prefix in the tree."""
six = six_word_trie_with_some_overlap.depth_traversal('a')
with pytest.raises(ValueError):
next(six)
def test_depth_traversal_non_end_of_prefix(twelve_word_trie_with_some_overlap):
"""Test error is raised when later parts of start are not in tree."""
twwo = twelve_word_trie_with_some_overlap.depth_traversal('gr')
with pytest.raises(ValueError):
next(twwo)
def test_depth_traversal_bad_type(three_word_trie_no_overlap):
"""Test error is raised if start is not a string."""
three = three_word_trie_no_overlap.depth_traversal(1)
with pytest.raises(TypeError):
next(three)
| {
"repo_name": "CaHudson94/data-structures",
"path": "src/working/test_trie.py",
"copies": "1",
"size": "7330",
"license": "mit",
"hash": -1627935082516621600,
"line_mean": 31.7232142857,
"line_max": 79,
"alpha_frac": 0.6750341064,
"autogenerated": false,
"ratio": 3.1608451918930576,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4335879298293057,
"avg_score": null,
"num_lines": null
} |
'a test module'
__author__ = 'zhayangtao'
import sys
def test():
    """Greet based on the command line: no args -> generic greeting,
    one arg -> personalised greeting, more -> complain."""
    args = sys.argv
    if len(args) == 1:
        print('Hello, world!')
    elif len(args) == 2:
        # Fix: the original used '$s', which is not a % placeholder and
        # made the % operator raise TypeError ("not all arguments converted").
        print('Hello, %s!' % args[1])
    else:
        print('Too many arguments')
if __name__ == '__main__':
test()
"""
Module documentation
Words Go Here
"""
spam = 40
def square(x):
    """
    Function documentation example.

    Return x multiplied by itself.
    """
    return x * x
class Employee:
"class documentation"
pass
print(square(4))
print(square.__doc__)
def makeActions():
    # Deliberate late-binding pitfall demo: every lambda closes over the
    # SAME loop variable i, so after the loop all five callables compute
    # 4 ** x. See makeActions2 for the default-argument fix.
    acts = []
    for i in range(5):
        acts.append(lambda x: i ** x)
    return acts
def makeActions2():
    """Return five power functions 0**x .. 4**x; the default argument
    i=i freezes each loop value at lambda-creation time (late-binding fix)."""
    return [lambda x, i=i: i ** x for i in range(5)]
def tester(start):
    """Closure-based counter: the returned nested(label) prints the label
    with the current count, then increments it via nonlocal."""
    state = start
    def nested(label):
        nonlocal state
        print(label, state)
        state += 1
    return nested
def tester(start):
    # NOTE(review): byte-for-byte duplicate of the definition above;
    # this rebinding is the one that survives at module level.
    state = start
    def nested(label):
        nonlocal state
        print(label, state)
        state += 1
    return nested
# Recursion demo: mutually recursive list summation.
def mysum(L):
    """Return the sum of list L; an empty list sums to 0."""
    return 0 if not L else nonempty(L)
def nonempty(L):
    """Sum a list known to contain at least one element."""
    head, tail = L[0], L[1:]
    return head + mysum(tail)
def func(a: 'spam', b: (1, 10), c: float) -> int:
    # Annotation demo: arbitrary expressions are stored in
    # func.__annotations__ and are not enforced at runtime.
    return a + b + c
seplen = 60   # width of the separator line
sepchr = '_'  # character the separator line is drawn with
def listing(module, verbose=True):
    """Print a numbered listing of *module*'s attributes.

    Dunder names are shown as '<built-in name>'; everything else is shown
    with its value. With verbose=True a header with the module's name and
    file is printed first.

    Fixes vs. original: module.__file -> module.__file__ (the original
    raised AttributeError on every verbose call) and the misspelled
    '<bulit-in name' marker.
    """
    sepline = sepchr * seplen
    if verbose:
        print(sepline)
        print('name:', module.__name__, 'file:', module.__file__)
        print(sepline)
    count = 0
    for attr in module.__dict__:
        print('%02d) %s' % (count, attr), end=' ')
        if attr.startswith('__'):
            print('<built-in name>')
        else:
            print(getattr(module, attr))
        count += 1
| {
"repo_name": "zhayangtao/HelloPython",
"path": "python01/PythonModule.py",
"copies": "1",
"size": "1745",
"license": "apache-2.0",
"hash": 5411620409957738000,
"line_mean": 14.9724770642,
"line_max": 63,
"alpha_frac": 0.5336013785,
"autogenerated": false,
"ratio": 3.38715953307393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.442076091157393,
"avg_score": null,
"num_lines": null
} |
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback
verbose = 0
#def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
    """Call fn(*args), retrying while the named pipe reports ERROR_PIPE_BUSY.

    Makes at most 8 attempts, sleeping 5 seconds between busy failures
    (the original comment claimed it looped until the user cancelled,
    which is not what the code does). Any other win32 error is re-raised
    immediately; exhaustion raises RuntimeError.
    """
    ret = None  # NOTE(review): never used; the function returns via apply() or raises
    retryCount = 0
    while retryCount < 8: # bounded retry, not "until user cancels"
        retryCount = retryCount + 1
        try:
            return apply(fn, args)
        except win32api.error, (rc, fnerr, msg):
            if rc==winerror.ERROR_PIPE_BUSY:
                win32api.Sleep(5000)
                continue
            else:
                raise win32api.error, (rc, fnerr, msg)
    raise RuntimeError, "Could not make a connection to the server"
def testClient(server,msg):
    # Round-trip a single message through the PyPipeTest named pipe on
    # *server* (CallNamedPipe does the write and read in one call).
    if verbose:
        print "Sending", msg
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
    if verbose:
        print "Server sent back '%s'" % data
    print "Sent and received a message!"
def testLargeMessage(server, size = 4096):
    # Round-trip a size-byte message and sanity-check the reply length.
    if verbose:
        print "Sending message of size %d" % (size)
    msg = "*" * size
    data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
    if len(data)-size:
        # Non-zero difference means the echo was truncated or padded.
        print "Sizes are all wrong - send %d, got back %d" % (size, len(data))
def stressThread(server, numMessages, wait):
    # Worker: send numMessages 512-byte messages through the pipe, then
    # signal *wait* so stressTestClient knows this thread has finished.
    try:
        try:
            for i in xrange(numMessages):
                r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
        except:
            traceback.print_exc()
            print "Failed after %d messages" % i
    finally:
        SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
    # Fan out numThreads workers and block until every one signals its event.
    import thread
    thread_waits = []
    for t_num in xrange(numThreads):
        # Note I could just wait on thread handles (after calling DuplicateHandle)
        # See the service itself for an example of waiting for the clients...
        wait = CreateEvent(None, 0, 0, None)
        thread_waits.append(wait)
        thread.start_new_thread(stressThread, (server,numMessages, wait))
    # Wait for all threads to finish.
    WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
    # Parse command-line options and run the selected pipe tests:
    #   -s server   target machine (default "." = local)
    #   -m count    messages per stress thread (default 500)
    #   -t count    number of stress threads (0 = no stress test)
    #   -v          verbose output
    #   -l          also run the large-message test (runs during parsing)
    import sys, getopt, string
    server = "."
    thread_count = 0
    msg_count = 500
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
        for o,a in opts:
            if o=='-s':
                server = a
            if o=='-m':
                msg_count = string.atoi(a)
            if o=='-t':
                thread_count = string.atoi(a)
            if o=='-v':
                global verbose
                verbose = 1
            if o=='-l':
                testLargeMessage(server)
        msg = string.join(args)
    except getopt.error, msg:
        # NOTE(review): rebinding 'msg' as the exception target shadows the
        # message variable assigned in the try block above.
        print msg
        my_name = os.path.split(sys.argv[0])[1]
        print "Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name
        print " -v = verbose"
        print "  Specifying a value for -t will stress test using that many threads."
        return
    testClient(server, msg)
    if thread_count > 0:
        print "Spawning %d threads each sending %d messages..." % (thread_count, msg_count)
        stressTestClient(server, thread_count, msg_count)
if __name__=='__main__':
    main()
| {
"repo_name": "windyuuy/opera",
"path": "chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/service/pipeTestServiceClient.py",
"copies": "17",
"size": "4173",
"license": "bsd-3-clause",
"hash": 6698579987453669000,
"line_mean": 33.4876033058,
"line_max": 121,
"alpha_frac": 0.5823148814,
"autogenerated": false,
"ratio": 3.6445414847161572,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A test program that generates a word document.
import sys
import os
from win32com.client import gencache
# When built with py2exe, it assumes 'Word.Application.9' is installed
# on the machine performing the build. The typelibs from the local machine
# will be used to generate makepy files, and those generated files will
# be included in the py2exe library (generally in a .zip file)
# The resulting application should run without referencing any typelibs on
# the target system.
# It will create a file:
# Output goes next to the script itself.
filename = os.path.abspath(
    os.path.join(os.path.dirname(sys.argv[0]), "output.doc"))
# EnsureDispatch generates (or reuses) makepy wrappers for Word 2000's typelib.
word = gencache.EnsureDispatch("Word.Application.9")
# For the sake of ensuring the correct module is used...
mod = sys.modules[word.__module__]
print "The module hosting the object is", mod
word.Visible = 1
doc = word.Documents.Add()
wrange = doc.Range()
for i in range(10):
    wrange.InsertAfter("Hello from py2exe %d\n" % i)
doc.SaveAs(filename)
word.Quit()
print "Done - saved to", os.path.abspath(filename)
| {
"repo_name": "pupboss/xndian",
"path": "deploy/site-packages/py2exe/samples/pywin32/com_typelib/build_gen/word/docmaker.py",
"copies": "1",
"size": "1062",
"license": "mit",
"hash": -1564788382075116800,
"line_mean": 31.1875,
"line_max": 75,
"alpha_frac": 0.7146892655,
"autogenerated": false,
"ratio": 3.3821656050955413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9596854870595541,
"avg_score": 0,
"num_lines": 32
} |
# a test scene used to help develop collision code for cocos.
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "collision"
import cocos
from cocos.director import director
import cocos.actions as ac
import cocos.collision_model as cm
import cocos.euclid as eu
import random
# the driver parameters for the city view
# all city dimensions as integer multiples of half_street_width
half_street_width = 15
streets_per_side = 5 # first and last will be out of view
street_to_square_width_multiplier = 4
street_color = (170,170,0,255)   # RGBA of the street background layer
square_color = (120,32,120,255)  # RGBA of the city-block squares
pool_car_size = 4                # number of RobotCar actors kept alive
time_to_next_crossing = 1.0      # seconds a car takes between adjacent crossings
# interesting city view quantities, derived from driver parameters and
# desired view
squares_per_side = streets_per_side - 1
street_width = 2*half_street_width
square_width = street_to_square_width_multiplier * street_width
# crossing points are at the center of street crossings, so
crossing_point_separation = square_width + 2*(half_street_width)
# we want the center of bottom left square at bottom left window, and the
# city view simetrical, so left to right in the x direction we see
# half square, [a street, a square] (repeat squares_per_side - 2 times), a
# street and half a square
# NOTE: the two view_width assignments are algebraically equivalent
# rewritings; only the last one takes effect.
view_width = ( 2*0.5*square_width + square_width*(squares_per_side -2) +
               street_width*(streets_per_side-2) )
view_width = square_width * (squares_per_side -1) + street_width*(streets_per_side-2)
view_height = view_width
# the distance in one dimension from a crossing to the center of next left
# square
# NOTE: three equivalent formulations kept for clarity; only the last
# assignment is used.
offset = 0.5 * street_width + 0.5 * square_width
offset = half_street_width + street_to_square_width_multiplier * half_street_width
offset = half_street_width * (street_to_square_width_multiplier + 1)
class Actor(cocos.sprite.Sprite):
    # Sprite that carries a collision shape (cshape) kept in sync with its
    # position, so it can be registered with a cocos CollisionManager.
    def __init__(self, *args, **kwargs):
        """same params as Sprite plus kwargs
        'rx', 'ry' for collision cshape half-extents (defaults derived
            from the scaled image size)
        'desired_width' target on-screen width in pixels; the sprite is
            scaled uniformly to match it """
        # pop the extra kwargs before Sprite sees them
        rx = kwargs.pop('rx', None)
        ry = kwargs.pop('ry', None)
        desired_width = kwargs.pop('desired_width', None)
        super(Actor, self).__init__(*args, **kwargs)
        if desired_width is None:
            desired_width = self.image.width
        desired_width = float(desired_width)
        self.scale = float(desired_width) / self.width
        if rx is None:
            # default collision radius: 80% of the visible half-width
            rx = 0.8 * desired_width / 2.0
        if ry is None:
            ry = 0.8 * self.image.height / self.image.width * desired_width /2.0
        #self.cshape = cm.AARectShape(eu.Vector2(0.0, 0.0), rx, ry)
        # circle shape in use; ry only matters for the disabled AARect variant
        self.cshape = cm.CircleShape(eu.Vector2(0.0, 0.0), rx)#, ry)
    def update_position(self, new_position):
        # keep sprite position and collision-shape center in lockstep
        assert isinstance(new_position, eu.Vector2)
        self.position = new_position
        self.cshape.center = new_position
class RobotCar(Actor):
    """A car that drives itself between street crossings.

    Simple state machine: 'free' -> 'traveling' (do_travel) -> 'free'
    when the final crossing is reached; collisions call e_burn, which
    turns the car red ('burning') and reverts it to 'free' after 2s.
    """
    def __init__(self):
        super(RobotCar, self).__init__("circle6.png", desired_width=32)
        self.e_free()
    def e_free(self):
        # enter 'free' (green); City.update will assign a new travel
        self.state = 'free'
        self.color = ( 20, 120, 70)
    def e_burn(self):
        # enter 'burning' (red); a scheduled action frees the car in 2s
        self.state = 'burning'
        self.color = (180, 0, 0)
        template_action = ac.Delay(2.0) + ac.CallFunc(self.e_free)
        self.do(template_action)
    def e_travel(self):
        self.state = 'traveling'
    def do_travel(self, initial_crossing, final_crossing):
        # begin a trip; crossings are (ix, iy) integer grid coordinates
        self.e_travel()
        self.color = ( 20, 120, 70)
        self.next_crossing = initial_crossing
        self.final_crossing = final_crossing
        self.update_when_crossing_reached()
    def update_when_crossing_reached(self):
        # set position exactly at old crossing
        ix, iy = self.next_crossing
        self.update_position(eu.Vector2(ix*crossing_point_separation,
                                        iy*crossing_point_separation))
        # update next_crossing: greedily reduce the x error first, falling
        # back to y when moving in x would leave the visible grid
        dx = self.final_crossing[0] - self.next_crossing[0]
        ok = False
        # try to reduce error in x
        if dx!=0:
            dy = 0
            if dx < 0: dx = -1
            else: dx = 1
            ix += dx
            # it is not acceptable going invisible except if final crossing
            ok = ((0<ix<(streets_per_side-1) and (0<iy<streets_per_side-1)) or
                  ((ix, iy)==self.final_crossing))
            if not ok:
                ix -= dx
        if not ok:
            # reduce error in y
            dx = 0
            dy = self.final_crossing[1] - self.next_crossing[1]
            if dy!=0:
                if dy < 0: dy = -1
                else: dy = 1
                iy += dy
        self.next_crossing = ix, iy
        # now refresh params used to update position between crossings
        self.elapsed = 0.0
        self.arrival = time_to_next_crossing
        self.move_in_x = (dx!=0)
        fastness = crossing_point_separation / time_to_next_crossing
        if self.move_in_x:
            self.scalar_vel = dx * fastness
        else:
            self.scalar_vel = dy * fastness
    def is_travel_completed(self):
        return ((self.elapsed > self.arrival) and
                (self.next_crossing == self.final_crossing))
    def update(self, dt):
        """Advance the car by dt seconds.

        dont call this when self.state != 'traveling'
        """
        self.elapsed += dt
        if self.elapsed > self.arrival:
            # crossing reached
            if self.next_crossing == self.final_crossing:
                # travel finished
                self.e_free()
            else:
                self.update_when_crossing_reached()
        else:
            x, y = self.cshape.center
            # between crossings: move linearly along the current axis
            if self.move_in_x:
                x += self.scalar_vel*dt
            else:
                y += self.scalar_vel*dt
            self.update_position(eu.Vector2(x,y))
class City(cocos.layer.Layer):
    """City grid layer with a pool of self-driving cars.

    Owns a grid collision manager; every frame it assigns travels to
    free cars, advances traveling cars, and burns colliding ones.
    """
    def __init__(self):
        super(City, self).__init__()
        # street-colored background; square blocks are drawn on top
        bg = cocos.layer.ColorLayer(*street_color,width=view_width,
                                    height=view_width)
        self.add(bg)
        self.add_squares()
        # shift the layer so the city is framed as described in the
        # module-level layout comments (offset derived there)
        self.position = -offset, -offset
        bg.position = offset, offset
        # build the fixed-size car pool
        self.cars = set()
        while len(self.cars) < pool_car_size:
            car = RobotCar()
            self.cars.add(car)
            self.add(car)
        # grid broad-phase covering the view plus one square of margin
        self.collman = cm.CollisionManagerGrid(-square_width, view_width + square_width,
                                               -square_width, view_height + square_width,
                                               40.0, 40.0)
        self.schedule(self.update)
    def add_squares(self):
        # lay out the city-block squares between the streets
        for iy in xrange(squares_per_side):
            y = half_street_width + iy*crossing_point_separation
            for ix in xrange(squares_per_side):
                square = cocos.layer.ColorLayer(*square_color,width=square_width,
                                                height=square_width)
                x = half_street_width + ix*crossing_point_separation
                square.position = (x,y)
                self.add(square, z=2)
    def generate_travel(self):
        """Return (initial_crossing, final_crossing) for a new trip.

        The start is a random crossing on the (out of view) border; the
        end is its point-symmetric crossing across the city center.
        """
        #ix,iy : ints, street crossing; 0,0 is bottom left (out of view)
        #ix, iy maps to x,y = ix*crossing_point_separation + iy*crossing_point_separation
        # iz refers to the starting crossing, jz to the final crossing
        # generate starting crossing
        if random.random()>0.5:
            # start from left - right side
            ix = 0
            if random.random()>0.5:
                ix = streets_per_side - 1
            iy = random.randint(1, streets_per_side - 2)
        else:
            # start from bottom - top side
            iy = 0
            if random.random()>0.5:
                iy = streets_per_side - 1
            ix = random.randint(1, streets_per_side-2);
        # generate final crossing by simetry of initial
        jx = streets_per_side - 1 - ix; jy = streets_per_side - 1 - iy
        initial_crossing = (ix, iy)
        final_crossing = (jx, jy)
        return initial_crossing, final_crossing
    def update(self, dt):
        # drive each car's state machine
        for car in self.cars:
            if car.state == 'free':
                initial_crossing, final_crossing = self.generate_travel()
                car.do_travel(initial_crossing, final_crossing)
            if car.state == 'traveling':
                car.update(dt)
        # handle collisions: rebuild the broad-phase each frame, then
        # burn every car involved in at least one collision
        self.collman.clear()
        for car in self.cars:
            self.collman.add(car)
        for car, other in self.collman.iter_all_collisions():
            if car.state != 'burning':
                car.e_burn()
            if other.state != 'burning':
                other.e_burn()
def main():
    # open a window sized to the visible city and run the scene
    director.init(width=view_width, height=view_height)
    scene = cocos.scene.Scene()
    city = City()
    scene.add(city)
    director.run(scene)
if __name__ == '__main__':
    main()
| {
"repo_name": "eevee/cocos2d-mirror",
"path": "test/test_all_collisions.py",
"copies": "1",
"size": "9036",
"license": "bsd-3-clause",
"hash": -5006280332121477000,
"line_mean": 34.8571428571,
"line_max": 89,
"alpha_frac": 0.5722664896,
"autogenerated": false,
"ratio": 3.496904024767802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9539294921116709,
"avg_score": 0.005975118650218609,
"num_lines": 252
} |
# a test scene used to help develop collision code for cocos.
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "collision"
import cocos
from cocos.director import director
import cocos.actions as ac
import cocos.collision_model as cm
import cocos.euclid as eu
import random
# the driver parameters for the city view
# all city dimensions as integer multiples of half_street_width
half_street_width = 15
streets_per_side = 5 # first and last will be out of view
street_to_square_width_multiplier = 4
street_color = (170,170,0,255)
square_color = (120,32,120,255)
pool_car_size = 4
time_to_next_crossing = 1.0
# interesting city view quantities, derived from driver parameters and
# desired view
squares_per_side = streets_per_side - 1
street_width = 2*half_street_width
square_width = street_to_square_width_multiplier * street_width
# crossing points are at the center of street crossings, so
crossing_point_separation = square_width + 2*(half_street_width)
# we want the center of bottom left square at bottom left window, and the
# city view simetrical, so left to right in the x direction we see
# half square, [a street, a square] (repeat squares_per_side - 2 times), a
# street and half a square
view_width = ( 2*0.5*square_width + square_width*(squares_per_side -2) +
street_width*(streets_per_side-2) )
view_width = square_width * (squares_per_side -1) + street_width*(streets_per_side-2)
view_height = view_width
# the distance in one dimension from a crossing to the center of next left
# square
offset = 0.5 * street_width + 0.5 * square_width
offset = half_street_width + street_to_square_width_multiplier * half_street_width
offset = half_street_width * (street_to_square_width_multiplier + 1)
class Actor(cocos.sprite.Sprite):
def __init__(self, *args, **kwargs):
"""same params as Sprite plus kwargs
'rx', 'ry' for collision cshape
'desired_width' """
rx = kwargs.pop('rx', None)
ry = kwargs.pop('ry', None)
desired_width = kwargs.pop('desired_width', None)
super(Actor, self).__init__(*args, **kwargs)
if desired_width is None:
desired_width = self.image.width
desired_width = float(desired_width)
self.scale = float(desired_width) / self.width
if rx is None:
rx = 0.8 * desired_width / 2.0
if ry is None:
ry = 0.8 * self.image.height / self.image.width * desired_width /2.0
#self.cshape = cm.AARectShape(eu.Vector2(0.0, 0.0), rx, ry)
self.cshape = cm.CircleShape(eu.Vector2(0.0, 0.0), rx)#, ry)
def update_position(self, new_position):
assert isinstance(new_position, eu.Vector2)
self.position = new_position
self.cshape.center = new_position
class RobotCar(Actor):
"""
"""
def __init__(self):
super(RobotCar, self).__init__("circle6.png", desired_width=32)
self.e_free()
def e_free(self):
self.state = 'free'
self.color = ( 20, 120, 70)
def e_burn(self):
self.state = 'burning'
self.color = (180, 0, 0)
template_action = ac.Delay(2.0) + ac.CallFunc(self.e_free)
self.do(template_action)
def e_travel(self):
self.state = 'traveling'
def do_travel(self, initial_crossing, final_crossing):
self.e_travel()
self.color = ( 20, 120, 70)
self.next_crossing = initial_crossing
self.final_crossing = final_crossing
self.update_when_crossing_reached()
def update_when_crossing_reached(self):
# set position exactly at old crossing
ix, iy = self.next_crossing
self.update_position(eu.Vector2(ix*crossing_point_separation,
iy*crossing_point_separation))
# update next_crossing
dx = self.final_crossing[0] - self.next_crossing[0]
ok = False
# try to reduce error in x
if dx!=0:
dy = 0
if dx < 0: dx = -1
else: dx = 1
ix += dx
# it is not acceptable going invisible except if final crossing
ok = ((0<ix<(streets_per_side-1) and (0<iy<streets_per_side-1)) or
((ix, iy)==self.final_crossing))
if not ok:
ix -= dx
if not ok:
# reduce error in y
dx = 0
dy = self.final_crossing[1] - self.next_crossing[1]
if dy!=0:
if dy < 0: dy = -1
else: dy = 1
iy += dy
self.next_crossing = ix, iy
# now refresh params used to update position between crossings
self.elapsed = 0.0
self.arrival = time_to_next_crossing
self.move_in_x = (dx!=0)
fastness = crossing_point_separation / time_to_next_crossing
if self.move_in_x:
self.scalar_vel = dx * fastness
else:
self.scalar_vel = dy * fastness
def is_travel_completed(self):
return ((self.elapsed > self.arrival) and
(self.next_crossing == self.final_crossing))
def update(self, dt):
"""
dont call this when self.state != 'traveling'
"""
self.elapsed += dt
if self.elapsed > self.arrival:
# crossing reached
if self.next_crossing == self.final_crossing:
# travel finished
self.e_free()
else:
self.update_when_crossing_reached()
else:
x, y = self.cshape.center
# between crossings
if self.move_in_x:
x += self.scalar_vel*dt
else:
y += self.scalar_vel*dt
self.update_position(eu.Vector2(x,y))
class City(cocos.layer.Layer):
def __init__(self):
super(City, self).__init__()
bg = cocos.layer.ColorLayer(*street_color,width=view_width,
height=view_width)
self.add(bg)
self.add_squares()
self.position = -offset, -offset
bg.position = offset, offset
self.cars = set()
while len(self.cars) < pool_car_size:
car = RobotCar()
self.cars.add(car)
self.add(car)
self.collman = cm.CollisionManagerGrid(-square_width, view_width + square_width,
-square_width, view_height + square_width,
40.0, 40.0)
self.schedule(self.update)
def add_squares(self):
for iy in xrange(squares_per_side):
y = half_street_width + iy*crossing_point_separation
for ix in xrange(squares_per_side):
square = cocos.layer.ColorLayer(*square_color,width=square_width,
height=square_width)
x = half_street_width + ix*crossing_point_separation
square.position = (x,y)
self.add(square, z=2)
def generate_travel(self):
#ix,iy : ints, street crossing; 0,0 is bottom left (out of view)
#ix, iy maps to x,y = ix*crossing_point_separation + iy*crossing_point_separation
# iz refers to the starting crossing, jz to the final crossing
# generate starting crossing
if random.random()>0.5:
# start from left - right side
ix = 0
if random.random()>0.5:
ix = streets_per_side - 1
iy = random.randint(1, streets_per_side - 2)
else:
# start from bottom - top side
iy = 0
if random.random()>0.5:
iy = streets_per_side - 1
ix = random.randint(1, streets_per_side-2);
# generate final crossing by simetry of initial
jx = streets_per_side - 1 - ix; jy = streets_per_side - 1 - iy
initial_crossing = (ix, iy)
final_crossing = (jx, jy)
return initial_crossing, final_crossing
def update(self, dt):
for car in self.cars:
if car.state == 'free':
initial_crossing, final_crossing = self.generate_travel()
car.do_travel(initial_crossing, final_crossing)
if car.state == 'traveling':
car.update(dt)
# handle collisions
self.collman.clear()
for car in self.cars:
self.collman.add(car)
for car, other in self.collman.iter_all_collisions():
if car.state != 'burning':
car.e_burn()
if other.state != 'burning':
other.e_burn()
def main():
director.init(width=view_width, height=view_height)
scene = cocos.scene.Scene()
city = City()
scene.add(city)
director.run(scene)
if __name__ == '__main__':
main()
| {
"repo_name": "shadowmint/nwidget",
"path": "lib/cocos2d-0.5.5/test/test_all_collisions.py",
"copies": "1",
"size": "9288",
"license": "apache-2.0",
"hash": -5754364714064246000,
"line_mean": 34.8571428571,
"line_max": 89,
"alpha_frac": 0.5567398794,
"autogenerated": false,
"ratio": 3.545038167938931,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4601778047338931,
"avg_score": null,
"num_lines": null
} |
# a test scene used to help develop collision code for summa.
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "collision"
import summa
from summa.director import director
import summa.actions as ac
import summa.collision_model as cm
import summa.euclid as eu
import random
# the driver parameters for the city view
# all city dimensions as integer multiples of half_street_width
half_street_width = 15
streets_per_side = 5 # first and last will be out of view
street_to_square_width_multiplier = 4
street_color = (170,170,0,255)
square_color = (120,32,120,255)
pool_car_size = 4
time_to_next_crossing = 1.0
# interesting city view quantities, derived from driver parameters and
# desired view
squares_per_side = streets_per_side - 1
street_width = 2*half_street_width
square_width = street_to_square_width_multiplier * street_width
# crossing points are at the center of street crossings, so
crossing_point_separation = square_width + 2*(half_street_width)
# we want the center of bottom left square at bottom left window, and the
# city view simetrical, so left to right in the x direction we see
# half square, [a street, a square] (repeat squares_per_side - 2 times), a
# street and half a square
view_width = ( 2*0.5*square_width + square_width*(squares_per_side -2) +
street_width*(streets_per_side-2) )
view_width = square_width * (squares_per_side -1) + street_width*(streets_per_side-2)
view_height = view_width
# the distance in one dimension from a crossing to the center of next left
# square
offset = 0.5 * street_width + 0.5 * square_width
offset = half_street_width + street_to_square_width_multiplier * half_street_width
offset = half_street_width * (street_to_square_width_multiplier + 1)
class Actor(summa.sprite.Sprite):
def __init__(self, *args, **kwargs):
"""same params as Sprite plus kwargs
'rx', 'ry' for collision cshape
'desired_width' """
rx = kwargs.pop('rx', None)
ry = kwargs.pop('ry', None)
desired_width = kwargs.pop('desired_width', None)
super(Actor, self).__init__(*args, **kwargs)
if desired_width is None:
desired_width = self.image.width
desired_width = float(desired_width)
self.scale = float(desired_width) / self.width
if rx is None:
rx = 0.8 * desired_width / 2.0
if ry is None:
ry = 0.8 * self.image.height / self.image.width * desired_width /2.0
#self.cshape = cm.AARectShape(eu.Vector2(0.0, 0.0), rx, ry)
self.cshape = cm.CircleShape(eu.Vector2(0.0, 0.0), rx)#, ry)
def update_position(self, new_position):
assert isinstance(new_position, eu.Vector2)
self.position = new_position
self.cshape.center = new_position
class RobotCar(Actor):
"""
"""
def __init__(self):
super(RobotCar, self).__init__("circle6.png", desired_width=32)
self.e_free()
def e_free(self):
self.state = 'free'
self.color = ( 20, 120, 70)
def e_burn(self):
self.state = 'burning'
self.color = (180, 0, 0)
template_action = ac.Delay(2.0) + ac.CallFunc(self.e_free)
self.do(template_action)
def e_travel(self):
self.state = 'traveling'
def do_travel(self, initial_crossing, final_crossing):
self.e_travel()
self.color = ( 20, 120, 70)
self.next_crossing = initial_crossing
self.final_crossing = final_crossing
self.update_when_crossing_reached()
def update_when_crossing_reached(self):
# set position exactly at old crossing
ix, iy = self.next_crossing
self.update_position(eu.Vector2(ix*crossing_point_separation,
iy*crossing_point_separation))
# update next_crossing
dx = self.final_crossing[0] - self.next_crossing[0]
ok = False
# try to reduce error in x
if dx!=0:
dy = 0
if dx < 0: dx = -1
else: dx = 1
ix += dx
# it is not acceptable going invisible except if final crossing
ok = ((0<ix<(streets_per_side-1) and (0<iy<streets_per_side-1)) or
((ix, iy)==self.final_crossing))
if not ok:
ix -= dx
if not ok:
# reduce error in y
dx = 0
dy = self.final_crossing[1] - self.next_crossing[1]
if dy!=0:
if dy < 0: dy = -1
else: dy = 1
iy += dy
self.next_crossing = ix, iy
# now refresh params used to update position between crossings
self.elapsed = 0.0
self.arrival = time_to_next_crossing
self.move_in_x = (dx!=0)
fastness = crossing_point_separation / time_to_next_crossing
if self.move_in_x:
self.scalar_vel = dx * fastness
else:
self.scalar_vel = dy * fastness
def is_travel_completed(self):
return ((self.elapsed > self.arrival) and
(self.next_crossing == self.final_crossing))
def update(self, dt):
"""
dont call this when self.state != 'traveling'
"""
self.elapsed += dt
if self.elapsed > self.arrival:
# crossing reached
if self.next_crossing == self.final_crossing:
# travel finished
self.e_free()
else:
self.update_when_crossing_reached()
else:
x, y = self.cshape.center
# between crossings
if self.move_in_x:
x += self.scalar_vel*dt
else:
y += self.scalar_vel*dt
self.update_position(eu.Vector2(x,y))
class City(summa.layer.Layer):
def __init__(self):
super(City, self).__init__()
bg = summa.layer.ColorLayer(*street_color,width=view_width,
height=view_width)
self.add(bg)
self.add_squares()
self.position = -offset, -offset
bg.position = offset, offset
self.cars = set()
while len(self.cars) < pool_car_size:
car = RobotCar()
self.cars.add(car)
self.add(car)
self.collman = cm.CollisionManagerGrid(-square_width, view_width + square_width,
-square_width, view_height + square_width,
40.0, 40.0)
self.schedule(self.update)
def add_squares(self):
for iy in xrange(squares_per_side):
y = half_street_width + iy*crossing_point_separation
for ix in xrange(squares_per_side):
square = summa.layer.ColorLayer(*square_color,width=square_width,
height=square_width)
x = half_street_width + ix*crossing_point_separation
square.position = (x,y)
self.add(square, z=2)
def generate_travel(self):
#ix,iy : ints, street crossing; 0,0 is bottom left (out of view)
#ix, iy maps to x,y = ix*crossing_point_separation + iy*crossing_point_separation
# iz refers to the starting crossing, jz to the final crossing
# generate starting crossing
if random.random()>0.5:
# start from left - right side
ix = 0
if random.random()>0.5:
ix = streets_per_side - 1
iy = random.randint(1, streets_per_side - 2)
else:
# start from bottom - top side
iy = 0
if random.random()>0.5:
iy = streets_per_side - 1
ix = random.randint(1, streets_per_side-2);
# generate final crossing by simetry of initial
jx = streets_per_side - 1 - ix; jy = streets_per_side - 1 - iy
initial_crossing = (ix, iy)
final_crossing = (jx, jy)
return initial_crossing, final_crossing
def update(self, dt):
for car in self.cars:
if car.state == 'free':
initial_crossing, final_crossing = self.generate_travel()
car.do_travel(initial_crossing, final_crossing)
if car.state == 'traveling':
car.update(dt)
# handle collisions
self.collman.clear()
for car in self.cars:
self.collman.add(car)
for car, other in self.collman.iter_all_collisions():
if car.state != 'burning':
car.e_burn()
if other.state != 'burning':
other.e_burn()
def main():
director.init(width=view_width, height=view_height)
scene = summa.scene.Scene()
city = City()
scene.add(city)
director.run(scene)
if __name__ == '__main__':
main()
| {
"repo_name": "shackra/thomas-aquinas",
"path": "tests/test_all_collisions.py",
"copies": "1",
"size": "9036",
"license": "bsd-3-clause",
"hash": 6257474981547206000,
"line_mean": 34.8571428571,
"line_max": 89,
"alpha_frac": 0.5722664896,
"autogenerated": false,
"ratio": 3.496904024767802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4569170514367802,
"avg_score": null,
"num_lines": null
} |
"""A test script that downloads blobs from a reflector server"""
import argparse
import itertools
import json
import random
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('reflector_ip')
parser.add_argument('--ssh-key')
parser.add_argument('--size', type=int, default=100)
parser.add_argument('--batch', type=int, default=10)
parser.add_argument('--timeout', type=int, default=30)
parser.add_argument('--hashes', help='file listing hashes in json')
args = parser.parse_args()
if args.hashes:
hashes = readHashes(args.hashes)
else:
hashes = getHashes(args.reflector_ip, args.ssh_key)
if len(hashes) > args.size:
selected_hashes = random.sample(hashes, args.size)
else:
print 'Only {} hashes are available'.format(hashes)
selected_hashes = hashes
successes = 0
for hashes in grouper(selected_hashes, args.batch):
hashes = filter(None, hashes)
successes += downloadHashes(args.reflector_ip, hashes, args.timeout)
print 'Downloaded {} / {}'.format(successes, len(selected_hashes))
def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    # classic itertools recipe: n references to the *same* iterator, so
    # izip_longest pulls n consecutive items per output tuple, padding
    # the final short tuple with fillvalue
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
def readHashes(hash_file):
    """Return the blob-hash list stored as JSON in *hash_file*."""
    with open(hash_file) as handle:
        contents = handle.read()
    return json.loads(contents)
def getHashes(ip, key=None):
    """Fetch the blob-hash list from the reflector host over ssh.

    Runs `lbrynet-cli get_blob_hashes` remotely as user 'lbry' and
    parses its JSON output; `key` is an optional ssh identity file.
    """
    key = ['-i', key] if key else []
    hashes = subprocess.check_output(['ssh'] + key +
        ['lbry@{}'.format(ip), '/opt/venvs/lbrynet/bin/lbrynet-cli', 'get_blob_hashes'])
    return json.loads(hashes)
def downloadHashes(ip, blob_hashes, timeout=30):
    """Spawn one download_blob_from_peer.py subprocess per blob hash
    (all in parallel) and return how many exited successfully."""
    processes = [
        subprocess.Popen(
            [
                'python',
                'download_blob_from_peer.py',
                '--timeout', str(timeout), '{}:3333'.format(ip), blob_hash,
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        for blob_hash in blob_hashes
    ]
    # wait for each process in turn, echoing output for failures
    for p, h in zip(processes, blob_hashes):
        stdout, stderr = p.communicate()
        print p.returncode, h
        if p.returncode != 0:
            print 'Failed to download', h
            print stdout
            print stderr
    return sum(1 for p in processes if p.returncode == 0)
if __name__ == '__main__':
sys.exit(main())
| {
"repo_name": "zestyr/lbry",
"path": "scripts/download_blobs_from_reflector.py",
"copies": "1",
"size": "2458",
"license": "mit",
"hash": 178935417038363230,
"line_mean": 29.725,
"line_max": 88,
"alpha_frac": 0.6159479251,
"autogenerated": false,
"ratio": 3.7469512195121952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4862899144612195,
"avg_score": null,
"num_lines": null
} |
# A test suite for pdb; at the moment, this only validates skipping of
# specified test modules (RFE #5142).
import imp
import sys
from test import test_support
# This little helper class is essential for testing pdb under doctest.
from test_doctest import _FakeInput
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier.

    While active, sys.stdin is replaced by a fake that replays the
    given canned input lines; the real stdin is restored on exit.
    """

    def __init__(self, input):
        self.input = input

    def __enter__(self):
        # remember the genuine stdin and swap in the scripted one
        self.real_stdin, sys.stdin = sys.stdin, _FakeInput(self.input)

    def __exit__(self, *exc):
        # always put the genuine stdin back, even if the body raised
        sys.stdin = self.real_stdin
def write(x):
    """Print x; helper called from the doctest transcripts below.

    Uses the parenthesized form, which prints a single argument
    identically under Python 2 and is valid Python 3 syntax.
    """
    print(x)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb().set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): write(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): write(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb().set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'print 42',
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
(Pdb) clear 1
Deleted breakpoint 1
(Pdb) commands 2
(com) print 42
(com) end
(Pdb) continue
1
42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['string*']).set_trace()
... string.lower('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.lower('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.lower('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
# Built dynamically so pdb's skip pattern 'module_to_skip*' has a real
# module name to match against; foo_pony just invokes the callback.
mod = imp.new_module('module_to_skip')
exec 'def foo_pony(callback): x = 1; callback(); return None' in mod.__dict__
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*']).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb()
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
>>> with PdbTestInput([
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint 1 at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def test_main():
    # Python 2.x style entry point: run this module's doctests via
    # test_support (imported at the top of the file, outside this view).
    from test import test_pdb
    test_support.run_doctest(test_pdb, verbosity=True)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_main()
| {
"repo_name": "hwu25/AppPkg",
"path": "Applications/Python/Python-2.7.2/Lib/test/test_pdb.py",
"copies": "7",
"size": "8721",
"license": "bsd-2-clause",
"hash": 2975681377854749700,
"line_mean": 28.3867595819,
"line_max": 92,
"alpha_frac": 0.557160876,
"autogenerated": false,
"ratio": 3.529340348037232,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009320651353335653,
"num_lines": 287
} |
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier.

    On entry, stdin is replaced with a fake that replays the scripted
    command list; on exit, stdin and any pre-existing trace function
    are restored.
    """

    def __init__(self, input):
        self.input = input

    def __enter__(self):
        # Remember any active trace function before pdb installs its own.
        self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
        # Swap stdin for the scripted fake in one step.
        self.real_stdin, sys.stdin = sys.stdin, _FakeInput(self.input)

    def __exit__(self, *exc):
        sys.stdin = self.real_stdin
        if self.orig_trace:
            sys.settrace(self.orig_trace)
def test_pdb_displayhook():
    # Doctest-driven: the docstring below is executed by the doctest runner.
    """This tests the custom displayhook for pdb.
    >>> def test_function(foo, bar):
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     pass
    >>> with PdbTestInput([
    ...     'foo',
    ...     'bar',
    ...     'for i in range(5): print(i)',
    ...     'continue',
    ... ]):
    ...     test_function(1, None)
    > <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
    -> pass
    (Pdb) foo
    1
    (Pdb) bar
    (Pdb) for i in range(5): print(i)
    0
    1
    2
    3
    4
    (Pdb) continue
    """
def test_pdb_basic_commands():
    # Doctest-driven: exercises step/args/list/bt/up/down/next/until/jump/return.
    """Test the basic commands of pdb.
    >>> def test_function_2(foo, bar='default'):
    ...     print(foo)
    ...     for i in range(5):
    ...         print(i)
    ...     print(bar)
    ...     for i in range(10):
    ...         never_executed
    ...     print('after for')
    ...     print('...')
    ...     return foo.upper()
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     ret = test_function_2('baz')
    ...     print(ret)
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'step',       # entering the function call
    ...     'args',       # display function args
    ...     'list',       # list function source
    ...     'bt',         # display backtrace
    ...     'up',         # step up to test_function()
    ...     'down',       # step down to test_function_2() again
    ...     'next',       # stepping to print(foo)
    ...     'next',       # stepping to the for loop
    ...     'step',       # stepping into the for loop
    ...     'until',      # continuing until out of the for loop
    ...     'next',       # executing the print(bar)
    ...     'jump 8',     # jump over second for loop
    ...     'return',     # return out of function
    ...     'retval',     # display return value
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) args
    foo = 'baz'
    bar = 'default'
    (Pdb) list
      1  ->     def test_function_2(foo, bar='default'):
      2             print(foo)
      3             for i in range(5):
      4                 print(i)
      5             print(bar)
      6             for i in range(10):
      7                 never_executed
      8             print('after for')
      9             print('...')
     10             return foo.upper()
    [EOF]
    (Pdb) bt
    ...
      <doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
    -> test_function()
      <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) up
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) down
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
    -> print(foo)
    (Pdb) next
    baz
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
    -> for i in range(5):
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
    -> print(i)
    (Pdb) until
    0
    1
    2
    3
    4
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
    -> print(bar)
    (Pdb) next
    default
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
    -> for i in range(10):
    (Pdb) jump 8
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
    -> print('after for')
    (Pdb) return
    after for
    ...
    --Return--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
    -> return foo.upper()
    (Pdb) retval
    'BAZ'
    (Pdb) continue
    BAZ
    """
def test_pdb_breakpoint_commands():
    # Doctest-driven: exercises break/disable/ignore/condition/clear/tbreak/commands.
    """Test basic commands related to breakpoints.
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    First, need to clear bdb state that might be left over from previous tests.
    Otherwise, the new breakpoints might get assigned different numbers.
    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1
    >>> Breakpoint.bplist = {}
    >>> Breakpoint.bpbynumber = [None]
    Now test the breakpoint commands.  NORMALIZE_WHITESPACE is needed because
    the breakpoint list outputs a tab for the "stop only" and "ignore next"
    lines, which we don't want to put in here.
    >>> with PdbTestInput([  # doctest: +NORMALIZE_WHITESPACE
    ...     'break 3',
    ...     'disable 1',
    ...     'ignore 1 10',
    ...     'condition 1 1 < 2',
    ...     'break 4',
    ...     'break 4',
    ...     'break',
    ...     'clear 3',
    ...     'break',
    ...     'condition 1',
    ...     'enable 1',
    ...     'clear 1',
    ...     'commands 2',
    ...     'p "42"',
    ...     'print("42", 7*6)',     # Issue 18764 (not about breakpoints)
    ...     'end',
    ...     'continue',  # will stop at breakpoint 2 (line 4)
    ...     'clear',  # clear all!
    ...     'y',
    ...     'tbreak 5',
    ...     'continue', # will stop at temporary breakpoint
    ...     'break',    # make sure breakpoint is gone
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
    -> print(1)
    (Pdb) break 3
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) disable 1
    Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) ignore 1 10
    Will ignore next 10 crossings of breakpoint 1.
    (Pdb) condition 1 1 < 2
    New condition set for breakpoint 1.
    (Pdb) break 4
    Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break 4
    Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    3   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) clear 3
    Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) condition 1
    Breakpoint 1 is now unconditional.
    (Pdb) enable 1
    Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) clear 1
    Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) commands 2
    (com) p "42"
    (com) print("42", 7*6)
    (com) end
    (Pdb) continue
    1
    '42'
    42 42
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
    -> print(2)
    (Pdb) clear
    Clear all breaks? y
    Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) tbreak 5
    Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    (Pdb) continue
    2
    Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
    -> print(3)
    (Pdb) break
    (Pdb) continue
    3
    4
    """
# Tiny module-level helpers.  NOTE: their exact source text is asserted by the
# ``longlist`` and ``source`` doctests in test_list_commands below, so no
# comments, docstrings, or code changes may be made inside their bodies.
def do_nothing():
    pass
def do_something():
    print(42)
def test_list_commands():
    # Doctest-driven: exercises list/longlist/source, including error cases.
    """Test the list and source commands of pdb.
    >>> def test_function_2(foo):
    ...     import test.test_pdb
    ...     test.test_pdb.do_nothing()
    ...     'some...'
    ...     'more...'
    ...     'code...'
    ...     'to...'
    ...     'make...'
    ...     'a...'
    ...     'long...'
    ...     'listing...'
    ...     'useful...'
    ...     '...'
    ...     '...'
    ...     return foo
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     ret = test_function_2('baz')
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'list',      # list first function
    ...     'step',      # step into second function
    ...     'list',      # list second function
    ...     'list',      # continue listing to EOF
    ...     'list 1,3',  # list specific lines
    ...     'list x',    # invalid argument
    ...     'next',      # step to import
    ...     'next',      # step over import
    ...     'step',      # step into do_nothing
    ...     'longlist',  # list all lines
    ...     'source do_something',  # list all lines of function
    ...     'source fooxxx',        # something that doesn't exit
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) list
      1         def test_function():
      2             import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
      3  ->         ret = test_function_2('baz')
    [EOF]
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
    -> def test_function_2(foo):
    (Pdb) list
      1  ->     def test_function_2(foo):
      2             import test.test_pdb
      3             test.test_pdb.do_nothing()
      4             'some...'
      5             'more...'
      6             'code...'
      7             'to...'
      8             'make...'
      9             'a...'
     10             'long...'
     11             'listing...'
    (Pdb) list
     12             'useful...'
     13             '...'
     14             '...'
     15             return foo
    [EOF]
    (Pdb) list 1,3
      1  ->     def test_function_2(foo):
      2             import test.test_pdb
      3             test.test_pdb.do_nothing()
    (Pdb) list x
    *** ...
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
    -> import test.test_pdb
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
    -> test.test_pdb.do_nothing()
    (Pdb) step
    --Call--
    > ...test_pdb.py(...)do_nothing()
    -> def do_nothing():
    (Pdb) longlist
    ...  -> def do_nothing():
    ...        pass
    (Pdb) source do_something
    ... def do_something():
    ...     print(42)
    (Pdb) source fooxxx
    *** ...
    (Pdb) continue
    """
def test_post_mortem():
    # Doctest-driven: verifies navigation of a traceback after an exception.
    """Test post mortem traceback debugging.
    >>> def test_function_2():
    ...     try:
    ...         1/0
    ...     finally:
    ...         print('Exception!')
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     test_function_2()
    ...     print('Not reached.')
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'next',      # step over exception-raising call
    ...     'bt',        # get a backtrace
    ...     'list',      # list code of test_function()
    ...     'down',      # step into test_function_2()
    ...     'list',      # list code of test_function_2()
    ...     'continue',
    ... ]):
    ...    try:
    ...        test_function()
    ...    except ZeroDivisionError:
    ...        print('Correctly reraised.')
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) next
    Exception!
    ZeroDivisionError: division by zero
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) bt
    ...
      <doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
    -> test_function()
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
      <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
      1         def test_function():
      2             import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
      3  ->         test_function_2()
      4             print('Not reached.')
    [EOF]
    (Pdb) down
    > <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
      1         def test_function_2():
      2             try:
      3  >>             1/0
      4             finally:
      5  ->             print('Exception!')
    [EOF]
    (Pdb) continue
    Correctly reraised.
    """
def test_pdb_skip_modules():
    # Doctest-driven: steps over a call into a module matched by Pdb(skip=...).
    """This illustrates the simple case of module skipping.
    >>> def skip_module():
    ...     import string
    ...     import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
    ...     string.capwords('FOO')
    >>> with PdbTestInput([
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
    -> string.capwords('FOO')
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
    -> string.capwords('FOO')
    (Pdb) continue
    """
# Module for testing skipping of module that makes a callback
# (used by test_pdb_skip_modules_with_callback below; built with exec so the
# synthetic module has a real function whose frames carry its module name).
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
    # Doctest-driven: skipping must still stop inside a callback made by the
    # skipped module.
    """This illustrates skipping of modules that call into other code.
    >>> def skip_module():
    ...     def callback():
    ...         return None
    ...     import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
    ...     mod.foo_pony(callback)
    >>> with PdbTestInput([
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    ...     pass  # provides something to "step" to
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
    -> mod.foo_pony(callback)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
    -> def callback():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
    -> mod.foo_pony(callback)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
    -> pass  # provides something to "step" to
    (Pdb) continue
    """
def test_pdb_continue_in_bottomframe():
    # Doctest-driven: py3 version of the issue #5294 regression test.
    """Test that "continue" and "next" work properly in bottom frame (issue #5294).
    >>> def test_function():
    ...     import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
    ...     inst.set_trace()
    ...     inst.botframe = sys._getframe()  # hackery to get the right botframe
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    >>> with PdbTestInput([  # doctest: +ELLIPSIS
    ...     'next',
    ...     'break 7',
    ...     'continue',
    ...     'next',
    ...     'continue',
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
    -> inst.botframe = sys._getframe()  # hackery to get the right botframe
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
    -> print(1)
    (Pdb) break 7
    Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
    (Pdb) continue
    1
    2
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
    -> print(3)
    (Pdb) next
    3
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
    -> print(4)
    (Pdb) continue
    4
    """
def pdb_invoke(method, arg):
    """Run pdb.method(arg).

    Helper for the run/runeval doctests: builds a fresh non-interactive
    Pdb instance and dispatches the named method on it.
    """
    dbg = pdb.Pdb(nosigint=True, readrc=False)
    runner = getattr(dbg, method)
    runner(arg)
def test_pdb_run_with_incorrect_argument():
    # Doctest-driven: run/runeval must reject a non-string/non-code argument.
    """Testing run and runeval with incorrect first argument.
    >>> pti = PdbTestInput(['continue',])
    >>> with pti:
    ...     pdb_invoke('run', lambda x: x)
    Traceback (most recent call last):
    TypeError: exec() arg 1 must be a string, bytes or code object
    >>> with pti:
    ...     pdb_invoke('runeval', lambda x: x)
    Traceback (most recent call last):
    TypeError: eval() arg 1 must be a string, bytes or code object
    """
def test_pdb_run_with_code_object():
    # Doctest-driven: run/runeval accept a pre-compiled code object.
    """Testing run and runeval with code object as a first argument.
    >>> with PdbTestInput(['step','x', 'continue']):  # doctest: +ELLIPSIS
    ...     pdb_invoke('run', compile('x=1', '<string>', 'exec'))
    > <string>(1)<module>()...
    (Pdb) step
    --Return--
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue
    >>> with PdbTestInput(['x', 'continue']):
    ...     x=0
    ...     pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue
    """
def test_next_until_return_at_return_event():
    # Doctest-driven: next/until/return issued at a --Return-- event must stop
    # in the caller.
    """Test that pdb stops after a next/until/return issued at a return debug event.
    >>> def test_function_2():
    ...     x = 1
    ...     x = 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     test_function_2()
    ...     test_function_2()
    ...     test_function_2()
    ...     end = 1
    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1
    >>> with PdbTestInput(['break test_function_2',
    ...                    'continue',
    ...                    'return',
    ...                    'next',
    ...                    'continue',
    ...                    'return',
    ...                    'until',
    ...                    'continue',
    ...                    'return',
    ...                    'return',
    ...                    'continue']):
    ...    test_function()
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
    -> test_function_2()
    (Pdb) break test_function_2
    Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) next
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) until
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) return
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
    -> end = 1
    (Pdb) continue
    """
def test_pdb_next_command_for_generator():
    # Doctest-driven: "next" inside a generator must not unwind on yield.
    """Testing skip unwinding stack on yield for generators for "next" command
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         if next(it) != 0:
    ...             raise AssertionError
    ...         next(it)
    ...     except StopIteration as ex:
    ...         if ex.value != 1:
    ...             raise AssertionError
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
    -> if next(it) != 0:
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
    -> return 1
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
    -> return 1
    (Pdb) step
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
    -> next(it)
    (Pdb) continue
    finished
    """
def test_pdb_return_command_for_generator():
    # Doctest-driven: "return" inside a generator runs it to exhaustion.
    """Testing no unwinding stack on yield for generators
    for "return" command
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         if next(it) != 0:
    ...             raise AssertionError
    ...         next(it)
    ...     except StopIteration as ex:
    ...         if ex.value != 1:
    ...             raise AssertionError
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'return',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
    -> if next(it) != 0:
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) return
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
    -> next(it)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
    -> except StopIteration as ex:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
    -> if ex.value != 1:
    (Pdb) continue
    finished
    """
def test_pdb_until_command_for_generator():
    # Doctest-driven: "until <line>" inside a generator keeps running through
    # yields until the target line.
    """Testing no unwinding stack on yield for generators
    for "until" command if target breakpoint is not reached
    >>> def test_gen():
    ...     yield 0
    ...     yield 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print(i)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'until 4',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) until 4
    0
    1
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
    -> yield 2
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
    -> yield 2
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
    -> print(i)
    (Pdb) continue
    2
    finished
    """
def test_pdb_next_command_in_generator_for_loop():
    # Doctest-driven: "next" at a generator's return surfaces the internal
    # StopIteration and stops back in the for loop.
    """The next command on returning from a generator controlled by a for loop.
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123
    >>> with PdbTestInput(['break test_gen',
    ...                    'continue',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) break test_gen
    Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
    -> return 1
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
def test_pdb_next_command_subiterator():
    # Doctest-driven: "next" across a ``yield from`` delegation.
    """The next command in a generator with a subiterator.
    >>> def test_subgenerator():
    ...     yield 0
    ...     return 1
    >>> def test_gen():
    ...     x = yield from test_subgenerator()
    ...     return x
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
    -> def test_gen():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
    -> x = yield from test_subgenerator()
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
    -> return x
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
def test_pdb_issue_20766():
    # Doctest-driven: the SIGINT handler must be re-installed on every
    # set_trace() without leaking references.
    """Test for reference leaks when the SIGINT handler is set.
    Note a fix for PyPy: on CPython, the two iterations through the loop
    don't stop at the same line each time.  Actually, if the loop
    iterates more than twice, we have a behavior of period two(!).  This
    is due to very internal behavior that could be classified as a bug
    and that PyPy doesn't emulate exactly.
    >>> def test_function():
    ...     i = 1
    ...     while i <= 2:
    ...         sess = pdb.Pdb()
    ...         sess.set_trace(sys._getframe())
    ...         print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    ...         i += 1
    >>> with PdbTestInput(['continue',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
    -> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    (Pdb) continue
    pdb 1: <built-in function default_int_handler>
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
    -> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    (Pdb) continue
    pdb 2: <built-in function default_int_handler>
    """
class PdbTestCase(unittest.TestCase):
    """Non-doctest tests: drive pdb in a subprocess or via its helper APIs."""
    def run_pdb(self, script, commands):
        """Run 'script' lines with pdb and the pdb 'commands'."""
        filename = 'main.py'
        with open(filename, 'w') as f:
            f.write(textwrap.dedent(script))
        self.addCleanup(support.unlink, filename)
        self.addCleanup(support.rmtree, '__pycache__')
        cmd = [sys.executable, '-m', 'pdb', filename]
        # stderr is merged into stdout below, so stderr stays None.
        stdout = stderr = None
        with subprocess.Popen(cmd, stdout=subprocess.PIPE,
                              stdin=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              ) as proc:
            stdout, stderr = proc.communicate(str.encode(commands))
        stdout = stdout and bytes.decode(stdout)
        stderr = stderr and bytes.decode(stderr)
        return stdout, stderr
    def _assert_find_function(self, file_content, func_name, expected):
        # Write file_content to TESTFN and check pdb.find_function's result;
        # expected is (name, lineno) or falsy for "not found".
        file_content = textwrap.dedent(file_content)
        with open(support.TESTFN, 'w') as f:
            f.write(file_content)
        expected = None if not expected else (
            expected[0], support.TESTFN, expected[1])
        self.assertEqual(
            expected, pdb.find_function(func_name, support.TESTFN))
    def test_find_function_empty_file(self):
        # find_function on an empty file must report nothing.
        self._assert_find_function('', 'foo', None)
    def test_find_function_found(self):
        self._assert_find_function(
            """\
            def foo():
                pass
            def bar():
                pass
            def quux():
                pass
            """,
            'bar',
            ('bar', 4),
        )
    def test_issue7964(self):
        # open the file as binary so we can force \r\n newline
        with open(support.TESTFN, 'wb') as f:
            f.write(b'print("testing my pdb")\r\n')
        cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
        proc = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'quit\n')
        self.assertNotIn(b'SyntaxError', stdout,
                         "Got a syntax error running test script under PDB")
    def test_issue13183(self):
        # Stepping out of a returning frame must land in the caller.
        script = """
            from bar import bar
            def foo():
                bar()
            def nope():
                pass
            def foobar():
                foo()
                nope()
            foobar()
        """
        commands = """
            from bar import bar
            break bar
            continue
            step
            step
            quit
        """
        bar = """
            def bar():
                pass
        """
        with open('bar.py', 'w') as f:
            f.write(textwrap.dedent(bar))
        self.addCleanup(support.unlink, 'bar.py')
        stdout, stderr = self.run_pdb(script, commands)
        self.assertTrue(
            any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
            'Fail to step into the caller after a return')
    def test_issue13210(self):
        # invoking "continue" on a non-main thread triggered an exception
        # inside signal.signal
        # raises SkipTest if python was built without threads
        support.import_module('threading')
        with open(support.TESTFN, 'wb') as f:
            f.write(textwrap.dedent("""
                import threading
                import pdb
                def start_pdb():
                    pdb.Pdb(readrc=False).set_trace()
                    x = 1
                    y = 1
                t = threading.Thread(target=start_pdb)
                t.start()""").encode('ascii'))
        cmd = [sys.executable, '-u', support.TESTFN]
        proc = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            )
        self.addCleanup(proc.stdout.close)
        stdout, stderr = proc.communicate(b'cont\n')
        self.assertNotIn('Error', stdout.decode(),
                         "Got an error running test script under PDB")
    def test_issue16180(self):
        # A syntax error in the debuggee.
        script = "def f: pass\n"
        commands = ''
        expected = "SyntaxError:"
        stdout, stderr = self.run_pdb(script, commands)
        self.assertIn(expected, stdout,
            '\n\nExpected:\n{}\nGot:\n{}\n'
            'Fail to handle a syntax error in the debuggee.'
            .format(expected, stdout))
    def test_readrc_kwarg(self):
        # Pdb(readrc=False) must ignore a broken .pdbrc in $HOME/cwd.
        script = textwrap.dedent("""
            import pdb; pdb.Pdb(readrc=False).set_trace()
            print('hello')
        """)
        save_home = os.environ.pop('HOME', None)
        try:
            with support.temp_cwd():
                with open('.pdbrc', 'w') as f:
                    f.write("invalid\n")
                with open('main.py', 'w') as f:
                    f.write(script)
                cmd = [sys.executable, 'main.py']
                proc = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                with proc:
                    stdout, stderr = proc.communicate(b'q\n')
                    self.assertNotIn("NameError: name 'invalid' is not defined",
                                     stdout.decode())
        finally:
            # Restore HOME even if the test fails.
            if save_home is not None:
                os.environ['HOME'] = save_home
    def tearDown(self):
        support.unlink(support.TESTFN)
def load_tests(*args):
    """Assemble the test suite: PdbTestCase plus this module's doctests."""
    from test import test_pdb
    return unittest.TestSuite([
        unittest.makeSuite(PdbTestCase),
        doctest.DocTestSuite(test_pdb),
    ])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "yotchang4s/cafebabepy",
"path": "src/main/python/test/test_pdb.py",
"copies": "2",
"size": "36525",
"license": "bsd-3-clause",
"hash": -6751383355134666000,
"line_mean": 31.3230088496,
"line_max": 98,
"alpha_frac": 0.5332511978,
"autogenerated": false,
"ratio": 3.5300086981733836,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063259895973383,
"avg_score": null,
"num_lines": null
} |
"""A test suite for solverwrappers that implement solve methods for
canonical linear constraints. Wrappers considered include:
'cvxpy', 'qpOASES', "ecos", 'hotqpOASES', 'seidel'.
"""
import pytest
import numpy as np
import numpy.testing as npt
import toppra
import toppra.constraint as constraint
import cvxpy
from ..testing_flags import FOUND_CXPY, FOUND_MOSEK, FOUND_OPENRAVEPY
toppra.setup_logging(level="INFO")
class RandomSecondOrderLinearConstraint(constraint.linear_constraint.LinearConstraint):
    """A random Second-Order non-identical constraint.
    This constraint is defined solely for testing purposes. It accepts
    a degree of freedom, then generates the coefficients randomly.
    """
    def __init__(self, dof, discretization_scheme=constraint.DiscretizationType.Collocation):
        # dof: number of degrees of freedom the random coefficients span.
        super(RandomSecondOrderLinearConstraint, self).__init__()
        self.dof = dof
        self.set_discretization_type(discretization_scheme)
        # Coefficients vary per grid point, hence "non-identical".
        self.identical = False
        self._format_string = " Random Second-Order constraint (dof={:d}) \n".format(
            self.dof)
    def compute_constraint_params(self, path, gridpoints):
        """Return random canonical-linear coefficients (a, b, c, F, g).

        Shapes follow the (N+1, dof[, dof]) convention of the base class;
        no velocity bounds are produced (the last two entries are None).
        """
        N = gridpoints.shape[0] - 1
        a = np.random.randn(N + 1, self.dof)
        b = np.random.randn(N + 1, self.dof)
        c = np.random.randn(N + 1, self.dof)
        F = np.random.randn(N + 1, self.dof, self.dof)
        g = np.random.rand(N + 1, self.dof)
        for i in range(N + 1):
            # Offset g by F @ c at each grid point — presumably to keep the
            # random constraint satisfiable; TODO confirm intent.
            g[i] += F[i].dot(c[i])
        if self.discretization_type == constraint.DiscretizationType.Collocation:
            return a, b, c, F, g, None, None
        elif self.discretization_type == constraint.DiscretizationType.Interpolation:
            return constraint.canlinear_colloc_to_interpolate(
                a, b, c, F, g, None, None, gridpoints, identical=False)
        else:
            raise NotImplementedError("Other form of discretization not supported!")
@pytest.fixture(scope='class', params=['vel_accel'])
def basic_init_fixture(request):
    """ A fixture for testing basic capability of the solver wrapper.
    This test case has only two constraints, one velocity constraint
    and one acceleration constraint.
    """
    dof = 6
    np.random.seed(1)  # Use the same randomly generated way pts
    way_pts = np.random.randn(4, dof) * 0.6
    N = 200
    path = toppra.SplineInterpolator(np.linspace(0, 1, 4), way_pts)
    ss = np.linspace(0, 1, N + 1)
    # Velocity Constraint
    vlim_ = np.random.rand(dof) * 10 + 10
    vlim = np.vstack((-vlim_, vlim_)).T  # symmetric (lower, upper) bounds
    pc_vel = constraint.JointVelocityConstraint(vlim)
    # Acceleration Constraints
    alim_ = np.random.rand(dof) * 10 + 100
    alim = np.vstack((-alim_, alim_)).T  # symmetric (lower, upper) bounds
    pc_acc = constraint.JointAccelerationConstraint(alim)
    # random Second Order Constraint, only use for testing
    pc_rand = RandomSecondOrderLinearConstraint(dof)
    pcs = [pc_vel, pc_acc, pc_rand]
    yield pcs, path, ss, vlim, alim
    print("\n [TearDown] Finish PP Fixture")
@pytest.mark.parametrize("solver_name", ['cvxpy', 'qpOASES', "ecos", 'hotqpOASES', 'seidel'])
@pytest.mark.parametrize("i", [3, 10, 30])
@pytest.mark.parametrize("H", [np.array([[1.5, 0], [0, 1.0]]), np.zeros((2, 2)), None])
@pytest.mark.parametrize("g", [np.array([0.2, -1]), np.array([0.5, 1]), np.array([2.0, 1])])
@pytest.mark.parametrize("x_ineq", [(0.1, 1), (0.2, 0.2), (0.4, 0.3), (np.nan, np.nan)])
@pytest.mark.skipif(not FOUND_CXPY, reason="This test requires cvxpy to validate results.")
def test_basic_correctness(basic_init_fixture, solver_name, i, H, g, x_ineq):
    """Basic test case for solver wrappers.
    The input fixture `basic_init_fixture` has two constraints, one
    velocity and one acceleration. Hence, in this test, I directly
    formulate an optimization with cvxpy and compare the result with
    the result obtained from the solver wrapper.
    """
    constraints, path, path_discretization, vlim, alim = basic_init_fixture
    if solver_name == "cvxpy":
        from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper
        solver = cvxpyWrapper(constraints, path, path_discretization)
    elif solver_name == 'qpOASES':
        from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper
        solver = qpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'hotqpOASES':
        from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper
        solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'ecos' and H is None:
        # ecos and seidel are LP solvers: only exercised when there is no
        # quadratic term.
        from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper
        solver = ecosWrapper(constraints, path, path_discretization)
    elif solver_name == 'seidel' and H is None:
        from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper
        solver = seidelWrapper(constraints, path, path_discretization)
    else:
        return True  # Skip all other (solver, H) combinations
    xmin, xmax = x_ineq
    xnext_min = 0
    xnext_max = 1
    # Results from solverwrapper to test
    solver.setup_solver()
    # Two warm-up solves at neighbouring stages before the one under test
    # (exercises warm-start paths in the stateful wrappers).
    result_ = solver.solve_stagewise_optim(i - 2, H, g, xmin, xmax, xnext_min, xnext_max)
    result_ = solver.solve_stagewise_optim(i - 1, H, g, xmin, xmax, xnext_min, xnext_max)
    solverwrapper_result = solver.solve_stagewise_optim(i, H, g, xmin, xmax, xnext_min, xnext_max)
    solver.close_solver()
    # Results from cvxpy, used as the actual, desired values
    ux = cvxpy.Variable(2)
    u = ux[0]
    x = ux[1]
    _, _, _, _, _, _, xbound = solver.params[0]  # vel constraint
    a, b, c, F, h, ubound, _ = solver.params[1]  # accel constraint
    a2, b2, c2, F2, h2, _, _ = solver.params[2]  # random constraint
    Di = path_discretization[i + 1] - path_discretization[i]
    v = a[i] * u + b[i] * x + c[i]
    v2 = a2[i] * u + b2[i] * x + c2[i]
    # NOTE(review): `F * v` uses the pre-1.0 cvxpy multiplication API.
    cvxpy_constraints = [
        x <= xbound[i, 1],
        x >= xbound[i, 0],
        F * v <= h,
        F2[i] * v2 <= h2[i],
        x + u * 2 * Di <= xnext_max,
        x + u * 2 * Di >= xnext_min,
    ]
    # nan bounds mean "no stagewise bound on x".
    if not np.isnan(xmin):
        cvxpy_constraints.append(x <= xmax)
        cvxpy_constraints.append(x >= xmin)
    if H is not None:
        objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
    else:
        objective = cvxpy.Minimize(g * ux)
    problem = cvxpy.Problem(objective, cvxpy_constraints)
    problem.solve(verbose=True)  # test with the same solver as cvxpywrapper
    if problem.status == "optimal":
        cvxpy_result = np.array(ux.value).flatten()
        solverwrapper_result = np.array(solverwrapper_result).flatten()
        npt.assert_allclose(solverwrapper_result, cvxpy_result, atol=5e-2, rtol=1e-5)  # Very bad accuracy? why?
    else:
        # Infeasible problem: the wrapper signals failure with a nan vector.
        assert np.all(np.isnan(solverwrapper_result))
@pytest.mark.parametrize("solver_name", ['cvxpy', 'qpOASES', 'ecos', 'hotqpOASES', 'seidel'])
def test_infeasible_instance(basic_init_fixture, solver_name):
    """If the given parameters are infeasible, the solverwrapper should
    terminate gracefully and return a numpy vector [nan, nan].
    """
    constraints, path, path_discretization, vlim, alim = basic_init_fixture
    if solver_name == "cvxpy":
        from toppra.solverwrapper.cvxpy_solverwrapper import cvxpyWrapper
        solver = cvxpyWrapper(constraints, path, path_discretization)
    elif solver_name == 'qpOASES':
        from toppra.solverwrapper.qpoases_solverwrapper import qpOASESSolverWrapper
        solver = qpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'hotqpOASES':
        from toppra.solverwrapper.hot_qpoases_solverwrapper import hotqpOASESSolverWrapper
        solver = hotqpOASESSolverWrapper(constraints, path, path_discretization)
    elif solver_name == 'ecos':
        from toppra.solverwrapper.ecos_solverwrapper import ecosWrapper
        solver = ecosWrapper(constraints, path, path_discretization)
    elif solver_name == 'seidel':
        from toppra.solverwrapper.cy_seidel_solverwrapper import seidelWrapper
        solver = seidelWrapper(constraints, path, path_discretization)
    g = np.r_[0, 1].astype(float)
    solver.setup_solver()
    # xmin=1.1 > xmax=1.0: empty stagewise bound.
    result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, np.nan, np.nan)
    assert np.all(np.isnan(result))
    # Both the stagewise and the next-stage bounds are empty.
    result = solver.solve_stagewise_optim(0, None, g, 1.1, 1.0, 0, -0.5)
    assert np.all(np.isnan(result))
    # Only the next-stage bound (0 > -0.5) is empty.
    result = solver.solve_stagewise_optim(0, None, g, np.nan, np.nan, 0, -0.5)
    assert np.all(np.isnan(result))
    solver.close_solver()
| {
"repo_name": "hungpham2511/toppra",
"path": "tests/tests/solverwrapper/test_basic_can_linear.py",
"copies": "1",
"size": "8575",
"license": "mit",
"hash": -3632063118896570400,
"line_mean": 43.2010309278,
"line_max": 112,
"alpha_frac": 0.6681049563,
"autogenerated": false,
"ratio": 3.199626865671642,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4367731821971642,
"avg_score": null,
"num_lines": null
} |
"""A test suite that doesn't query the Google API.
Avoiding direct network access is beneficial in that it markedly speeds up
testing, avoids error-prone credential setup, and enables validation even if
internet access is unavailable.
"""
from datetime import datetime
import unittest
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import mock
import gspread
from tests import test
from tests import test_utils
class MockGspreadTest(unittest.TestCase):
    """This is the base class for all tests not accessing the API.
    IMPORTANT: This class must be inherited _BEFORE_ a test suite inheriting
    from GspreadTest. This allows MockGspreadTest.setUpClass to clobber the
    one inherited from GspreadTest which authorizes with the Google API.
    """
    @classmethod
    def setUpClass(cls):
        # Build an empty config and an unauthenticated client; subclasses
        # fill the config and mock out the network-facing client methods.
        try:
            cls.config = ConfigParser.RawConfigParser()
            cls.gc = gspread.client.Client(auth={})
        except IOError as e:
            # NOTE(review): neither call above reads a file, so this handler
            # looks like a leftover from a config-file-reading version --
            # confirm before relying on it.
            msg = "Can't find %s for reading test configuration. "
            raise Exception(msg % e.filename)
class MockClientTest(MockGspreadTest, test.ClientTest):
    """Test for gspread.Client that mocks out the server response.
    The tests themselves are inherited from ClientTest so no redefinition is
    necessary.
    """
    @classmethod
    def setUpClass(cls):
        super(MockClientTest, cls).setUpClass()
        # Canned identifiers used by the inherited ClientTest assertions.
        key = '0123456789ABCDEF'
        title = 'This is a spreadsheet title'
        url = 'https://docs.google.com/spreadsheet/ccc?key=' + key
        updated = datetime.now()
        dev_email = 'foobar@developer.gserviceaccount.com'
        user_name = 'First Last'
        user_email = 'real_email@gmail.com'
        # Initialize mock ConfigParser so tests read the canned values.
        cls.config.add_section('Spreadsheet')
        cls.config.set('Spreadsheet', 'key', key)
        cls.config.set('Spreadsheet', 'title', title)
        cls.config.set('Spreadsheet', 'url', url)
        # Set up spreadsheet mock: a feed with a single matching entry.
        feed_obj = test_utils.SpreadsheetFeed(updated, dev_email)
        feed_obj.add_entry(key, title, user_name, user_email, updated)
        feed = feed_obj.to_xml()
        cls.gc.get_spreadsheets_feed = mock.Mock(return_value=feed)
        # POST requests (e.g. spreadsheet creation) echo back the fixed key.
        post_mock = mock.MagicMock()
        post_mock.return_value.json.return_value = {'id': key}
        cls.gc.session.post = post_mock
class MockSpreadsheetTest(MockGspreadTest, test.SpreadsheetTest):
    """Test for gspread.Spreadsheet that mocks out the server response.
    The tests themselves are inherited from SpreadsheetTest so no redefinition
    is necessary.
    """
    @classmethod
    def setUpClass(cls):
        super(MockSpreadsheetTest, cls).setUpClass()
        updated = datetime.now()
        user_name = 'First Last'
        user_email = 'real_email@gmail.com'
        key = '0123456789ABCDEF'
        title = 'This is a spreadsheet title'
        # Worksheet feed describing the single worksheet of the spreadsheet.
        ws_feed = test_utils.WorksheetFeed(updated, user_name, user_email,
                                           key, title)
        dev_email = 'foobar@developer.gserviceaccount.com'
        ss_feed = test_utils.SpreadsheetFeed(updated, dev_email)
        ss_feed.add_entry(key, title, user_name, user_email, updated)
        ws_key = 'AB64KEY'
        ws_title = 'WS Title'
        ws_id = 123456789
        ws_version = 'avkey'
        num_cols = 10
        num_rows = 10
        ws_updated = updated
        ws_feed.add_entry(ws_key, ws_title, ws_id, ws_version, num_cols,
                          num_rows, ws_updated)
        # Initialize mock ConfigParser so tests read the canned values.
        cls.config.add_section('Spreadsheet')
        cls.config.set('Spreadsheet', 'id', key)
        cls.config.set('Spreadsheet', 'title', title)
        cls.config.set('Spreadsheet', 'sheet1_title', ws_title)
        # Set up mocks for both feeds consumed by Spreadsheet.
        cls.gc.get_spreadsheets_feed = mock.Mock(return_value=ss_feed.to_xml())
        cls.gc.get_worksheets_feed = mock.Mock(return_value=ws_feed.to_xml())
| {
"repo_name": "ShivaShinde/gspread",
"path": "tests/mock_tests.py",
"copies": "1",
"size": "3995",
"license": "mit",
"hash": 3669611813422431700,
"line_mean": 33.1452991453,
"line_max": 79,
"alpha_frac": 0.6515644556,
"autogenerated": false,
"ratio": 4.0030060120240485,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 117
} |
"""A test that all code scores above a 9.25 in pylint"""
import subprocess
import re
import os.path
# Matches pylint's final rating line, capturing the (possibly negative) score.
SCORE_REGEXP = re.compile(
    r'^Your\ code\ has\ been\ rated\ at\ (\-?[0-9\.]+)/10')

# Repository root for the tools, two directories above this test file.
TOOLS_ROOT = os.path.dirname(os.path.dirname(__file__))


def parse_score(pylint_output):
    """Parse the score out of pylint's output as a float.

    Scans each line for pylint's rating message and returns the first
    score found. If the score is not found, return 0.0.
    """
    for line in pylint_output.splitlines():
        # Call .match() on the compiled pattern directly rather than
        # re-passing it through the module-level re.match().
        match = SCORE_REGEXP.match(line)
        if match:
            return float(match.group(1))
    return 0.0
def execute_pylint(filename):
    """Run pylint on *filename* as a subprocess and capture its output.

    Returns a tuple (exit_status, stdout, stderr).
    """
    proc = subprocess.Popen(
        ["pylint", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    return proc.poll(), out, err
# Modules (relative to TOOLS_ROOT) that must meet the minimum pylint score.
FILES = ["build_api.py", "config.py", "colorize.py", "detect_targets.py",
         "hooks.py", "libraries.py", "memap.py", "options.py", "paths.py",
         "targets.py", "test/pylint.py"]
if __name__ == "__main__":
    for python_module in FILES:
        _, stdout, stderr = execute_pylint(os.path.join(TOOLS_ROOT,
                                                        python_module))
        score = parse_score(stdout)
        # Print the full pylint report only for modules below the threshold.
        if score < 9.25:
            print(stdout)
| {
"repo_name": "mmorenobarm/mbed-os",
"path": "tools/test/pylint.py",
"copies": "61",
"size": "1359",
"license": "apache-2.0",
"hash": -1846970968414263800,
"line_mean": 27.3125,
"line_max": 76,
"alpha_frac": 0.5849889625,
"autogenerated": false,
"ratio": 3.5669291338582676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021809309309309313,
"num_lines": 48
} |
# A test that stresses the serve handles. We spin up a backend with a bunch
# (0-2) of replicas that just forward requests to another backend.
#
# By comparing using the forward replicas with just calling the worker
# replicas, we measure the (latency) overhead in the handle. This answers
# the question: How much of a latency/throughput hit is there when I
# compose models?
#
# By comparing the qps as we fix the number of forwarder replicas and vary the
# number of worker replicas, we measure the limit of a single async actor. This
# answers the question: How many "ForwardActor"s or these kinds of high-level
# pipeline workers do I need to provision for my workload? every 1k qps,
# 2k qps?
#
# Sample output:
# 0 forwarders and 1 worker replicas: 1282 requests/s
# 0 forwarders and 5 worker replicas: 1375 requests/s
# 0 forwarders and 10 worker replicas: 1362 requests/s
# 1 forwarders and 1 worker replicas: 608 requests/s
# 1 forwarders and 5 worker replicas: 626 requests/s
# 1 forwarders and 10 worker replicas: 627 requests/s
# 2 forwarders and 1 worker replicas: 609 requests/s
# 2 forwarders and 5 worker replicas: 620 requests/s
# 2 forwarders and 10 worker replicas: 609 requests/s
import asyncio
import time
import ray
from ray import serve
from ray.serve import BackendConfig
# Number of requests issued in the timed section of each run.
num_queries = 10000
# Effectively unlimited queueing so backends never reject requests.
max_concurrent_queries = 100000
# Attach to an already-running Ray cluster.
ray.init(address="auto")
def worker(_):
    """Trivial backend: ignore the request and return a constant payload."""
    payload = b"Hello World"
    return payload
class ForwardActor:
    """Middle-tier backend that forwards every request to the "worker" backend."""
    def __init__(self, sync: bool):
        client = serve.connect()
        self.sync = sync
        # Handle to the downstream backend; `sync` selects the handle flavor.
        self.handle = client.get_handle("worker", sync=sync)
    async def __call__(self, _):
        if self.sync:
            await self.handle.remote()
        else:
            # Async handles: the inner await issues the call, the outer
            # await waits for its result.
            await (await self.handle.remote_async())
async def run_test(num_replicas, num_forwarders, sync):
    """Measure qps through `num_forwarders` forwarders onto `num_replicas`
    worker replicas, using a sync or async serve handle, and print the result.
    """
    client = serve.start()
    client.create_backend(
        "worker",
        worker,
        config=BackendConfig(
            num_replicas=num_replicas,
            max_concurrent_queries=max_concurrent_queries,
        ))
    client.create_endpoint("worker", backend="worker")
    endpoint_name = "worker"
    # With zero forwarders we hit the workers directly (the baseline).
    if num_forwarders > 0:
        client.create_backend(
            "ForwardActor",
            ForwardActor,
            sync,
            config=BackendConfig(
                num_replicas=num_forwarders,
                max_concurrent_queries=max_concurrent_queries))
        client.create_endpoint("ForwardActor", backend="ForwardActor")
        endpoint_name = "ForwardActor"
    handle = client.get_handle(endpoint_name, sync=sync)
    # warmup - helpful to wait for gc.collect() and actors to start
    start = time.time()
    while time.time() - start < 1:
        if sync:
            ray.get(handle.remote())
        else:
            ray.get(await handle.remote_async())
    # real test: issue all requests, then block on the whole batch.
    start = time.time()
    if sync:
        ray.get([handle.remote() for _ in range(num_queries)])
    else:
        ray.get([(await handle.remote_async()) for _ in range(num_queries)])
    qps = num_queries / (time.time() - start)
    print(
        f"Sync: {sync}, {num_forwarders} forwarders and {num_replicas} worker "
        f"replicas: {int(qps)} requests/s")
    # Tear down serve so the next configuration starts from a clean slate.
    client.shutdown()
async def main():
    # Sweep handle flavor x pipeline depth x worker fan-out.
    for sync in [True, False]:
        for num_forwarders in [0, 1, 2]:
            for num_replicas in [1, 5, 10]:
                await run_test(num_replicas, num_forwarders, sync)
# NOTE(review): asyncio.run(main()) is the modern equivalent; kept as-is in
# case ray interacts with the ambient event loop -- confirm before changing.
asyncio.get_event_loop().run_until_complete(main())
| {
"repo_name": "ray-project/ray",
"path": "python/ray/serve/benchmarks/handle.py",
"copies": "1",
"size": "3481",
"license": "apache-2.0",
"hash": -3951125213691100700,
"line_mean": 30.6454545455,
"line_max": 79,
"alpha_frac": 0.6575696639,
"autogenerated": false,
"ratio": 3.734978540772532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892548204672532,
"avg_score": 0,
"num_lines": 110
} |
"""A test that subscribes to NumPy arrays.
Uses REQ/REP (on PUB/SUB socket + 1) to synchronize
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2010 Brian Granger
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import time
import zmq
import numpy
def sync(connect_to):
    """Block until the publisher acknowledges us via a REQ/REP handshake
    on the port one above the PUB/SUB port in *connect_to*.
    """
    # use connect socket + 1
    sync_with = ':'.join(connect_to.split(':')[:-1] +
                [str(int(connect_to.split(':')[-1]) + 1)]
                )
    ctx = zmq.Context.instance()
    s = ctx.socket(zmq.REQ)
    s.connect(sync_with)
    # NOTE(review): sending a str is Python-2 era zmq usage; Python 3 would
    # need bytes here.
    s.send('READY')
    s.recv()
def main():
    # Python 2 script: parse args, subscribe, time the receipt of
    # `array_count` pickled numpy arrays and report throughput.
    if len (sys.argv) != 3:
        print 'usage: subscriber <connect_to> <array-count>'
        sys.exit (1)
    try:
        connect_to = sys.argv[1]
        array_count = int (sys.argv[2])
    except (ValueError, OverflowError), e:
        print 'array-count must be integers'
        sys.exit (1)
    ctx = zmq.Context()
    s = ctx.socket(zmq.SUB)
    s.connect(connect_to)
    # Subscribe to everything (empty topic filter).
    s.setsockopt(zmq.SUBSCRIBE,'')
    # Tell the publisher we are ready before it starts sending.
    sync(connect_to)
    start = time.clock()
    print "Receiving arrays..."
    for i in range(array_count):
        a = s.recv_pyobj()
    print "   Done."
    end = time.clock()
    # Elapsed time in microseconds; clamp to 1 to avoid division by zero.
    elapsed = (end - start) * 1000000
    if elapsed == 0:
        elapsed = 1
    throughput = (1000000.0 * float (array_count)) / float (elapsed)
    # `a` is the last array received; all arrays are assumed the same size.
    message_size = a.nbytes
    megabits = float (throughput * message_size * 8) / 1000000
    print "message size: %.0f [B]" % (message_size, )
    print "array count: %.0f" % (array_count, )
    print "mean throughput: %.0f [msg/s]" % (throughput, )
    print "mean throughput: %.3f [Mb/s]" % (megabits, )
    time.sleep(1.0)
if __name__ == "__main__":
    main()
| {
"repo_name": "swn1/pyzmq",
"path": "examples/pubsub/subscriber.py",
"copies": "8",
"size": "1937",
"license": "bsd-3-clause",
"hash": -4551727521045227000,
"line_mean": 25.1756756757,
"line_max": 78,
"alpha_frac": 0.5322663913,
"autogenerated": false,
"ratio": 3.521818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8054084573118183,
"avg_score": null,
"num_lines": null
} |
"""A text based UI, implemented with cmd2: https://bitbucket.org/catherinedevlin/cmd2
BeamAnalyzer v0.4.0
Copyright 2014 Evan Murawski
License: MIT
"""
__author__ = 'Evan Murawski'
from backend.beam import Beam
from backend.interactions import InteractionLocationError, Interaction, Force, Moment, Dist_Force
import cmd2 as cmd
from cmd2 import options, make_option
import backend.solver as solver
from backend.solver import SolverError
import backend.shearmomentgenerator as shearmomentgenerator
from backend.shearmomentgenerator import Shear_Moment_Error
import matplotlib.pyplot as plt
import numpy as np
class Text_Interface(cmd.Cmd):
    """Defines the text UI, using cmd2"""
    # Contains the active beam being edited/solved.
    beam = None
    # Fraction of the data range used as vertical padding on the plots.
    PLOT_MARGIN = 0.15
    def preloop(self):
        """Before the loop starts: Request the beam length from the
        user and create a new beam.
        """
        valid = False
        while not valid:
            try:
                length = float(input('Enter the length of the beam: '))
            except ValueError:
                print('Length must be a positive number.')
                continue
            if length > 0:
                self.beam = Beam(length)
                valid = True
            else:
                print('Length must be a positive number.')
    def do_addf(self, arguments):
        """Add a force. Usage: 
            Add a known force: addf location magnitude
            Add an unknown force: addf location
        """
        list_args = str.split(arguments)
        float_args = []
        # Attempt to convert arguments to floating point numbers.
        try:
            for item in list_args:
                float_args.append(float(item))
        except ValueError:
            # NOTE(review): "numers" is a typo in the user-facing message,
            # left untouched here because it is runtime behavior.
            print("Arguments must be numers.")
            return
        # Determine if this will be a known or unknown force:
        # one arg -> unknown (magnitude placeholder 0), two args -> known.
        if len(list_args) == 1:
            known = False
            float_args.append(float(0))
        elif len(list_args) == 2:
            known = True
        else:
            print("Arguments must be 1 or 2 numbers.")
            return
        # Add the force.
        try:
            self.beam.add_interaction(Force(float_args[0], float_args[1], known))
        except InteractionLocationError:
            print("Invalid location for force.")
            return
        print("Added.")
    def do_addm(self, arguments):
        """Add a moment. Usage: 
            Add a known moment: addm location magnitude
            Add an unknown moment: addm location
        """
        list_args = str.split(arguments)
        float_args = []
        # Attempt to convert the args to floating point numbers.
        try:
            for item in list_args:
                float_args.append(float(item))
        except ValueError:
            print("Arguments must be numers.")
            return
        # Determine if this is a known or unknown moment (see do_addf).
        if len(list_args) == 1:
            known = False
            float_args.append(float(0))
        elif len(list_args) == 2:
            known = True
        else:
            print("Arguments must be 1 or 2 numbers.")
            return
        # Add the moment.
        try:
            self.beam.add_interaction(Moment(float_args[0], float_args[1], known))
        except InteractionLocationError:
            print("Invalid location for moment.")
            return
        print("Added.")
    def do_adddf(self, arguments):
        """Add a distributed force. Usage: 
            Add a distributed force: addf start magnitude end
        """
        list_args = str.split(arguments)
        float_args = []
        # Attempt to convert arguments to floating point numbers.
        try:
            for item in list_args:
                float_args.append(float(item))
        except ValueError:
            print("Arguments must be numers.")
            return
        # Distributed forces are always fully specified: start, magnitude, end.
        if len(list_args) != 3:
            print("Arguments must be 3 numbers.")
            return
        # Add the force.
        try:
            self.beam.add_interaction(Dist_Force(float_args[0], float_args[1], float_args[2]))
        except InteractionLocationError:
            print("Invalid location for distributed force.")
            return
        print("Added.")
    def do_view(self, arguments):
        """View the current status of the beam."""
        print("\n", self.beam, "\n")
        print("Unknowns: ",self.beam.count_unknowns(), "\n")
    def do_solve(self, arguments):
        """Solve the current beam."""
        # NOTE(review): SolverError is imported but not caught here --
        # an unsolvable beam will surface the raw exception.
        solver.solve(self.beam)
        self.do_view(None)
    def do_reset(self, arguments):
        """Reset the beam - lets you create a new beam."""
        self.preloop()
        print('Beam reset.')
    @options([make_option('-s', '--step', type="float", help="Specify the step size."),
        make_option('-a', '--annotate', action="store_true", help="Annotate key points on the graph.")])
    def do_plot(self, arguments, opts=None):
        """Plot the shear / moment diagram. Usage: 
            plot [-s stepsize] (default step size 0.01)
        """
        step_size = 0.01
        annotate = False
        if opts.step != None:
            step_size = opts.step
        if opts.annotate != None:
            annotate = True
        # Generate the shear and moment points, using generate_numerical
        shear_moment = shearmomentgenerator.generate_numerical(self.beam, step_size)
        # Plot the points: shear on top (211), moment below (212).
        x = np.arange(0, self.beam.length, step_size)
        shear = [y[0] for y in shear_moment]
        moment = [y[1] for y in shear_moment]
        fig = plt.figure()
        shear_plot = fig.add_subplot(211)
        shear_plot.plot(x, shear)
        plt.title('Shear')
        moment_plot = fig.add_subplot(212)
        moment_plot.plot(x, moment)
        plt.title('Moment')
        # Experimental: annotate the plot with the magnitudes of the
        # interactions, sampling just before and just after each location
        # to show discontinuities.
        if annotate:
            for interaction in self.beam.interactions:
                point_one = int(interaction.location / step_size) - 1
                point_two = int(interaction.location / step_size)
                shear_plot.annotate('(' + str(interaction.location) +
                    ', ' + str(shear[point_one]) + ')', xy=(interaction.location, shear[point_one]), textcoords='offset points')
                if isinstance(interaction, Moment):
                    moment_plot.annotate('(' + str(interaction.location) +
                        ', ' + str(moment[point_one]) + ')', xy=(interaction.location, moment[point_one]), textcoords='offset points')
                if interaction.location != self.beam.length:
                    shear_plot.annotate('(' + str(interaction.location) +
                        ', ' + str(shear[point_two]) + ')', xy=(interaction.location, shear[point_two]), textcoords='offset points')
                    moment_plot.annotate('(' + str(interaction.location) +
                        ', ' + str(moment[point_two]) + ')', xy=(interaction.location, moment[point_two]), textcoords='offset points')
        # Pad the y-axes by PLOT_MARGIN of the data range.
        shear_plot.axis([min(x), max(x), min(shear) - self.PLOT_MARGIN * (max(shear)-min(shear)), max(shear) + self.PLOT_MARGIN * (max(shear)-min(shear))])
        moment_plot.axis([min(x), max(x), min(moment) - self.PLOT_MARGIN * (max(moment)-min(moment)), max(moment) + self.PLOT_MARGIN * (max(moment)-min(moment))])
        plt.show()
if __name__ == '__main__':
    """The main method."""
    interface = Text_Interface()
    interface.cmdloop()
| {
"repo_name": "EvanMurawski/BeamAnalyzer",
"path": "beamanalyzer/textinterface.py",
"copies": "1",
"size": "7669",
"license": "mit",
"hash": -5240379892001479000,
"line_mean": 30.4303278689,
"line_max": 162,
"alpha_frac": 0.5694353892,
"autogenerated": false,
"ratio": 4.109860664523044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012351349302012177,
"num_lines": 244
} |
""" A text editor. """
# Standard library imports.
from os.path import basename
# Enthought library imports.
from pyface.workbench.api import TraitsUIEditor
from pyface.api import FileDialog, CANCEL
from traits.api import Code, Instance
from traitsui.api import CodeEditor, Group, Item, View
from traitsui.key_bindings import KeyBinding, KeyBindings
from traitsui.menu import NoButtons,ApplyButton, OKCancelButtons
# Local imports.
from text_editor_handler import TextEditorHandler
def _id_generator():
""" A generator that returns the next number for untitled files. """
i = 1
while True:
yield(i)
i += 1
return
_id_generator = _id_generator()
from traits.api import Button
class TextEditor(TraitsUIEditor):
    """ A text editor. """
    #### 'TextEditor' interface ###############################################
    # The key bindings used by the editor.
    key_bindings = Instance(KeyBindings)
    # The text being edited.
    text = Code
    # Button that runs the edited file as Python.
    runbut = Button
    ###########################################################################
    # 'IEditor' interface.
    ###########################################################################
    def _runbut_fired(self):
        # Traits event handler: run the file when the button is pressed.
        self.run()
    def save(self):
        """ Saves the text to disk. """
        # If the file has not yet been saved then prompt for the file name.
        if len(self.obj.path) == 0:
            self.save_as()
        else:
            # Python 2 style file I/O (file() builtin).
            f = file(self.obj.path, 'w')
            f.write(self.text)
            f.close()
            # We have just saved the file so we ain't dirty no more!
            self.dirty = False
        return
    def save_as(self):
        """ Saves the text to disk after prompting for the file name. """
        dialog = FileDialog(
            parent           = self.window.control,
            action           = 'save as',
            default_filename = self.name,
            wildcard         = FileDialog.WILDCARD_PY
        )
        if dialog.open() != CANCEL:
            # Update the editor.
            self.id   = dialog.path
            self.name = basename(dialog.path)
            # Update the resource.
            self.obj.path = dialog.path
            # Save it!
            self.save()
        return
    ###########################################################################
    # 'TraitsUIEditor' interface.
    ###########################################################################
    def create_ui(self, parent):
        """ Creates the traits UI that represents the editor. """
        ui = self.edit_traits(
            parent=parent, view=self._create_traits_ui_view(), kind='subpanel'
        )
        return ui
    ###########################################################################
    # 'TextEditor' interface.
    ###########################################################################
    def run(self):
        """ Runs the file as Python. """
        # The file must be saved first!
        self.save()
        # Execute the code in the Python shell view, if one is open.
        if len(self.obj.path) > 0:
            view = self.window.get_view_by_id(
                'envisage.plugins.python_shell_view'
            )
            if view is not None:
                view.execute_command(
                    'execfile(r"%s")' % self.obj.path, hidden=False
                )
        return
    def select_line(self, lineno):
        """ Selects the specified line. """
        self.ui.info.text.selected_line = lineno
        return
    ###########################################################################
    # Private interface.
    ###########################################################################
    #### Trait initializers ###################################################
    def _key_bindings_default(self):
        """ Trait initializer. """
        # Ctrl-S saves; Ctrl-R runs (dispatched by TextEditorHandler).
        key_bindings = KeyBindings(
            KeyBinding(
                binding1    = 'Ctrl-s',
                description = 'Save the file',
                method_name = 'save'
            ),
            KeyBinding(
                binding1    = 'Ctrl-r',
                description = 'Run the file',
                method_name = 'run'
            )
        )
        return key_bindings
    #### Trait change handlers ################################################
    def _obj_changed(self, new):
        """ Static trait change handler. """
        # The path will be the empty string if we are editing a file that has
        # not yet been saved.
        if len(new.path) == 0:
            self.id   = self._get_unique_id()
            self.name = self.id
        else:
            self.id   = new.path
            self.name = basename(new.path)
            # Load the file contents into the editor (Python 2 file I/O).
            f = file(new.path, 'r')
            self.text = f.read()
            f.close()
        return
    def _text_changed(self, trait_name, old, new):
        """ Static trait change handler. """
        # Guard so the initial load does not mark the editor dirty.
        if self.traits_inited():
            self.dirty = True
        return
    def _dirty_changed(self, dirty):
        """ Static trait change handler. """
        # Reflect dirty state in the tab title with a trailing '*'.
        if len(self.obj.path) > 0:
            if dirty:
                self.name = basename(self.obj.path) + '*'
            else:
                self.name = basename(self.obj.path)
        return
    #### Methods ##############################################################
    def _create_traits_ui_view(self):
        """ Create the traits UI view used by the editor.
        fixme: We create the view dynamically to allow the key bindings to be
        created dynamically (we don't use this just yet, but obviously plugins
        need to be able to contribute new bindings).
        """
        view = View(
            Group(
                Item(
                    'text', editor=CodeEditor(key_bindings=self.key_bindings)
                ),
                Item('runbut', label = 'Run script...', show_label = False),
                show_labels = False
            ),
            id        = 'envisage.editor.text_editor',
            handler   = TextEditorHandler(),
            kind      = 'live',
            resizable = True,
            width     = 1.0,
            height    = 1.0,
            #buttons   = NoButtons,
            buttons=['OK'],
        )
        return view
    def _get_unique_id(self, prefix='Untitled '):
        """ Return a unique id for a new file. """
        # Python 2 generator protocol (.next()); loop until no open editor
        # already uses the candidate id.
        id = prefix + str(_id_generator.next())
        while self.window.get_editor_by_id(id) is not None:
            id = prefix + str(_id_generator.next())
        return id
#### EOF ######################################################################
| {
"repo_name": "LTS5/connectomeviewer",
"path": "cviewer/plugins/text_editor/editor/text_editor.py",
"copies": "1",
"size": "6824",
"license": "bsd-3-clause",
"hash": 5050727498481671000,
"line_mean": 27.0823045267,
"line_max": 79,
"alpha_frac": 0.4457796014,
"autogenerated": false,
"ratio": 4.79887482419128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016703327971776805,
"num_lines": 243
} |
"""A text library.
TextLib stores a set of strings containing English text as compact binary
data. It can also generate Pico-8 Lua code capable of accessing a string given
the string's string ID (returned by the encoder method). The goal is to make it
easy to write Pico-8 games that use a large quantity of English text without
storing that text in the code region of the cart.
Strings are not encoded exactly. To save space when storing multi-word
English phrases, word spaces are not stored. The generated Lua code uses
rules about English phrases to calculate word spacing in the final string.
See p8advent.tool for code that generates a full Pico-8 cart that replaces
string literals in Lua source with string library IDs. To allow code to defer
string assembly until the last minute, the code must explicitly call the t(sid)
function (added during cart processing) to get the string value. String IDs
are encoded as strings, and can be concatenated. (sub() also works if you're
careful: each string ID is three characters long.)
TextLib uses a technique similar to the one used by old 8-bit text adventure
games. Words are encoded as two bytes: a prefix ID and a suffix ID. A prefix
is a fixed length that you set when you instantiate TextLib, typically 1 or 2.
Each prefix has a list of suffixes indexed by the suffix ID. The decoded word
is simply the prefix followed by the suffix. A string is a sequence of
literal non-word characters and word byte pairs. See as_bytes() for a
description of the complete binary representation.
(It's debatable whether this is the best way to compress a set of short English
phrases for a text game. It's also debatable whether a Pico-8 text game
benefits from storing its text in a compacted form in cart data vs. in the
code region. And of course making a text game in Pico-8 is a dubious endeavor
to begin with. I just wanted to play with this technique.)
"""
__all__ = ['TextLib', 'encode_pscii']
from collections import defaultdict
import re
import sys
# Matches one "word": a run of ASCII letters and apostrophes.
_WORD = re.compile(r'[a-zA-Z\']+')
# A character set, which I'm going to call "pscii", consisting of all of the
# characters supported by TextLib. This corresponds to all of the characters
# supported by Pico-8 v0.1.3. A character's code is its index in this table.
# Notable missing chars include: $ \ @ ` (I believe 0.1.4 will add support
# for "\".)
CHAR_TABLE = ' !"#%\'()*+,-./0123456789:;<=>?abcdefghijklmnopqrstuvwxyz[]^_{~}'
# A format pattern for the Lua code to inject. This expects a format key of
# "text_start_addr" equal to the RAM address where the text data begins.
#
# _c(o) converts a character code to a single-character string.
# _o(c) converts a single-character string to its character code (or nil).
#
# _t(sid) calculates the string with the given ID. It uses the string jump
# table to find the character and word codes for the string, then builds the
# result. If the next byte has its high bit set, then it and the following
# byte are the prefix and suffix ID, respectively, of a word in the word
# table. Otherwise it is a character code. For a word, it finds the prefix
# using the word jump table, reads the prefix at that location (of a fixed
# length encoded at pos 0), then scans a list of null-terminated suffixes to
# find the appropriate suffix.
#
# Spaces are added according to English punctuation rules:
#
# * a space between words: "word word"
# * a space after sentence ending punctuation and closing brackets if
# followed by a word: !),.?:;]}
# * a space after a word if followed by opening brackets: ([{
# * double-quotes (") are treated as brackets, alternating between opening and
# closing brackets
#
# Local variables in _t():
#
# * ta: The text data start absolute address.
# * r : The result accumulator.
# * sids : A list of string IDs encoded as a string of three-char segments.
# * sid : The (numeric, decoded) string ID.
# * sc : The sentence count.
# * sa : The address of the first byte of the sentence string.
# This pointer is advanced during the sentence string loop.
# * sae : The address of the last byte of the sentence string + 1.
# * psa : The value at the sentence string pointer.
# * pi : The prefix index.
# * si : The suffix index.
# * wa : The address of the first byte of the prefix for the word.
# * pl : The prefix length.
# * pli : Prefix char index (0-based).
# * was : The address of the start of the word table.
# * lww : True if the last string part was a word.
# * lwep : True if the last string part was sentence-ending or
# bracket-closing punctuation.
# * qt : True if the next double-quote is bracket-closing.
#
# TODO: Treat ~ (61) as a paragraph break, reset double-quote state.
CHAR_TABLE_LUA = re.sub(r'"', '"..\'"\'.."', CHAR_TABLE)
CHAR_TABLE_LUA = re.sub(r'{', '{{', CHAR_TABLE_LUA)
CHAR_TABLE_LUA = re.sub(r'}', '}}', CHAR_TABLE_LUA)
P8ADVENT_LUA_PAT = (
'_ct="' + CHAR_TABLE_LUA + '"\n' +
"""
function _c(o) return sub(_ct,o+1,o+1) end
function _o(c)
local i
for i=1,#_ct do
if sub(_ct,i,i)==c then return i-1 end
end
return 63
end
function _t(sids)
local ta={text_start_addr}
local sidsi,sid,r,sc,sa,sae,psa,pi,si,wa,pl,pli,was,lww,lwep,qt
pl=peek(ta)
sc=bor(shl(peek(ta+2),8),peek(ta+1))
was=ta+bor(shl(peek(ta+sc*2+4),8),peek(ta+sc*2+3))
r=''
lww=false
lwep=false
qt=false
for sidsi=1,#sids,3 do
sid=bor(bor(_o(sub(sids,sidsi,sidsi)),
shl(_o(sub(sids,sidsi+1,sidsi+1)),6)),
shl(_o(sub(sids,sidsi+2,sidsi+2)),12))
sa=ta+bor(shl(peek(ta+sid*2+4),8),peek(ta+sid*2+3))
sae=ta+bor(shl(peek(ta+(sid+1)*2+4),8),peek(ta+(sid+1)*2+3))
while sa<sae do
psa=peek(sa)
if band(psa,128)==128 then
if (lww or lwep) r=r.." "
pi=band(psa,127)
si=peek(sa+1)
wa=ta+bor(shl(peek(was+pi*2+1),8),peek(was+pi*2))
for pli=0,pl-1 do
if (peek(wa+pli) > 0) r=r.._c(peek(wa+pli))
end
wa=wa+pl
while si>0 do
while band(peek(wa),128)~=128 and peek(wa)~=0 do wa+=1 end
wa+=1
si-=1
end
repeat
if peek(wa)==0 then break end
r=r.._c(band(peek(wa),127))
wa+=1
until band(peek(wa-1),128)==128
sa+=1
lww=true
lwep=false
else
if ((lww and ((psa==2 and qt)or(psa==6)or(psa==56)or(psa==60))) or
(lwep and psa==2 and not qt)) then
r=r.." "
end
r=r.._c(psa)
lww=false
lwep=((psa==2 and qt)or(psa==7)or(psa==10)or(psa==12)or(psa==24)or
(psa==25)or(psa==29)or(psa==57)or(psa==62))
if (psa==2) qt=not qt
end
sa+=1
end
end
return r
end
""")
class Error(Exception):
    """Base class for all textlib errors."""
class TooManyWordsForPrefixError(Error):
    """Raised when a prefix or suffix ID overflows its encoding width.

    The fix is to increase the prefix length and re-encode.
    """
def encode_pscii(s):
    """Encode an ASCII string as a bytestring in terms of the character table.

    The input is lowercased first, because pscii has no uppercase letters.

    Args:
      s: The Python string to encode.

    Returns:
      The bytestring of indexes into CHAR_TABLE.

    Raises:
      ValueError: The string contains a character not in CHAR_TABLE.
    """
    encoded = bytearray()
    current = None
    try:
        for current in s.lower():
            encoded.append(CHAR_TABLE.index(current))
    except ValueError:
        # Report the offending character before propagating the error.
        sys.stderr.write('Character out of supported range: {}\n'.format(
            repr(current)))
        raise
    return bytes(encoded)
class TextLib:
    """Accumulates strings and renders them as a compact binary text library.

    Words are stored as (prefix ID, suffix ID) byte pairs; see the module
    docstring and as_bytes() for the full binary layout.
    """

    def __init__(self, prefix_length=1):
        # Ordered list of word prefixes; a prefix's list position is its ID.
        self._prefix_lst = list()
        # prefix (bytes) -> list of suffixes; a suffix's position is its ID.
        self._word_lib = defaultdict(list)
        # string -> index into _string_lib_lst, to dedupe repeated strings.
        self._string_lib_map = dict()
        # Encoded strings in insertion order; list position is the string ID.
        self._string_lib_lst = list()
        # Fixed number of characters stored per prefix entry.
        self._prefix_length = prefix_length
        # Running total of raw input characters, for compression stats only.
        self._total_chars_stored = 0

    def _encode_word(self, w):
        """Encodes a word, adding it to the library if necessary.

        The result is a prefix index followed by a lookup index for the suffix.

        If a prefix index grows beyond 127 or a suffix index grows beyond 255,
        we raise an exception.  If this happens, increase the prefix length
        and try again.  (A test with a very large document needed a 10-bit
        index with a 1-byte prefix, but an 8-bit index with a 2-byte prefix.)

        Args:
          w: The word.

        Returns:
          A bytestring of the form <prefix_id><suffix_id>.
        """
        w = encode_pscii(w)
        if len(w) <= self._prefix_length:
            # Short word: null-pad so it is all prefix with an empty suffix.
            w += b'\x00' * (self._prefix_length - len(w))
            prefix = w
            suffix = b''
        else:
            prefix = w[:self._prefix_length]
            suffix = w[self._prefix_length:]
        if prefix not in self._word_lib:
            self._prefix_lst.append(prefix)
            prefix_id = len(self._prefix_lst) - 1
            if prefix_id > 127:
                # Only 7 bits are available: the high bit marks a word pair.
                raise TooManyWordsForPrefixError()
        else:
            prefix_id = self._prefix_lst.index(prefix)
        if suffix in self._word_lib[prefix]:
            suffix_id = self._word_lib[prefix].index(suffix)
        else:
            self._word_lib[prefix].append(suffix)
            suffix_id = len(self._word_lib[prefix]) - 1
            if suffix_id > 255:
                raise TooManyWordsForPrefixError()
        # Set high bit of prefix ID.
        prefix_id |= 128
        return bytes((prefix_id, suffix_id))

    def _encode_string(self, s):
        """Encodes the symbols of a string.

        Word spaces are dropped; everything matching _WORD is encoded via the
        word library, any other character as a literal pscii code.

        Args:
          s: The string.

        Returns:
          The byte encoding for the string (a bytearray).
        """
        result = bytearray()
        s_i = 0
        while s_i < len(s):
            if s[s_i] == ' ':
                # Word spaces are not stored; the Lua reader re-inserts them.
                s_i += 1
                continue
            m = _WORD.match(s[s_i:])
            if not m:
                # Not a word character: store the single pscii code literally.
                result.extend(encode_pscii(s[s_i]))
                s_i += 1
                continue
            result.extend(self._encode_word(m.group(0)))
            s_i += len(m.group(0))
        return result

    def _encode_string_id(self, id):
        """Encodes a string ID as three pscii characters.

        Args:
          id: The numeric ID, from 0 to 65535.

        Returns:
          The three-character string encoding of the ID.
        """
        # Add a special char to the table to make it 64 chars even.
        ct = CHAR_TABLE + '@'
        # Three 6-bit digits, least significant first.
        w1 = id & 63
        w2 = (id >> 6) & 63
        w3 = (id >> 12) & 63
        return ct[w1] + ct[w2] + ct[w3]

    def id_for_string(self, s):
        """Gets the ID for a string, adding it to the library if necessary.

        Args:
          s: The string.

        Returns:
          The string ID, encoded as a three-character pscii string.
        """
        # Collapse all whitespace runs to single spaces before encoding.
        s = re.sub(r'\s+', ' ', s)
        self._total_chars_stored += len(s)  # for stats
        if s not in self._string_lib_map:
            self._string_lib_lst.append(self._encode_string(s))
            self._string_lib_map[s] = len(self._string_lib_lst) - 1
        return self._encode_string_id(self._string_lib_map[s])

    def as_bytes(self):
        """Dump the entire library in its byte encoding.

        The prefix length and table sizes are not encoded.  It is expected
        that the generated access code will stay within expected ranges.
        TODO: This is dumb. I'm passing these values into the generated Lua,
        might as well store them with the bytes.

        Layout (offsets from position 0):
          0:             The prefix length.
          1 - 2:         The number of sentences, S, LSB first.
          3 - 2*S+2:     The string jump table, each entry as an offset from
                         pos 0, two bytes each, LSB first.
          2*S+3 - 2*S+4: The offset of the byte following the last byte of
                         string S, LSB first.  This lets the reader compute a
                         string's length from two adjacent jump-table entries,
                         and is also the offset of the word table (W_addr).
          2*S+5 - ...:   Encoded strings.  A byte with its high bit set is a
                         word prefix offset whose next byte is the suffix
                         offset; otherwise the byte is a pscii code.  Word
                         spaces are omitted and re-added by the renderer.
          W_addr - W_addr+2*W-1: The prefix jump table, each entry as an
                         offset from pos 0, two bytes each, LSB first.
          W_addr+2*W - ...: Word entries: the prefix (pscii) followed by all
                         of its suffixes (pscii); each suffix's final
                         character has its high bit set.

        Returns:
          A bytes object.
        """
        longest_string_size = 0
        most_lookup_entries_count = 0
        total_lookup_entries_count = 0
        longest_suffix_size = 0
        # Build the concatenated string data plus a per-string offset list
        # (one extra trailing entry so lengths can be computed by the reader).
        string_offset_list = [0]
        string_data = bytearray()
        for s in self._string_lib_lst:
            string_data.extend(s)
            string_offset_list.append(len(string_data))
            # NOTE(review): this compares the *cumulative* size, so the stat
            # ends up equal to the total data size rather than the longest
            # single string -- looks like it should be len(s); confirm.
            if len(string_data) > longest_string_size:
                longest_string_size = len(string_data)
        # Header is 3 bytes; jump table has S+1 two-byte entries.
        string_table_offset = 3 + 2 * len(self._string_lib_lst) + 2
        string_jump_tbl = bytearray()
        for e in string_offset_list:
            v = string_table_offset + e
            if v >= 65536:
                # Offsets are 16-bit; the library no longer fits.
                raise TooManyWordsForPrefixError()
            string_jump_tbl.append(v & 255)
            string_jump_tbl.append(v >> 8)
        # Build the word data: each prefix followed by its suffixes, with the
        # high bit set on each suffix's final byte (0 for an empty suffix).
        lookup_offset_list = [0]
        lookup_data = bytearray()
        for p in self._prefix_lst:
            lookup_data.extend(p)
            for suffix in self._word_lib[p]:
                if len(suffix) > 0:
                    lookup_data.extend(suffix)
                    lookup_data[-1] |= 0x80
                else:
                    lookup_data.append(0)
                if len(suffix) > longest_suffix_size:
                    longest_suffix_size = len(suffix)
            lookup_offset_list.append(len(lookup_data))
            if len(self._word_lib[p]) > most_lookup_entries_count:
                most_lookup_entries_count = len(self._word_lib[p])
            total_lookup_entries_count += len(self._word_lib[p])
        lookup_table_offset = (3 + len(string_jump_tbl) + len(string_data) +
                               2 * len(self._prefix_lst))
        lookup_prefix_tbl = bytearray()
        # We don't need the offset past the last lookup:
        lookup_offset_list.pop()
        for e in lookup_offset_list:
            v = lookup_table_offset + e
            if v >= 65536:
                raise TooManyWordsForPrefixError()
            lookup_prefix_tbl.append(v & 255)
            lookup_prefix_tbl.append(v >> 8)
        num_of_strings = len(self._string_lib_lst)
        num_of_prefixes = len(self._prefix_lst)
        # TODO: remove these, or make them an official feature:
        print('DEBUG: num_of_strings = {}'.format(num_of_strings))
        print('DEBUG: num_of_prefixes = {}'.format(num_of_prefixes))
        print('DEBUG: longest_string_size = {}'.format(longest_string_size))
        print('DEBUG: longest_suffix_size = {}'.format(longest_suffix_size))
        print('DEBUG: most_lookup_entries_count = {}'.format(most_lookup_entries_count))
        print('DEBUG: total_lookup_entries_count = {}'.format(total_lookup_entries_count))
        print('DEBUG: original text size = {}'.format(self._total_chars_stored))
        print('DEBUG: total text lib size = {}'.format(len(string_jump_tbl) +
                                                       len(string_data) +
                                                       len(lookup_prefix_tbl) +
                                                       len(lookup_data)))
        # Final image: 3-byte header, string jump table, string data, prefix
        # jump table, word data.
        return bytes(bytearray([self._prefix_length,
                                len(self._string_lib_lst) & 255,
                                len(self._string_lib_lst) >> 8]) +
                     string_jump_tbl +
                     string_data +
                     lookup_prefix_tbl +
                     lookup_data)

    def generate_lua(self, text_start_addr=0):
        """Generate the Lua code for accessing this TextLib.

        Args:
          text_start_addr: The starting address for the text bytes region.
        """
        return P8ADVENT_LUA_PAT.format(text_start_addr=text_start_addr)
| {
"repo_name": "dansanderson/p8advent",
"path": "p8advent/textlib.py",
"copies": "1",
"size": "16277",
"license": "mit",
"hash": -7760836072420984000,
"line_mean": 36.6782407407,
"line_max": 90,
"alpha_frac": 0.604534005,
"autogenerated": false,
"ratio": 3.5773626373626373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46818966423626374,
"avg_score": null,
"num_lines": null
} |
#at first I try to do it in place, but it was too hard.
# Definition for an interval.
class Interval:
    """A closed interval [start, end]."""
    def __init__(self, s=0, e=0):
        self.start, self.end = s, e
class Solution:
    # @param intervals, a list of Intervals (sorted by start, non-overlapping)
    # @param newInterval, a Interval
    # @return a list of Interval
    def insert(self, intervals, newInterval):
        """Insert newInterval into a sorted, disjoint interval list,
        merging any intervals that overlap it.

        Mutates and returns `intervals` when the new interval falls entirely
        before or after the existing ones; otherwise returns a new list.
        """
        if len(intervals) == 0:
            return [newInterval]
        # Entirely before the first / after the last interval: plain insert.
        if newInterval.end < intervals[0].start:
            intervals.insert(0, newInterval)
            return intervals
        if newInterval.start > intervals[-1].end:
            intervals.append(newInterval)
            return intervals
        result = []
        i = 0
        # Copy intervals that end strictly before the new one starts.
        while i < len(intervals) and newInterval.start > intervals[i].end:
            result.append(intervals[i])
            i += 1
        # Merge every interval overlapping [start, end].  Note the <= here:
        # touching endpoints count as overlapping.
        new_start = newInterval.start
        new_end = newInterval.end
        while i < len(intervals) and intervals[i].start <= newInterval.end:
            new_start = min(new_start, intervals[i].start)
            new_end = max(new_end, intervals[i].end)
            i += 1
        result.append(Interval(new_start, new_end))
        # Copy the remaining intervals unchanged.
        result.extend(intervals[i:])
        return result

    def output(self, intervals):
        """Print each interval as a (start, end) tuple, one per line."""
        # BUG FIX: the original used Python-2-only xrange; iterate directly.
        # Printing an explicit tuple preserves the original Python 2 output
        # format ("(s, e)") under Python 3.
        for interval in intervals:
            print((interval.start, interval.end))
if __name__ == "__main__":
    # Ad-hoc smoke test: merge (4, 7) into five disjoint intervals.
    existing = [Interval(0, 2), Interval(3, 5), Interval(6, 8),
                Interval(10, 12), Interval(13, 15)]
    solver = Solution()
    merged = solver.insert(existing, Interval(4, 7))
    solver.output(merged)
| {
"repo_name": "sureleo/leetcode",
"path": "archive/python/array/InsertInterval.py",
"copies": "2",
"size": "1935",
"license": "mit",
"hash": -7709369809358563000,
"line_mean": 27.0434782609,
"line_max": 62,
"alpha_frac": 0.5390180879,
"autogenerated": false,
"ratio": 3.9651639344262297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5504182022326229,
"avg_score": null,
"num_lines": null
} |
# A theater seating chart is implemented as a table of ticket prices, like this:
# 10 10 10 10 10 10 10 10 10 10
# 10 10 10 10 10 10 10 10 10 10
# 10 10 10 10 10 10 10 10 10 10
# 10 10 20 20 20 20 20 20 10 10
# 10 10 20 20 20 20 20 20 10 10
# 10 10 20 20 20 20 20 20 10 10
# 20 20 30 30 40 40 30 30 20 20
# 20 30 30 40 50 50 40 30 30 20
# 30 40 50 50 50 50 50 50 40 30
#
# Write a program that prompts users to
# pick either a seat or a price. Mark sold
# seats by changing the price to 0. When
# a user specifies a seat, make sure it is
# available. When a user specifies a price,
# find any seat with that price.
# FUNCTIONS
def printSeats(seats):
    """Print the seating chart, one row of prices per line.

    BUG FIX: the original iterated range(len(seats[0]) - 1), i.e. it used the
    column count to index rows; that only printed every row by coincidence
    (the chart happens to have 9 rows and 10 columns).  Iterate the rows
    themselves instead, which works for any chart shape.
    """
    for row in seats:
        print(row)
def findSeat(seats, pickChoice):
    """Sell one seat, chosen either by price ("p") or by row/column location.

    A sold seat is marked by setting its price to 0.  Prompts the user via
    input() and prints either the sold seat's location or a message that the
    requested seat/price is unavailable.

    Args:
      seats: rectangular list-of-lists of prices (mutated in place).
      pickChoice: "p"/"P" for price search; anything else means location.
    """
    if pickChoice.lower() == "p":
        price = int(input("\nEnter the price(10, 20, 30, 40 or 50): "))
        # Scan row-major for the first seat at the requested price.
        for i in range(len(seats)):
            for j in range(len(seats[0])):
                if seats[i][j] == price:
                    seats[i][j] = 0
                    print("\nYour seat is in row %d and seat %d" % (i + 1, j + 1))
                    return
        # BUG FIX: the original printed nothing when no seat matched.
        print("\nSorry, no seat is available at that price")
    else:
        # BUG FIX: the original prompted for "1 through len-1", which wrongly
        # excluded the last row and the last seat of each row.
        totalRows = len(seats)
        row = int(input("\nEnter the row (1 through %d): " % totalRows))
        totalCols = len(seats[0])
        col = int(input("\nEnter the seat(column) (1 through %d): " % totalCols))
        if seats[row - 1][col - 1] == 0:
            print("\nSorry the seat is unavailable")
        else:
            seats[row - 1][col - 1] = 0
            print("\nYour seat is in row %d and seat %d" % (row, col))
# main
def main():
    """Drive one ticket sale: show the chart, sell a seat, show it again."""
    seats = [
        [10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
        [10, 10, 20, 20, 20, 20, 20, 20, 10, 10],
        [10, 10, 20, 20, 20, 20, 20, 20, 10, 10],
        [10, 10, 20, 20, 20, 20, 20, 20, 10, 10],
        [20, 20, 30, 30, 40, 40, 30, 30, 20, 20],
        [20, 30, 30, 40, 50, 50, 40, 30, 30, 20],
        [30, 40, 50, 50, 50, 50, 50, 50, 40, 30],
    ]
    pickChoice = str(input("Pick seat by price or location(Enter P or L): "))
    printSeats(seats)
    findSeat(seats, pickChoice)
    printSeats(seats)
# PROGRAM RUN
main() | {
"repo_name": "futurepr0n/Books-solutions",
"path": "Python-For-Everyone-Horstmann/Chapter6-Lists/P6.26.py",
"copies": "1",
"size": "2303",
"license": "mit",
"hash": 5629236307621983000,
"line_mean": 35,
"line_max": 101,
"alpha_frac": 0.5445071646,
"autogenerated": false,
"ratio": 2.9225888324873095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39670959970873093,
"avg_score": null,
"num_lines": null
} |
"""Athena Query
Submits the appropriate Security Fairy
query to Athena.
"""
import re
import boto3
import logging
from datetime import datetime, timedelta
from setup_logger import create_logger
from botocore.exceptions import ProfileNotFound
logger = create_logger(name="athena_query.py", logging_level=logging.INFO)
try:
SESSION = boto3.session.Session(profile_name='training',
region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Submits the Security Fairy query to Athena and stores the resulting
    execution ID on the event for the downstream Lambda functions.
    """
    execution_id = execute_query(event['entity_arn'],
                                 event['num_days'],
                                 event['s3_bucket'])
    event['execution_id'] = execution_id
    return event
def window_calc(num_days):
    """Calculate the year and month at the start of the query window.

    The sign of num_days is ignored; the window always reaches back in time
    from now.

    Returns:
      (year, month): int year and two-digit, zero-padded month string.
    """
    window_start = datetime.now() - timedelta(days=abs(num_days))
    return window_start.year, '{:02d}'.format(window_start.month)
def execute_query(entity_arn, num_days, s3_bucket):
    """Build and submit the Athena query; returns the query execution ID.

    Results are written, SSE-S3 encrypted, to s3://<s3_bucket>/tables.
    """
    escaped_arn = build_escaped_arn(entity_arn)
    year, month = window_calc(num_days)
    # Aggregate, per assumed-role session and event source, the distinct API
    # actions seen in CloudTrail since the window start.
    # NOTE(review): escaped_arn/year/month are interpolated directly into the
    # SQL text; assumes entity_arn comes from a trusted caller -- confirm.
    # NOTE(review): month >= '{month}' with year = '{year}' does not handle a
    # window that crosses a year boundary -- confirm intended.
    hql = f"""
    select useridentity.arn as user_arn
    , eventsource
    , array_distinct(array_agg(eventName)) as actions
    from aws_logs.cloudtrail
    where year = '{year}'
    and month >= '{month}'
    and regexp_like(useridentity.arn, '{escaped_arn}\/.+')
    group by useridentity.arn
    , eventsource
    """
    logger.info(hql)
    output = f's3://{s3_bucket}/tables'
    config = {
        'OutputLocation': output,
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    athena_client = SESSION.client('athena')
    execution = athena_client.start_query_execution(QueryString=hql,
                                                    ResultConfiguration=config)
    logger.info("Query ID:")
    logger.info(execution['QueryExecutionId'])
    return execution['QueryExecutionId']
def build_escaped_arn(entity_arn):
    """Rewrite an IAM role ARN as a regex-ready assumed-role STS ARN.

    The '/' before the role name is backslash-escaped so the result can be
    embedded in a regexp_like() pattern.
    """
    parts = re.split('/|:', entity_arn)
    account_id, role_name = parts[4], parts[6]
    escaped_arn = "arn:aws:sts::" + account_id + ":assumed-role\\/" + role_name
    logger.debug(escaped_arn)
    return escaped_arn
if __name__ == '__main__':
# arn:aws:sts::281782457076:assumed-role\/1s_tear_down_role\/.+
# lambda_handler(
# {
# "entity_arn": "arn:aws:iam::281782457076:assumed-role/1s_tear_down_role",
# "num_days": "-30",
# "s3_bucket": "1s-potato-east"
# },
# {}
# )
pass | {
"repo_name": "1Strategy/security-fairy",
"path": "athena_query.py",
"copies": "1",
"size": "2886",
"license": "apache-2.0",
"hash": 5804082585751560000,
"line_mean": 26.4952380952,
"line_max": 87,
"alpha_frac": 0.5914760915,
"autogenerated": false,
"ratio": 3.6764331210191084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975835921277303,
"avg_score": 0.0019099999492156355,
"num_lines": 105
} |
"""Athena URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from blog.views import PostListView, PostDetailView, ResumeListView
from django.contrib import admin
urlpatterns = [
url(r'^$', PostListView.as_view(), name='post-list'),
url(r'^posts/(?P<pk>[0-9]+)$', PostDetailView.as_view(), name='post-detail'),
url(r'^resume', ResumeListView.as_view(), name='resume'),
url(r'^xadmin/', admin.site.urls),
]
# change admin title and header
admin.site.site_header = 'He Xiangyu Blog. Administration'
admin.site.site_title = 'He Xiangyu Blog. Administration'
| {
"repo_name": "zhengxiaowai/Athena",
"path": "Athena/urls.py",
"copies": "1",
"size": "1217",
"license": "mit",
"hash": 770156306794120300,
"line_mean": 38.2580645161,
"line_max": 81,
"alpha_frac": 0.7058340181,
"autogenerated": false,
"ratio": 3.4089635854341735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46147976035341737,
"avg_score": null,
"num_lines": null
} |
"""A thesaurus of words the player might use, so you can easily accept
synonyms of the word you want.
e.g. This will recognise take, pick, get or collect::
if do[0] in weatbag.words.take:
#...
"""
# Verbs
move = {'move', 'walk', 'go', 'travel', 'cross', 'pace', 'traverse'}
give = {'give', 'feed', 'present', 'impart', 'pass'}
use = {'eat', 'use', 'wear', 'utilise'}
fight = {'fight', 'kill', 'hit', 'attack', 'afflict'}
drop = {'drop', 'unload'}
take = {'pick', 'take', 'get', 'collect', 'acquire', 'adopt', 'withdraw'}
look = {'look', 'inspect', 'examine', 'check'}
attack = {'attack', 'swing', 'hit', 'punch', 'kick', 'fight'}
combine = {'combine', 'join', 'mix', 'unite', 'compound', 'aggregate', 'blend', 'coalesce', 'meld'}
talk = {'talk', 'speak', 'converse'}
# Nouns
inventory = {'inventory', 'possessions', 'belongings', 'bag'}
surroundings = {'surroundings', 'around', 'scenery'}
# Prepositions
prepositions = {'up', 'down', 'on', 'under', 'in', 'at', 'to', 'with', 'and'}
# Yes/No
yes = {'yes', 'y', 'yup', 'ye'}
no = {'no', 'n', 'nope'}
# Control
instructions = {'instructions', 'help', '?', 'tutorial'}
exit = {'exit', 'quit'}
| {
"repo_name": "takluyver/weatbag",
"path": "weatbag/words.py",
"copies": "1",
"size": "1158",
"license": "mit",
"hash": 3742032054259640300,
"line_mean": 33.0588235294,
"line_max": 99,
"alpha_frac": 0.5820379965,
"autogenerated": false,
"ratio": 2.776978417266187,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3859016413766187,
"avg_score": null,
"num_lines": null
} |
"""A thin layer on top of the requests package that provides niceties
such as retry policy, rate limit, and more logging.
"""
__all__ = [
'HttpError',
'Client',
'ForwardingClient',
'Request',
'Response',
]
import functools
import time
import logging
import requests
import requests.cookies
try:
import lxml.etree
from lxml.etree import fromstring
except ImportError:
fromstring = None
from garage.assertions import ASSERT
from garage.http import policies
LOG = logging.getLogger(__name__)
_REQUEST_ARG_NAMES = frozenset(
'headers files data params auth cookies hooks json'.split()
)
_SEND_ARG_NAMES = frozenset(
'verify proxies stream cert timeout allow_redirects'.split()
)
_ALL_ARG_NAMES = _REQUEST_ARG_NAMES | _SEND_ARG_NAMES
class HttpError(Exception):
    """Raised when an HTTP request fails (after exhausting retries)."""
def _check_kwargs(kwargs, arg_names):
names = set(kwargs) - arg_names
if names:
raise TypeError('not expect these keyword arguments: %s' %
', '.join(sorted(names)))
def _make_method(method):
def http_method(self, uri, **kwargs):
_check_kwargs(kwargs, _ALL_ARG_NAMES)
req_kwargs = {
key: arg for key, arg in kwargs.items()
if key in _REQUEST_ARG_NAMES
}
send_kwargs = {
key: arg for key, arg in kwargs.items()
if key in _SEND_ARG_NAMES
}
return self.send(Request(method, uri, **req_kwargs), **send_kwargs)
return http_method
class _ClientMixin:
    # Provides the standard HTTP verb methods; the host class must define
    # send(request, **kwargs).
    get = _make_method('GET')
    head = _make_method('HEAD')
    post = _make_method('POST')
    put = _make_method('PUT')
def _patch_session(session):
    """Patch requests.Session.send for better logging.

    Wraps the session's send() so that, at DEBUG level, request headers,
    send kwargs, and response headers are all logged.
    """
    original_send = session.send

    def logged_send(request, **kwargs):
        if LOG.isEnabledFor(logging.DEBUG):
            for name, value in request.headers.items():
                LOG.debug('<<< %s: %s', name, value)
            LOG.debug('send_kwargs %r', kwargs)
        response = original_send(request, **kwargs)
        if LOG.isEnabledFor(logging.DEBUG):
            for name, value in response.headers.items():
                LOG.debug('>>> %s: %s', name, value)
        return response

    session.send = logged_send
class Client(_ClientMixin):
    """HTTP client wrapping requests.Session with rate limiting and retry."""
    #
    # NOTE:
    # Session.{get,...} does a _LOT_ of extra work than just bare
    # Session.send.  Your life would be much easier if you stay above
    # Session.{get,...} instead of Session.send.
    #

    def __init__(self, *,
                 rate_limit=None,
                 retry_policy=None,
                 send_kwargs=None,
                 _session=None,
                 _sleep=time.sleep):
        # Defaults are permissive: no rate limiting, no retries.
        self._session = _session or requests.Session()
        self._rate_limit = rate_limit or policies.Unlimited()
        self._retry_policy = retry_policy or policies.NoRetry()
        self._send_kwargs = send_kwargs or {}
        # Validate the default send kwargs once, up front.
        _check_kwargs(self._send_kwargs, _SEND_ARG_NAMES)
        self._sleep = _sleep
        _patch_session(self._session)

    @property
    def headers(self):
        # The session's default headers, sent with every request.
        return self._session.headers

    @property
    def cookies(self):
        return self._session.cookies

    def update_cookies(self, cookie_dict):
        """Update cookies with a dict-like object."""
        requests.cookies.cookiejar_from_dict(
            cookie_dict, self._session.cookies
        )

    def send(self, request, **kwargs):
        """Send a Request, retrying per the retry policy; returns Response.

        Raises HttpError (chained from requests.RequestException) when the
        request ultimately fails, or re-raises any non-HTTP error.
        """
        LOG.debug('%s %s', request.method, request.uri)
        _check_kwargs(kwargs, _SEND_ARG_NAMES)
        method = getattr(self._session, request.method.lower())
        # Precedence: request.kwargs > kwargs > self._send_kwargs.
        final_kwargs = {}
        final_kwargs.update(self._send_kwargs)
        final_kwargs.update(kwargs)
        final_kwargs.update(request.kwargs)
        kwargs = final_kwargs
        retry = self._retry_policy()
        retry_count = 0
        while True:
            try:
                return self._send(method, request, kwargs, retry_count)
            except Exception:
                # Ask the retry policy for the next backoff; None = give up.
                backoff = next(retry, None)
                if backoff is None:
                    raise
                self._sleep(backoff)
                retry_count += 1

    def _send(self, method, request, kwargs, retry_count):
        # One attempt: acquire the rate limit, send, and wrap the result.
        try:
            with self._rate_limit:
                if retry_count:
                    LOG.warning(
                        'retry %d times: %s %s',
                        retry_count, request.method, request.uri,
                    )
                response = method(request.uri, **kwargs)
                # Turn 4xx/5xx into requests.HTTPError.
                response.raise_for_status()
                return Response(response)
        except requests.RequestException as exc:
            if exc.response is not None:
                status_code = exc.response.status_code
            else:
                # No response at all (connection error, timeout, ...).
                status_code = '???'
            LOG.warning(
                'encounter HTTP error: status=%s, %s %s',
                status_code, request.method, request.uri,
            )
            raise HttpError('%s %s' % (request.method, request.uri)) from exc
        except Exception:
            LOG.warning(
                'encounter generic error: %s %s',
                request.method, request.uri,
            )
            raise
class ForwardingClient(_ClientMixin):
    """A client that forwards requests to the underlying client.

    Subclasses override on_request/on_response to rewrite traffic.
    """

    def __init__(self, client):
        self.client = client

    @property
    def headers(self):
        return self.client.headers

    def update_cookies(self, cookie_dict):
        self.client.update_cookies(cookie_dict)

    def send(self, request, **kwargs):
        outgoing = self.on_request(request)
        incoming = self.client.send(outgoing, **kwargs)
        return self.on_response(outgoing, incoming)

    def on_request(self, request):
        """Hook for modifying request."""
        return request

    def on_response(self, _, response):
        """Hook for modifying response."""
        return response
class Request:
    """A thin wrapper of requests.Request.

    Holds the method, URI, and request-construction kwargs until the client
    actually sends it.
    """

    def __init__(self, method, uri, **kwargs):
        _check_kwargs(kwargs, _REQUEST_ARG_NAMES)
        self.method = method
        self.uri = uri
        self.kwargs = kwargs

    def __str__(self):
        return 'Request(%r, %r, **%r)' % (self.method, self.uri, self.kwargs)

    __repr__ = __str__

    @property
    def headers(self):
        # Lazily create the headers dict inside kwargs on first access.
        return self.kwargs.setdefault('headers', {})
class Response:
    """A thin wrapper of requests.Response.

    Adds dom()/xml_dom() helpers that parse the body with lxml; all other
    attribute access is delegated to the wrapped response.
    """

    # BUG FIX: the original evaluated lxml.etree.XMLParser() unconditionally
    # at class-definition time, so importing this module raised NameError
    # whenever lxml was not installed -- defeating the guarded import at the
    # top of the module.  Build the parser only when lxml is available.
    try:
        XML_PARSER = lxml.etree.XMLParser()
    except NameError:
        XML_PARSER = None

    def __init__(self, response):
        self._response = response

    def __getattr__(self, name):
        # Delegate everything we don't define to the wrapped response.
        return getattr(self._response, name)

    def dom(self, encoding=None, errors=None):
        """Parse the body as HTML and return the lxml root element.

        Args:
          encoding: overrides the response's detected encoding.
          errors: a codecs error handler name; if given (with encoding),
            decoding errors are handled Python-side instead of by lxml.

        Raises:
          RuntimeError: lxml.etree is not installed.
        """
        if fromstring is None:
            raise RuntimeError('lxml.etree is not installed')
        #
        # The caller intends to handle character encoding error in a way
        # that is different from lxml's (lxml refuses to parse the rest
        # of the document if there is any encoding error in the middle,
        # but neither does it report the error).
        #
        # lxml's strict-but-silent policy is counterproductive because
        # Web is full of malformed documents, and it should either be
        # lenient about the error, or raise it to the caller, not a mix
        # of both as it is right now.
        #
        if encoding and errors:
            html = self.content.decode(encoding=encoding, errors=errors)
            parser = _get_parser(None)
            return fromstring(html, parser)
        ASSERT.none(errors)
        parser = _get_parser(encoding or self.encoding)
        return fromstring(self.content, parser)

    def xml_dom(self):
        """Parse the body as XML and return the lxml root element."""
        if fromstring is None:
            raise RuntimeError('lxml.etree is not installed')
        return fromstring(self.content, self.XML_PARSER)
@functools.lru_cache(maxsize=8)
def _get_parser(encoding):
    # Cache HTML parsers per encoding (None = let lxml autodetect); dom()
    # may be called once per response, and parser construction is not free.
    return lxml.etree.HTMLParser(encoding=encoding)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/http/clients.py",
"copies": "1",
"size": "8037",
"license": "mit",
"hash": -3906384966088583000,
"line_mean": 27.9100719424,
"line_max": 77,
"alpha_frac": 0.5837999253,
"autogenerated": false,
"ratio": 4.117315573770492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 278
} |
"""A thin, practical wrapper around terminal coloring, styling, and
positioning"""
from contextlib import contextmanager
import curses
from curses import setupterm, tigetnum, tigetstr, tparm
from fcntl import ioctl
from six import text_type, string_types
try:
from io import UnsupportedOperation as IOUnsupportedOperation
except ImportError:
class IOUnsupportedOperation(Exception):
"""A dummy exception to take the place of Python 3's
``io.UnsupportedOperation`` in Python 2"""
from os import isatty, environ
import struct
import sys
from termios import TIOCGWINSZ
__all__ = ['Terminal']
class Terminal(object):
    """An abstraction around terminal capabilities

    Unlike curses, this doesn't require clearing the screen before doing
    anything, and it's friendlier to use. It keeps the endless calls to
    ``tigetstr()`` and ``tparm()`` out of your code, and it acts intelligently
    when somebody pipes your output to a non-terminal.

    Instance attributes:

    ``stream``
      The stream the terminal outputs to. It's convenient to pass the stream
      around with the terminal; it's almost always needed when the terminal
      is and saves sticking lots of extra args on client functions in
      practice.
    """
    def __init__(self, kind=None, stream=None, force_styling=False):
        """Initialize the terminal.

        If ``stream`` is not a tty, I will default to returning an empty
        Unicode string for all capability values, so things like piping your
        output to a file won't strew escape sequences all over the place. The
        ``ls`` command sets a precedent for this: it defaults to columnar
        output when being sent to a tty and one-item-per-line when not.

        :arg kind: A terminal string as taken by ``setupterm()``. Defaults to
            the value of the ``TERM`` environment variable.
        :arg stream: A file-like object representing the terminal. Defaults to
            the original value of stdout, like ``curses.initscr()`` does.
        :arg force_styling: Whether to force the emission of capabilities, even
            if we don't seem to be in a terminal. This comes in handy if users
            are trying to pipe your output through something like ``less -r``,
            which supports terminal codes just fine but doesn't appear itself
            to be a terminal. Just expose a command-line option, and set
            ``force_styling`` based on it. Terminal initialization sequences
            will be sent to ``stream`` if it has a file descriptor and to
            ``sys.__stdout__`` otherwise. (``setupterm()`` demands to send them
            somewhere, and stdout is probably where the output is ultimately
            headed. If not, stderr is probably bound to the same terminal.)

            If you want to force styling to not happen, pass
            ``force_styling=None``.
        """
        if stream is None:
            stream = sys.__stdout__
        try:
            # Streams without a real OS-level descriptor (e.g. StringIO)
            # either lack fileno or raise when it is called; treat both
            # cases as "no descriptor".
            stream_descriptor = (stream.fileno() if hasattr(stream, 'fileno')
                                 and callable(stream.fileno)
                                 else None)
        except IOUnsupportedOperation:
            stream_descriptor = None

        self._is_a_tty = (stream_descriptor is not None and
                          isatty(stream_descriptor))
        # force_styling=None disables styling entirely, even on a tty.
        self._does_styling = ((self.is_a_tty or force_styling) and
                              force_styling is not None)

        # The descriptor to direct terminal initialization sequences to.
        # sys.__stdout__ seems to always have a descriptor of 1, even if output
        # is redirected.
        self._init_descriptor = (sys.__stdout__.fileno()
                                 if stream_descriptor is None
                                 else stream_descriptor)
        if self.does_styling:
            # Make things like tigetstr() work. Explicit args make setupterm()
            # work even when -s is passed to nosetests. Lean toward sending
            # init sequences to the stream if it has a file descriptor, and
            # send them to stdout as a fallback, since they have to go
            # somewhere.
            try:
                setupterm(kind or environ.get('TERM', 'dumb') or 'dumb',
                          self._init_descriptor)
            except curses.error:
                # There was an error setting up the terminal, either curses is
                # not supported or TERM is incorrectly set. Fall back to dumb.
                self._does_styling = False

        self.stream = stream

    # Sugary names for commonly-used capabilities, intended to help avoid trips
    # to the terminfo man page and comments in your code:
    _sugar = dict(
        # Don't use "on" or "bright" as an underscore-separated chunk in any of
        # these (e.g. on_cology or rock_on) so we don't interfere with
        # __getattr__.
        save='sc',
        restore='rc',

        clear_eol='el',
        clear_bol='el1',
        clear_eos='ed',
        # 'clear' clears the whole screen.
        position='cup',  # deprecated
        enter_fullscreen='smcup',
        exit_fullscreen='rmcup',
        move='cup',
        move_x='hpa',
        move_y='vpa',
        move_left='cub1',
        move_right='cuf1',
        move_up='cuu1',
        move_down='cud1',

        hide_cursor='civis',
        normal_cursor='cnorm',

        reset_colors='op',  # oc doesn't work on my OS X terminal.

        normal='sgr0',
        reverse='rev',
        # 'bold' is just 'bold'. Similarly...
        # blink
        # dim
        # flash
        italic='sitm',
        no_italic='ritm',
        shadow='sshm',
        no_shadow='rshm',
        standout='smso',
        no_standout='rmso',
        subscript='ssubm',
        no_subscript='rsubm',
        superscript='ssupm',
        no_superscript='rsupm',
        underline='smul',
        no_underline='rmul')

    def __getattr__(self, attr):
        """Return a terminal capability, like bold.

        For example, you can say ``term.bold`` to get the string that turns on
        bold formatting and ``term.normal`` to get the string that turns it off
        again. Or you can take a shortcut: ``term.bold('hi')`` bolds its
        argument and sets everything to normal afterward. You can even combine
        things: ``term.bold_underline_red_on_bright_green('yowzers!')``.

        For a parametrized capability like ``cup``, pass the parameters too:
        ``some_term.cup(line, column)``.

        ``man terminfo`` for a complete list of capabilities.

        Return values are always Unicode.
        """
        resolution = (self._resolve_formatter(attr) if self.does_styling
                      else NullCallableString())
        setattr(self, attr, resolution)  # Cache capability codes.
        return resolution

    @property
    def does_styling(self):
        """Whether attempt to emit capabilities

        This is influenced by the ``is_a_tty`` property and by the
        ``force_styling`` argument to the constructor. You can examine
        this value to decide whether to draw progress bars or other frippery.
        """
        return self._does_styling

    @property
    def is_a_tty(self):
        """Whether my ``stream`` appears to be associated with a terminal"""
        return self._is_a_tty

    @property
    def height(self):
        """The height of the terminal in characters

        If no stream or a stream not representing a terminal was passed in at
        construction, return the dimension of the controlling terminal so
        piping to things that eventually display on the terminal (like ``less
        -R``) work. If a stream representing a terminal was passed in, return
        the dimensions of that terminal. If there somehow is no controlling
        terminal, return ``None``. (Thus, you should check that the property
        ``is_a_tty`` is true before doing any math on the result.)
        """
        return self._height_and_width()[0]

    @property
    def width(self):
        """The width of the terminal in characters

        See ``height()`` for some corner cases.
        """
        return self._height_and_width()[1]

    def _height_and_width(self):
        """Return a tuple of (terminal height, terminal width).

        Start by trying TIOCGWINSZ (Terminal I/O-Control: Get Window Size),
        falling back to environment variables (LINES, COLUMNS), and returning
        (None, None) if those are unavailable or invalid.
        """
        # tigetnum('lines') and tigetnum('cols') update only if we call
        # setupterm() again.
        for descriptor in self._init_descriptor, sys.__stdout__:
            try:
                return struct.unpack(
                    'hhhh', ioctl(descriptor, TIOCGWINSZ, '\000' * 8))[0:2]
            except IOError:
                # when the output stream or init descriptor is not a tty, such
                # as when stdout is piped to another program, e.g. tee(1),
                # these ioctls will raise IOError
                pass
        try:
            # int(None) raises TypeError when either env var is unset.
            return int(environ.get('LINES')), int(environ.get('COLUMNS'))
        except TypeError:
            return None, None

    @contextmanager
    def location(self, x=None, y=None):
        """Return a context manager for temporarily moving the cursor.

        Move the cursor to a certain position on entry, let you print stuff
        there, then return the cursor to its original position::

            term = Terminal()
            with term.location(2, 5):
                print('Hello, world!')
                for x in range(10):
                    print('I can do it %i times!' % x)

        Specify ``x`` to move to a certain column, ``y`` to move to a certain
        row, both, or neither. If you specify neither, only the saving and
        restoration of cursor position will happen. This can be useful if you
        simply want to restore your place after doing some manual cursor
        movement.
        """
        # Save position and move to the requested column, row, or both:
        self.stream.write(self.save)
        if x is not None and y is not None:
            self.stream.write(self.move(y, x))
        elif x is not None:
            self.stream.write(self.move_x(x))
        elif y is not None:
            self.stream.write(self.move_y(y))
        try:
            yield
        finally:
            # Restore original cursor position:
            self.stream.write(self.restore)

    @contextmanager
    def fullscreen(self):
        """Return a context manager that enters fullscreen mode while inside it
        and restores normal mode on leaving."""
        self.stream.write(self.enter_fullscreen)
        try:
            yield
        finally:
            self.stream.write(self.exit_fullscreen)

    @contextmanager
    def hidden_cursor(self):
        """Return a context manager that hides the cursor while inside it and
        makes it visible on leaving."""
        self.stream.write(self.hide_cursor)
        try:
            yield
        finally:
            self.stream.write(self.normal_cursor)

    @property
    def color(self):
        """Return a capability that sets the foreground color.

        The capability is unparametrized until called and passed a number
        (0-15), at which point it returns another string which represents a
        specific color change. This second string can further be called to
        color a piece of text and set everything back to normal afterward.

        :arg num: The number, 0-15, of the color
        """
        return ParametrizingString(self._foreground_color, self.normal)

    @property
    def on_color(self):
        """Return a capability that sets the background color.

        See ``color()``.
        """
        return ParametrizingString(self._background_color, self.normal)

    @property
    def number_of_colors(self):
        """Return the number of colors the terminal supports.

        Common values are 0, 8, 16, 88, and 256.

        Though the underlying capability returns -1 when there is no color
        support, we return 0. This lets you test more Pythonically::

            if term.number_of_colors:
                ...

        We also return 0 if the terminal won't tell us how many colors it
        supports, which I think is rare.
        """
        # This is actually the only remotely useful numeric capability. We
        # don't name it after the underlying capability, because we deviate
        # slightly from its behavior, and we might someday wish to give direct
        # access to it.
        if not self._does_styling:
            # tigetnum() requires setupterm() to have run first; with styling
            # off we never called it, and we support no colors anyway.
            return 0
        colors = tigetnum('colors')  # Returns -1 if no color support, -2 if no
                                     # such cap.
        # self.__dict__['colors'] = ret  # Cache it. It's not changing.
        # (Doesn't work.)
        return colors if colors >= 0 else 0

    def _resolve_formatter(self, attr):
        """Resolve a sugary or plain capability name, color, or compound
        formatting function name into a callable capability.

        Return a ``ParametrizingString`` or a ``FormattingString``.
        """
        if attr in COLORS:
            return self._resolve_color(attr)
        elif attr in COMPOUNDABLES:
            # Bold, underline, or something that takes no parameters
            return self._formatting_string(self._resolve_capability(attr))
        else:
            formatters = split_into_formatters(attr)
            if all(f in COMPOUNDABLES for f in formatters):
                # It's a compound formatter, like "bold_green_on_red". Future
                # optimization: combine all formatting into a single escape
                # sequence.
                return self._formatting_string(
                    u''.join(self._resolve_formatter(s) for s in formatters))
            else:
                return ParametrizingString(self._resolve_capability(attr))

    def _resolve_capability(self, atom):
        """Return a terminal code for a capname or a sugary name, or an empty
        Unicode.

        The return value is always Unicode, because otherwise it is clumsy
        (especially in Python 3) to concatenate with real (Unicode) strings.
        """
        code = tigetstr(self._sugar.get(atom, atom))
        if code:
            # See the comment in ParametrizingString for why this is latin1.
            return code.decode('latin1')
        return u''

    def _resolve_color(self, color):
        """Resolve a color like red or on_bright_green into a callable
        capability."""
        # TODO: Does curses automatically exchange red and blue and cyan and
        # yellow when a terminal supports setf/setb rather than setaf/setab?
        # I'll be blasted if I can find any documentation. The following
        # assumes it does.
        color_cap = (self._background_color if 'on_' in color else
                     self._foreground_color)
        # curses constants go up to only 7, so add an offset to get at the
        # bright colors at 8-15:
        offset = 8 if 'bright_' in color else 0
        base_color = color.rsplit('_', 1)[-1]
        return self._formatting_string(
            color_cap(getattr(curses, 'COLOR_' + base_color.upper()) + offset))

    @property
    def _foreground_color(self):
        # setaf is the ANSI-flavored cap; setf is the older fallback.
        return self.setaf or self.setf

    @property
    def _background_color(self):
        return self.setab or self.setb

    def _formatting_string(self, formatting):
        """Return a new ``FormattingString`` which implicitly receives my
        notion of "normal"."""
        return FormattingString(formatting, self.normal)
def derivative_colors(colors):
    """Given base color names, return the set of valid prefixed variants.

    Each base color ``c`` contributes ``on_c``, ``bright_c``, and
    ``on_bright_c``.
    """
    prefixes = ('on_', 'bright_', 'on_bright_')
    return {prefix + base for base in colors for prefix in prefixes}
# The base terminal colors plus all on_/bright_/on_bright_ derivatives:
COLORS = {'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan',
          'white'}
COLORS.update(derivative_colors(COLORS))
# Formatting caps that have no dedicated no_* counterpart in _sugar:
SINGLES = {'bold', 'reverse', 'blink', 'dim', 'flash'}
# Formatting caps that come in enable/disable pairs (see _sugar's no_* keys):
DUALS = {
    'underline', 'italic', 'shadow', 'standout', 'subscript', 'superscript'
}
# Every token that may appear as a segment of a compound formatter name
# such as "bold_green_on_red":
COMPOUNDABLES = (COLORS | SINGLES | DUALS | {'no_' + c for c in DUALS})
class ParametrizingString(text_type):
    """A Unicode string which can be called to parametrize it as a terminal
    capability"""

    def __new__(cls, formatting, normal=None):
        """Instantiate.

        :arg normal: If non-None, indicates that, once parametrized, this can
            be used as a ``FormattingString``. The value is used as the
            "normal" capability.
        """
        new = text_type.__new__(cls, formatting)
        new._normal = normal
        return new

    def __call__(self, *args):
        """Return the capability resolved with ``tparm`` and *args*.

        If a "normal" cap was supplied at construction, the result is a
        callable ``FormattingString``; otherwise it is a plain Unicode.
        Returns an empty Unicode when curses reports the terminal is gone
        or uninitialized.
        """
        try:
            # Re-encode the cap, because tparm() takes a bytestring in Python
            # 3. However, appear to be a plain Unicode string otherwise so
            # concats work.
            #
            # We use *latin1* encoding so that bytes emitted by tparm are
            # encoded to their native value: some terminal kinds, such as
            # 'avatar' or 'kermit', emit 8-bit bytes in range 0x7f to 0xff.
            # latin1 leaves these values unmodified in their conversion to
            # unicode byte values. The terminal emulator will "catch" and
            # handle these values, even if emitting utf8-encoded text, where
            # these bytes would otherwise be illegal utf8 start bytes.
            parametrized = tparm(self.encode('latin1'), *args).decode('latin1')
            return (parametrized if self._normal is None else
                    FormattingString(parametrized, self._normal))
        except curses.error:
            # Catch "must call (at least) setupterm() first" errors, as when
            # running simply `nosetests` (without progressive) on nose-
            # progressive. Perhaps the terminal has gone away between calling
            # tigetstr and calling tparm.
            return u''
        except TypeError:
            # If the first non-int (i.e. incorrect) arg was a string, suggest
            # something intelligent:
            if len(args) == 1 and isinstance(args[0], string_types):
                raise TypeError(
                    'A native or nonexistent capability template received '
                    '%r when it was expecting ints. You probably misspelled a '
                    'formatting call like bright_red_on_white(...).' % args)
            else:
                # Somebody passed a non-string; I don't feel confident
                # guessing what they were trying to do.
                raise
class FormattingString(text_type):
    """A Unicode capability string that, when called on a piece of text,
    wraps the text in this formatting and appends the "normal" reset
    sequence afterward."""

    def __new__(cls, formatting, normal):
        instance = text_type.__new__(cls, formatting)
        instance._normal = normal
        return instance

    def __call__(self, text):
        """Return ``text`` with my escape codes in front and the "normal"
        reset sequence behind, so everything after it renders with default
        attributes. The result is always a Unicode.
        """
        return u''.join((self, text, self._normal))
class NullCallableString(text_type):
    """An empty Unicode that stands in for both ``FormattingString`` and
    ``ParametrizingString``.

    Used when there is no tty, so every capability resolves to a blank,
    call-safe string and formatting becomes a no-op.
    """

    def __new__(cls):
        return text_type.__new__(cls, u'')

    def __call__(self, *args):
        """Act as either kind of callable capability while emitting nothing.

        Called with exactly one non-int argument, I behave as a
        ``FormattingString`` and hand that argument back unchanged. Called
        with ints (or any other arity), I behave as a ``ParametrizingString``:
        an int first arg is a good hint of that role, since almost all
        parametrized terminfo capabilities take ints, and in a no-tty world
        every parametrized capability is blank anyway, so I return an empty
        Unicode.
        """
        if len(args) == 1 and not isinstance(args[0], int):
            # FormattingString role: pass the text through untouched. We
            # deliberately do not coerce Python 2 strs to unicode here --
            # there is no way to know the right encoding.
            return args[0]
        # ParametrizingString role. tparm can take non-int params too, but
        # callably parametrizing such caps isn't supported yet, so a single
        # blank return covers every case. TODO: split into
        # NullParametrizableString / NullFormattingString and retire this.
        return u''
def split_into_formatters(compound):
    """Break a possibly compound formatter name into formatter segments.

    Underscore-separated chunks are the raw tokens; any token that is only a
    prefix ('no', 'on', 'bright', or 'on_bright') is glued onto the token
    that follows it.

    >>> split_into_formatters('bold_underline_bright_blue_on_red')
    ['bold', 'underline', 'bright_blue', 'on_red']
    >>> split_into_formatters('red_no_italic_shadow_on_bright_cyan')
    ['red', 'no_italic', 'shadow', 'on_bright_cyan']
    """
    # These occur only as prefixes, so they can always be merged forward:
    prefixes = frozenset(['no', 'on', 'bright', 'on_bright'])
    segments = []
    for token in compound.split('_'):
        if segments and segments[-1] in prefixes:
            # The previous token was a bare prefix; complete it with this one.
            segments[-1] = segments[-1] + '_' + token
        else:
            segments.append(token)
    return segments
| {
"repo_name": "erikrose/blessings",
"path": "blessings/__init__.py",
"copies": "1",
"size": "22310",
"license": "mit",
"hash": 8873629933045182000,
"line_mean": 38.5567375887,
"line_max": 79,
"alpha_frac": 0.6069475571,
"autogenerated": false,
"ratio": 4.39779223339247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.550473979049247,
"avg_score": null,
"num_lines": null
} |
"""A thin, practical wrapper around terminal coloring, styling, and
positioning"""
from contextlib import contextmanager
import curses
from curses import setupterm, tigetnum, tigetstr, tparm
# fcntl and termios are POSIX-only; on other platforms fall back to the
# project's hackwindows shim, which supplies equivalent names.
try:
    from fcntl import ioctl
except ImportError:
    from hackwindows import ioctl
try:
    from io import UnsupportedOperation as IOUnsupportedOperation
except ImportError:
    class IOUnsupportedOperation(Exception):
        """A dummy exception to take the place of Python 3's
        ``io.UnsupportedOperation`` in Python 2"""
from os import isatty, environ
from platform import python_version_tuple
import struct
import sys
try:
    from termios import TIOCGWINSZ
except ImportError:
    from hackwindows import TIOCGWINSZ

__all__ = ['Terminal']

# Good till 3.2.10: Python 3.x < 3.2.3 has a bug in which tparm() erroneously
# takes a string (http://bugs.python.org/issue10570), so refuse to run there.
if ('3', '0', '0') <= python_version_tuple() < ('3', '2', '2+'):
    raise ImportError('Blessings needs Python 3.2.3 or greater for Python 3 '
                      'support due to http://bugs.python.org/issue10570.')
class Terminal(object):
    """An abstraction around terminal capabilities

    Unlike curses, this doesn't require clearing the screen before doing
    anything, and it's friendlier to use. It keeps the endless calls to
    ``tigetstr()`` and ``tparm()`` out of your code, and it acts intelligently
    when somebody pipes your output to a non-terminal.

    Instance attributes:

    ``stream``
      The stream the terminal outputs to. It's convenient to pass the stream
      around with the terminal; it's almost always needed when the terminal
      is and saves sticking lots of extra args on client functions in
      practice.
    """
    def __init__(self, kind=None, stream=None, force_styling=False):
        """Initialize the terminal.

        If ``stream`` is not a tty, I will default to returning an empty
        Unicode string for all capability values, so things like piping your
        output to a file won't strew escape sequences all over the place. The
        ``ls`` command sets a precedent for this: it defaults to columnar
        output when being sent to a tty and one-item-per-line when not.

        :arg kind: A terminal string as taken by ``setupterm()``. Defaults to
            the value of the ``TERM`` environment variable.
        :arg stream: A file-like object representing the terminal. Defaults to
            the original value of stdout, like ``curses.initscr()`` does.
        :arg force_styling: Whether to force the emission of capabilities, even
            if we don't seem to be in a terminal. This comes in handy if users
            are trying to pipe your output through something like ``less -r``,
            which supports terminal codes just fine but doesn't appear itself
            to be a terminal. Just expose a command-line option, and set
            ``force_styling`` based on it. Terminal initialization sequences
            will be sent to ``stream`` if it has a file descriptor and to
            ``sys.__stdout__`` otherwise. (``setupterm()`` demands to send them
            somewhere, and stdout is probably where the output is ultimately
            headed. If not, stderr is probably bound to the same terminal.)

            If you want to force styling to not happen, pass
            ``force_styling=None``.
        """
        if stream is None:
            stream = sys.__stdout__
        try:
            # Streams with no real OS-level descriptor (e.g. StringIO)
            # either lack fileno or raise when it is called; treat both
            # cases as "no descriptor".
            stream_descriptor = (stream.fileno() if hasattr(stream, 'fileno')
                                 and callable(stream.fileno)
                                 else None)
        except IOUnsupportedOperation:
            stream_descriptor = None

        self._is_a_tty = (stream_descriptor is not None and
                          isatty(stream_descriptor))
        # force_styling=None disables styling entirely, even on a tty.
        self._does_styling = ((self.is_a_tty or force_styling) and
                              force_styling is not None)

        # The descriptor to direct terminal initialization sequences to.
        # sys.__stdout__ seems to always have a descriptor of 1, even if output
        # is redirected.
        self._init_descriptor = (sys.__stdout__.fileno()
                                 if stream_descriptor is None
                                 else stream_descriptor)
        if self.does_styling:
            # Make things like tigetstr() work. Explicit args make setupterm()
            # work even when -s is passed to nosetests. Lean toward sending
            # init sequences to the stream if it has a file descriptor, and
            # send them to stdout as a fallback, since they have to go
            # somewhere.
            try:
                setupterm(kind or environ.get('TERM', 'unknown'),
                          self._init_descriptor)
            except curses.error:
                # The terminal kind is unsupported or TERM is bogus. Rather
                # than letting construction blow up, degrade gracefully to
                # emitting no styling at all.
                self._does_styling = False

        self.stream = stream

    # Sugary names for commonly-used capabilities, intended to help avoid trips
    # to the terminfo man page and comments in your code:
    _sugar = dict(
        # Don't use "on" or "bright" as an underscore-separated chunk in any of
        # these (e.g. on_cology or rock_on) so we don't interfere with
        # __getattr__.
        save='sc',
        restore='rc',

        clear_eol='el',
        clear_bol='el1',
        clear_eos='ed',
        # 'clear' clears the whole screen.
        position='cup',  # deprecated
        enter_fullscreen='smcup',
        exit_fullscreen='rmcup',
        move='cup',
        move_x='hpa',
        move_y='vpa',
        move_left='cub1',
        move_right='cuf1',
        move_up='cuu1',
        move_down='cud1',

        hide_cursor='civis',
        normal_cursor='cnorm',

        reset_colors='op',  # oc doesn't work on my OS X terminal.

        normal='sgr0',
        reverse='rev',
        # 'bold' is just 'bold'. Similarly...
        # blink
        # dim
        # flash
        italic='sitm',
        no_italic='ritm',
        shadow='sshm',
        no_shadow='rshm',
        standout='smso',
        no_standout='rmso',
        subscript='ssubm',
        no_subscript='rsubm',
        superscript='ssupm',
        no_superscript='rsupm',
        underline='smul',
        no_underline='rmul')

    def __getattr__(self, attr):
        """Return a terminal capability, like bold.

        For example, you can say ``term.bold`` to get the string that turns on
        bold formatting and ``term.normal`` to get the string that turns it off
        again. Or you can take a shortcut: ``term.bold('hi')`` bolds its
        argument and sets everything to normal afterward. You can even combine
        things: ``term.bold_underline_red_on_bright_green('yowzers!')``.

        For a parametrized capability like ``cup``, pass the parameters too:
        ``some_term.cup(line, column)``.

        ``man terminfo`` for a complete list of capabilities.

        Return values are always Unicode.
        """
        resolution = (self._resolve_formatter(attr) if self.does_styling
                      else NullCallableString())
        setattr(self, attr, resolution)  # Cache capability codes.
        return resolution

    @property
    def does_styling(self):
        """Whether attempt to emit capabilities

        This is influenced by the ``is_a_tty`` property and by the
        ``force_styling`` argument to the constructor. You can examine
        this value to decide whether to draw progress bars or other frippery.
        """
        return self._does_styling

    @property
    def is_a_tty(self):
        """Whether my ``stream`` appears to be associated with a terminal"""
        return self._is_a_tty

    @property
    def height(self):
        """The height of the terminal in characters

        If no stream or a stream not representing a terminal was passed in at
        construction, return the dimension of the controlling terminal so
        piping to things that eventually display on the terminal (like ``less
        -R``) work. If a stream representing a terminal was passed in, return
        the dimensions of that terminal. If there somehow is no controlling
        terminal, return ``None``. (Thus, you should check that the property
        ``is_a_tty`` is true before doing any math on the result.)
        """
        return self._height_and_width()[0]

    @property
    def width(self):
        """The width of the terminal in characters

        See ``height()`` for some corner cases.
        """
        return self._height_and_width()[1]

    def _height_and_width(self):
        """Return a tuple of (terminal height, terminal width).

        Start by trying TIOCGWINSZ (Terminal I/O-Control: Get Window Size),
        falling back to environment variables (LINES, COLUMNS), and returning
        (None, None) if those are unavailable or invalid.
        """
        # tigetnum('lines') and tigetnum('cols') update only if we call
        # setupterm() again.
        for descriptor in self._init_descriptor, sys.__stdout__:
            try:
                return struct.unpack(
                    'hhhh', ioctl(descriptor, TIOCGWINSZ, '\000' * 8))[0:2]
            except IOError:
                # when the output stream or init descriptor is not a tty, such
                # as when stdout is piped to another program, e.g. tee(1),
                # these ioctls will raise IOError
                pass
        try:
            # int(None) raises TypeError when either env var is unset.
            return int(environ.get('LINES')), int(environ.get('COLUMNS'))
        except TypeError:
            return None, None

    @contextmanager
    def location(self, x=None, y=None):
        """Return a context manager for temporarily moving the cursor.

        Move the cursor to a certain position on entry, let you print stuff
        there, then return the cursor to its original position::

            term = Terminal()
            with term.location(2, 5):
                print 'Hello, world!'
                for x in xrange(10):
                    print 'I can do it %i times!' % x

        Specify ``x`` to move to a certain column, ``y`` to move to a certain
        row, both, or neither. If you specify neither, only the saving and
        restoration of cursor position will happen. This can be useful if you
        simply want to restore your place after doing some manual cursor
        movement.
        """
        # Save position and move to the requested column, row, or both:
        self.stream.write(self.save)
        if x is not None and y is not None:
            self.stream.write(self.move(y, x))
        elif x is not None:
            self.stream.write(self.move_x(x))
        elif y is not None:
            self.stream.write(self.move_y(y))
        try:
            yield
        finally:
            # Restore original cursor position:
            self.stream.write(self.restore)

    @contextmanager
    def fullscreen(self):
        """Return a context manager that enters fullscreen mode while inside it
        and restores normal mode on leaving."""
        self.stream.write(self.enter_fullscreen)
        try:
            yield
        finally:
            self.stream.write(self.exit_fullscreen)

    @contextmanager
    def hidden_cursor(self):
        """Return a context manager that hides the cursor while inside it and
        makes it visible on leaving."""
        self.stream.write(self.hide_cursor)
        try:
            yield
        finally:
            self.stream.write(self.normal_cursor)

    @property
    def color(self):
        """Return a capability that sets the foreground color.

        The capability is unparametrized until called and passed a number
        (0-15), at which point it returns another string which represents a
        specific color change. This second string can further be called to
        color a piece of text and set everything back to normal afterward.

        :arg num: The number, 0-15, of the color
        """
        return ParametrizingString(self._foreground_color, self.normal)

    @property
    def on_color(self):
        """Return a capability that sets the background color.

        See ``color()``.
        """
        return ParametrizingString(self._background_color, self.normal)

    @property
    def number_of_colors(self):
        """Return the number of colors the terminal supports.

        Common values are 0, 8, 16, 88, and 256.

        Though the underlying capability returns -1 when there is no color
        support, we return 0. This lets you test more Pythonically::

            if term.number_of_colors:
                ...

        We also return 0 if the terminal won't tell us how many colors it
        supports, which I think is rare.
        """
        # This is actually the only remotely useful numeric capability. We
        # don't name it after the underlying capability, because we deviate
        # slightly from its behavior, and we might someday wish to give direct
        # access to it.
        if not self._does_styling:
            # tigetnum() requires setupterm() to have run first; with styling
            # off we never called it (or it failed), and we support no colors
            # anyway. Guard here so this property can't raise curses.error.
            return 0
        colors = tigetnum('colors')  # Returns -1 if no color support, -2 if no
                                     # such cap.
        # self.__dict__['colors'] = ret  # Cache it. It's not changing.
        # (Doesn't work.)
        return colors if colors >= 0 else 0

    def _resolve_formatter(self, attr):
        """Resolve a sugary or plain capability name, color, or compound
        formatting function name into a callable capability.

        Return a ``ParametrizingString`` or a ``FormattingString``.
        """
        if attr in COLORS:
            return self._resolve_color(attr)
        elif attr in COMPOUNDABLES:
            # Bold, underline, or something that takes no parameters
            return self._formatting_string(self._resolve_capability(attr))
        else:
            formatters = split_into_formatters(attr)
            if all(f in COMPOUNDABLES for f in formatters):
                # It's a compound formatter, like "bold_green_on_red". Future
                # optimization: combine all formatting into a single escape
                # sequence.
                return self._formatting_string(
                    u''.join(self._resolve_formatter(s) for s in formatters))
            else:
                return ParametrizingString(self._resolve_capability(attr))

    def _resolve_capability(self, atom):
        """Return a terminal code for a capname or a sugary name, or an empty
        Unicode.

        The return value is always Unicode, because otherwise it is clumsy
        (especially in Python 3) to concatenate with real (Unicode) strings.
        """
        code = tigetstr(self._sugar.get(atom, atom))
        if code:
            # See the comment in ParametrizingString for why this is latin1.
            return code.decode('latin1')
        return u''

    def _resolve_color(self, color):
        """Resolve a color like red or on_bright_green into a callable
        capability."""
        # TODO: Does curses automatically exchange red and blue and cyan and
        # yellow when a terminal supports setf/setb rather than setaf/setab?
        # I'll be blasted if I can find any documentation. The following
        # assumes it does.
        color_cap = (self._background_color if 'on_' in color else
                     self._foreground_color)
        # curses constants go up to only 7, so add an offset to get at the
        # bright colors at 8-15:
        offset = 8 if 'bright_' in color else 0
        base_color = color.rsplit('_', 1)[-1]
        return self._formatting_string(
            color_cap(getattr(curses, 'COLOR_' + base_color.upper()) + offset))

    @property
    def _foreground_color(self):
        # setaf is the ANSI-flavored cap; setf is the older fallback.
        return self.setaf or self.setf

    @property
    def _background_color(self):
        return self.setab or self.setb

    def _formatting_string(self, formatting):
        """Return a new ``FormattingString`` which implicitly receives my
        notion of "normal"."""
        return FormattingString(formatting, self.normal)
def derivative_colors(colors):
    """Return the names of valid color variants, given the base colors.

    Each base color ``c`` contributes ``on_c``, ``bright_c``, and
    ``on_bright_c`` to the result set.
    """
    variants = set()
    for base in colors:
        variants.add('on_' + base)
        variants.add('bright_' + base)
        variants.add('on_bright_' + base)
    return variants
# The base terminal colors plus all on_/bright_/on_bright_ derivatives:
COLORS = {'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'}
COLORS.update(derivative_colors(COLORS))
# Every token that may appear as a segment of a compound formatter name
# such as "bold_green_on_red":
COMPOUNDABLES = (COLORS |
                 {'bold', 'underline', 'reverse', 'blink', 'dim', 'italic', 'shadow', 'standout', 'subscript',
                  'superscript'})
class ParametrizingString(unicode):
    """A Unicode string which can be called to parametrize it as a terminal
    capability"""

    def __new__(cls, formatting, normal=None):
        """Instantiate.

        :arg normal: If non-None, indicates that, once parametrized, this can
            be used as a ``FormattingString``. The value is used as the
            "normal" capability.
        """
        new = unicode.__new__(cls, formatting)
        new._normal = normal
        return new

    def __call__(self, *args):
        """Return the capability resolved with ``tparm`` and *args*.

        If a "normal" cap was supplied at construction, the result is a
        callable ``FormattingString``; otherwise it is a plain Unicode.
        Returns an empty Unicode when curses reports the terminal is gone
        or uninitialized.
        """
        try:
            # Re-encode the cap, because tparm() takes a bytestring in Python
            # 3. However, appear to be a plain Unicode string otherwise so
            # concats work.
            #
            # We use *latin1* encoding so that bytes emitted by tparm are
            # encoded to their native value: some terminal kinds, such as
            # 'avatar' or 'kermit', emit 8-bit bytes in range 0x7f to 0xff.
            # latin1 leaves these values unmodified in their conversion to
            # unicode byte values. The terminal emulator will "catch" and
            # handle these values, even if emitting utf8-encoded text, where
            # these bytes would otherwise be illegal utf8 start bytes.
            parametrized = tparm(self.encode('latin1'), *args).decode('latin1')
            return (parametrized if self._normal is None else
                    FormattingString(parametrized, self._normal))
        except curses.error:
            # Catch "must call (at least) setupterm() first" errors, as when
            # running simply `nosetests` (without progressive) on nose-
            # progressive. Perhaps the terminal has gone away between calling
            # tigetstr and calling tparm.
            return u''
        except TypeError:
            # If the first non-int (i.e. incorrect) arg was a string, suggest
            # something intelligent:
            if len(args) == 1 and isinstance(args[0], basestring):
                raise TypeError(
                    'A native or nonexistent capability template received '
                    '%r when it was expecting ints. You probably misspelled a '
                    'formatting call like bright_red_on_white(...).' % args)
            else:
                # Somebody passed a non-string; I don't feel confident
                # guessing what they were trying to do.
                raise
class FormattingString(unicode):
    """A Unicode string which, called on a piece of text, wraps that text
    in this formatting."""
    def __new__(cls, formatting, normal):
        instance = unicode.__new__(cls, formatting)
        instance._normal = normal
        return instance
    def __call__(self, text):
        """Return ``text`` wrapped in my formatting.

        My own contents (the formatting sequence) go in front of the text,
        and the "normal" capability goes after it, resetting everything back
        to defaults. The return value is always a Unicode.
        """
        wrapped = self + text + self._normal
        return wrapped
class NullCallableString(unicode):
    """A dummy callable Unicode standing in for ``FormattingString`` and
    ``ParametrizingString``.

    Used when there is no tty, so every capability should render as blank.
    """
    def __new__(cls):
        return unicode.__new__(cls, u'')
    def __call__(self, *args):
        """Echo a single non-int argument; otherwise return an empty Unicode.

        A single non-int argument means I am acting as a
        ``FormattingString``, so the text comes back unchanged. Should we
        force even strs in Python 2.x to be unicodes? No. How would I know
        what encoding to use to convert it?

        Anything else (an int, or not exactly one argument) means I am
        acting as a ``ParametrizingString``: an int is a good hint of that,
        as there are only about half a dozen string-returning capabilities
        on OS X's terminfo man page which take any param that's not an int,
        and those are seldom if ever used on modern terminal emulators.
        (Most have to do with programming function keys. Blessings' story
        for supporting non-string-returning caps is undeveloped.) And any
        parametrized capability in a situation where all capabilities
        themselves are taken to be blank are, of course, themselves blank.
        """
        if len(args) == 1 and not isinstance(args[0], int):
            # Acting as a FormattingString.
            return args[0]
        # Acting as a ParametrizingString.
        # tparm can take not only ints but also (at least) strings as its
        # second...nth args. But we don't support callably parametrizing
        # caps that take non-ints yet, so we can cheap out here. TODO: Go
        # through enough of the motions in the capability resolvers to
        # determine which of 2 special-purpose classes,
        # NullParametrizableString or NullFormattingString, to return, and
        # retire this one.
        return u''
def split_into_formatters(compound):
    """Split a possibly compound format string into segments.
    >>> split_into_formatters('bold_underline_bright_blue_on_red')
    ['bold', 'underline', 'bright_blue', 'on_red']
    """
    # These tokens occur only as prefixes, so a segment following one of
    # them always merges into it (possibly repeatedly, e.g. 'on_bright_red').
    mergeable_prefixes = ('on', 'bright', 'on_bright')
    segments = []
    for token in compound.split('_'):
        if segments and segments[-1] in mergeable_prefixes:
            segments[-1] = segments[-1] + '_' + token
        else:
            segments.append(token)
    return segments
| {
"repo_name": "syg5201314/demoCollection",
"path": "freeline/freeline_core/terminal.py",
"copies": "12",
"size": "22168",
"license": "apache-2.0",
"hash": 7924426690968879000,
"line_mean": 38.0970017637,
"line_max": 110,
"alpha_frac": 0.610519668,
"autogenerated": false,
"ratio": 4.38970297029703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
'''a thin shim for the parts of requests we use'''
import urllib2
class BetterHTTPErrorProcessor(urllib2.BaseHandler):
    # A substitute/supplement to urllib2.HTTPErrorProcessor that doesn't
    # raise exceptions on status codes 201, 204, 206 (and the error codes
    # below): the response object is handed back to the caller untouched.
    def _pass_through(self, request, response, code, msg, hdrs):
        # Hand the response back so the caller can inspect status/body.
        return response
    # urllib2 dispatches on method name per status code; every listed code
    # shares the same pass-through behavior.
    http_error_201 = _pass_through
    http_error_204 = _pass_through
    http_error_206 = _pass_through
    http_error_400 = _pass_through
    http_error_401 = _pass_through
    http_error_415 = _pass_through
    http_error_500 = _pass_through
# Install a global opener so every urllib2.urlopen() call in this module
# goes through the lenient error processor above.
opener = urllib2.build_opener(BetterHTTPErrorProcessor)
urllib2.install_opener(opener)
class Response(object):
    """Minimal response wrapper mimicking the requests.Response attributes
    this shim needs: ``status_code``, ``text`` and ``headers``."""
    def __init__(self, status_code, text, headers):
        self.status_code = status_code
        self.text = text
        self.headers = headers
    @classmethod
    def from_urllib2_response(cls, response):
        """Build a Response from a urllib2 response object."""
        # Read headers first, then the body, then the status code,
        # preserving the original call order on the urllib2 response.
        header_info = response.info()
        body = response.read()
        status = response.code
        return cls(status, body, header_info)
    def __str__(self):
        return '<oldreq.Response status=%d>' % self.status_code
class Session(object):
    """Tiny requests.Session lookalike built on urllib2."""
    def __init__(self):
        pass
    def query_req(self, method, url, headers=None):
        """Send a body-less request (GET/DELETE style) and wrap the reply."""
        headers = {} if headers is None else headers
        request = urllib2.Request(url, headers=headers)
        # urllib2 picks the verb via get_method(); override it to force ours.
        request.get_method = lambda: method
        raw = urllib2.urlopen(request)
        return Response.from_urllib2_response(raw)
    def body_req(self, method, url, data, headers=None):
        """Send a request carrying a body (POST/PUT/PATCH style)."""
        headers = {} if headers is None else headers
        request = urllib2.Request(url, data, headers)
        request.get_method = lambda: method
        raw = urllib2.urlopen(request)
        return Response.from_urllib2_response(raw)
    def post(self, url, data, headers=None):
        return self.body_req('POST', url, data, headers)
    def put(self, url, data, headers=None):
        return self.body_req('PUT', url, data, headers)
    def patch(self, url, data, headers=None):
        return self.body_req('PATCH', url, data, headers)
    def get(self, url, headers=None):
        return self.query_req('GET', url, headers)
    def delete(self, url, headers=None):
        return self.query_req('DELETE', url, headers)
| {
"repo_name": "marianoguerra/ioriodb",
"path": "tools/oldreq.py",
"copies": "2",
"size": "2590",
"license": "mpl-2.0",
"hash": -4649068526563540000,
"line_mean": 31.7848101266,
"line_max": 65,
"alpha_frac": 0.6459459459,
"autogenerated": false,
"ratio": 3.848439821693908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026934061497565507,
"num_lines": 79
} |
"""A thin wrapper around the C extension.
In this module the term 'matrix' is abused to mean numpy array.
"""
import itertools
import numpy as np
import hmmusbuf
import hmmusnodisk
MISSING = 127
def is_stochastic_vector(v):
    """Return True if v is a 1d numpy array of nonnegative entries
    summing to 1 (within 1e-7)."""
    if v.ndim != 1:
        return False
    if (v < 0).any():
        return False
    return bool(abs(1.0 - v.sum()) <= 1e-7)
def is_square_matrix(M):
    """Return True if M is a 2d numpy array with equal dimensions."""
    shape = M.shape
    return len(shape) == 2 and shape[0] == shape[1]
def is_right_stochastic_matrix(M):
    """Return True if M is square and every row is a stochastic vector."""
    return is_square_matrix(M) and all(is_stochastic_vector(row) for row in M)
def _reformat(distribution, transitions):
"""
Reformat the input as dtype float numpy arrays.
Also check for errors.
@param distribution: initial state distribution numpy array
@param transitions: transition matrix as numpy array
"""
# get the initial state distribution as a numpy array
np_distn = np.array(distribution, dtype=float)
if not is_stochastic_vector(np_distn):
msg = 'the initial distribution should be a stochastic vector'
raise ValueError(msg)
# get the transition matrix as a numpy array
np_trans = np.array(transitions, dtype=float)
if not is_right_stochastic_matrix(np_trans):
msg = 'the transition matrix should be a right stochastic matrix'
raise ValueError(msg)
# the vector and matrix should be conformant
nstates = np_distn.shape[0]
if np_trans.shape != (nstates, nstates):
msg_a = 'the number of states in the initial distribution does not '
msg_b = 'match the number of states in the transition matrix'
raise ValueError(msg_a + msg_b)
return np_distn, np_trans
def forward(distribution, transitions,
        likelihoods_name, forward_name, scaling_name):
    """
    Run the forward pass; delegates to the hmmusbuf C extension.
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param likelihoods_name: the likelihoods vector filename
    @param forward_name: the forward vector filename
    @param scaling_name: the scaling vector filename
    """
    np_distn, np_trans = _reformat(distribution, transitions)
    return hmmusbuf.forward(np_distn, np_trans,
            likelihoods_name, forward_name, scaling_name)
def backward(distribution, transitions,
        likelihoods_name, scaling_name, backward_name):
    """
    Run the backward pass; delegates to the hmmusbuf C extension.
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param likelihoods_name: the likelihoods vector filename
    @param scaling_name: the scaling vector filename
    @param backward_name: the backward vector filename
    """
    np_distn, np_trans = _reformat(distribution, transitions)
    return hmmusbuf.backward(np_distn, np_trans,
            likelihoods_name, scaling_name, backward_name)
def posterior(distribution, transitions,
        forward_name, scaling_name, backward_name, posterior_name):
    """
    Combine forward/backward results into posteriors via hmmusbuf.
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param forward_name: the forward vector filename
    @param scaling_name: the scaling vector filename
    @param backward_name: the backward vector filename
    @param posterior_name: the posterior vector filename
    """
    np_distn, np_trans = _reformat(distribution, transitions)
    return hmmusbuf.posterior(np_distn, np_trans,
            forward_name, scaling_name, backward_name, posterior_name)
def state_expectations(expectations, posterior_name):
    """
    Accumulate per-state expectations; delegates to hmmusbuf.
    @param expectations: a 1d numpy array to be filled by this function
    @param posterior_name: the posterior vector filename
    """
    return hmmusbuf.state_expectations(expectations, posterior_name)
def transition_expectations(trans, expectations,
        likelihoods_name, forward_name, backward_name):
    """
    Accumulate expected transition counts; delegates to hmmusbuf.
    @param trans: the transition matrix
    @param expectations: a 2d numpy array to be filled by this function
    @param likelihoods_name: the likelihoods vector filename
    @param forward_name: the forward vector filename
    @param backward_name: the backward vector filename
    """
    return hmmusbuf.transition_expectations(trans, expectations,
            likelihoods_name, forward_name, backward_name)
def emission_expectations(expectations, observation_name, posterior_name):
    """
    Compute emission expectations per state; delegates to hmmusbuf.
    Note that emissions are assumed to be from a small alphabet
    where each element of the alphabet fits in a byte.
    The expectations matrix should be a numpy array
    with the number of rows equal to the number of hidden states
    and with the number of columns equal to the size of the emission alphabet.
    @param expectations: a 2d numpy array to be filled by this function
    @param observation_name: the observation vector filename
    @param posterior_name: the posterior vector filename
    """
    return hmmusbuf.emission_expectations(expectations,
            observation_name, posterior_name)
def finite_alphabet_likelihoods(emissions,
        observation_name, likelihood_name):
    """
    Write a likelihood vector file; delegates to hmmusbuf.
    @param emissions: a 2d numpy array of emission probabilities per state
    @param observation_name: the observation vector filename
    @param likelihood_name: the likelihood vector filename
    """
    return hmmusbuf.finite_alphabet_likelihoods(emissions,
            observation_name, likelihood_name)
def sequence_log_likelihood(scaling_name):
    """
    @param scaling_name: the scaling vector filename
    @return: the sequence log likelihood
    """
    return hmmusbuf.sequence_log_likelihood(scaling_name)
def fwdbwd_alldisk(distribution, transitions,
        likelihoods_name,
        forward_name, scaling_name, backward_name,
        posterior_name):
    """
    Run the full forward-backward pipeline using on-disk buffers.
    Convenience wrapper chaining forward(), backward() and posterior().
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param likelihoods_name: the likelihoods vector filename
    @param forward_name: the forward vector filename
    @param scaling_name: the scaling vector filename
    @param backward_name: the backward vector filename
    @param posterior_name: the posterior vector filename
    """
    forward(distribution, transitions,
            likelihoods_name, forward_name, scaling_name)
    backward(distribution, transitions,
            likelihoods_name, scaling_name, backward_name)
    posterior(distribution, transitions,
            forward_name, scaling_name, backward_name, posterior_name)
def fwdbwd_somedisk(distribution, transitions,
        likelihoods_name, posterior_name):
    """
    Forward-backward with only likelihoods and posteriors on disk;
    delegates to hmmusbuf.
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param likelihoods_name: the likelihoods vector filename
    @param posterior_name: the posterior vector filename
    """
    np_distn, np_trans = _reformat(distribution, transitions)
    return hmmusbuf.fwdbwd_somedisk(np_distn, np_trans,
            likelihoods_name, posterior_name)
def fwdbwd_nodisk(distribution, transitions, np_likelihoods):
    """
    Forward-backward entirely in memory; delegates to hmmusbuf.
    @param distribution: initial state distribution
    @param transitions: transition probabilities
    @param np_likelihoods: 2d numpy array of likelihoods at each state
    @return: posterior distribution at each state (same shape as the input)
    """
    np_distn, np_trans = _reformat(distribution, transitions)
    if len(np_likelihoods.shape) != 2:
        msg = 'the matrix of likelihoods should be rectangular'
        raise ValueError(msg)
    # The C extension fills this preallocated output array in place.
    np_posterior = np.zeros_like(np_likelihoods)
    hmmusbuf.fwdbwd_nodisk(np_distn, np_trans, np_likelihoods, np_posterior)
    return np_posterior
def forward_nodisk(distn, trans, likelihood, forward, scaling):
    """In-memory forward pass; thin wrapper around hmmusnodisk.forward.
    Arguments mirror the disk-buffer variants above but take numpy arrays
    instead of filenames -- presumably filled in place; confirm against the
    hmmusnodisk extension.
    """
    return hmmusnodisk.forward(distn, trans, likelihood, forward, scaling)
def backward_nodisk(distn, trans, likelihood, scaling, backward):
    """In-memory backward pass; thin wrapper around hmmusnodisk.backward."""
    return hmmusnodisk.backward(distn, trans, likelihood, scaling, backward)
def posterior_nodisk(forward, scaling, backward, posterior):
    """In-memory posterior computation via hmmusnodisk.posterior."""
    return hmmusnodisk.posterior(forward, scaling, backward, posterior)
def finite_alphabet_likelihoods_nodisk(emissions, obs, likelihood):
    """In-memory per-state likelihoods for a byte-alphabet observation
    array; wraps hmmusnodisk.finite_alphabet_likelihoods."""
    return hmmusnodisk.finite_alphabet_likelihoods(emissions, obs, likelihood)
def transition_expectations_nodisk(trans, trans_expect,
        likelihood, forward, backward):
    """In-memory expected transition counts; wraps
    hmmusnodisk.transition_expectations."""
    return hmmusnodisk.transition_expectations(trans, trans_expect,
            likelihood, forward, backward)
def emission_expectations_nodisk(emiss_expect, obs, posterior):
    """In-memory emission expectations; wraps
    hmmusnodisk.emission_expectations."""
    return hmmusnodisk.emission_expectations(emiss_expect, obs, posterior)
def sequence_log_likelihood_nodisk(scaling):
    """Sequence log likelihood from an in-memory scaling array; wraps
    hmmusnodisk.sequence_log_likelihood."""
    return hmmusnodisk.sequence_log_likelihood(scaling)
def pretty_print_posterior(raw_observations, posterior, ncols, filename):
    """Write a human-readable posterior report; wraps
    hmmusnodisk.pretty_print_posterior."""
    return hmmusnodisk.pretty_print_posterior(
            raw_observations, posterior, ncols, filename)
def pretty_print_posterior_decoding(
        raw_observations, posterior, ncols, filename):
    """Write a human-readable posterior decoding; wraps
    hmmusnodisk.pretty_print_posterior_decoding."""
    return hmmusnodisk.pretty_print_posterior_decoding(
            raw_observations, posterior, ncols, filename)
| {
"repo_name": "argriffing/hmmus",
"path": "hmmus/hmm.py",
"copies": "1",
"size": "8312",
"license": "mit",
"hash": 3920345940688776700,
"line_mean": 35.9422222222,
"line_max": 78,
"alpha_frac": 0.7136669875,
"autogenerated": false,
"ratio": 3.986570743405276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5200237730905276,
"avg_score": null,
"num_lines": null
} |
"""A thin wrapping of the VoIP.ms REST API to make it slightly less than
horrible to use.
"""
import contextlib
from collections import Mapping
import requests
from functools import partial
def validate_response(response_obj):
    """Check for HTTP failures, API failures, and return the decoded JSON."""
    ok = requests.codes.ok
    if response_obj.status_code != ok:
        raise requests.HTTPError('expected status code {}, got {}'
                                 .format(ok, response_obj.status_code))
    payload = response_obj.json()
    # The API signals failure in-band via a 'status' field.
    if payload['status'] != 'success':
        raise VoipMSAPIError('API status returned {}'
                             .format(payload['status']),
                             payload)
    return payload
class VoipMSAPIError(Exception):
    """Raised when the API returns a non-success status.

    The second constructor argument, when supplied, carries the decoded
    JSON response for inspection by the caller.
    """
    pass
class VoipMS(object):
    """Client entry point for the VoIP.ms REST API.

    Exposes directory-style views over IVRs, call forwardings and DIDs.
    """
    def __init__(self, username, password,
                 url='https://voip.ms/api/v1/rest.php'):
        self.username = username
        self.password = password
        self.url = url
        # Lazy, dict-like views; each lookup triggers a fresh API call.
        self.ivrs = Directory(self, 'getIVRs', 'ivrs', 'name')
        self.forwarders = Directory(self, 'getForwardings',
                                    'forwardings', 'description')
        self.dids = Directory(self, 'getDIDsInfo', 'dids', 'did',
                              partial(DID, api=self))
    @property
    def credentials(self):
        """Credential query parameters that every API call must carry."""
        return {'api_username': self.username, 'api_password': self.password}
    @contextlib.contextmanager
    def credentialed_request(self, request):
        """Yield a copy of ``request`` augmented with the API credentials."""
        creds = self.credentials
        # The caller must not supply credential keys of its own.
        assert all(k not in request for k in creds.keys())
        merged = dict(request)
        merged.update(creds)
        yield merged
class Directory(Mapping):
    """
    Read-only, dict-like view backed by a REST listing call.

    Every access re-queries the API, so results always reflect the remote
    state. Returned items are plain (mutable) objects, but mutating them
    has no effect on the remote side.
    """
    def __init__(self, api, method_name, items_key, id_key, factory=dict):
        self.api = api
        self.method_name = method_name
        self.items_key = items_key
        self.id_key = id_key
        self.factory = factory
    def _query(self):
        """Fetch and return the raw list of records from the API."""
        with self.api.credentialed_request({'method': self.method_name}) as p:
            response = validate_response(requests.get(self.api.url, p))
        return response[self.items_key]
    def __getitem__(self, key):
        # Linear scan: the API offers a flat listing, not keyed lookup.
        for record in self._query():
            if record[self.id_key] == key:
                return self.factory(record)
        raise KeyError(key)
    def items(self):
        for record in self._query():
            yield record[self.id_key], self.factory(record)
    def keys(self):
        for key, _ in self.items():
            yield key
    def values(self):
        for _, value in self.items():
            yield value
    def __iter__(self):
        return iter(self.keys())
    def __len__(self):
        return len(self._query())
class DID(dict):
    """A DID record (dict subclass) with a method for changing its routing.

    Behaves like the raw record dict returned by the API, plus
    ``set_routing``.
    """
    def __init__(self, mapping, api):
        """
        :param mapping: the raw DID record returned by the API
        :param api: the owning ``VoipMS`` client, used for follow-up calls
        """
        self.api = api
        super(DID, self).__init__(mapping)
    def set_routing(self, which):
        """Point this DID at a call forwarding or an IVR.

        :param which: a mapping containing either a 'forwarding' or an
            'ivr' key whose value names the destination.
        :raises ValueError: if ``which`` contains neither key.
        """
        if 'forwarding' in which:
            kind = 'fwd'
            key = which['forwarding']
        elif 'ivr' in which:
            kind = 'ivr'
            key = which['ivr']
        else:
            # Previously this fell through with `kind`/`key` unbound and
            # raised a confusing NameError; fail fast and clearly instead.
            raise ValueError(
                "expected a 'forwarding' or 'ivr' key, got %r" % (which,))
        routing = ':'.join((kind, key))
        params = {'method': 'setDIDRouting', 'routing': routing,
                  'did': self['did']}
        with self.api.credentialed_request(params) as creds_params:
            response = validate_response(
                requests.get(self.api.url, creds_params))
        return response
| {
"repo_name": "dwf/didroute",
"path": "didroute/api.py",
"copies": "1",
"size": "3985",
"license": "mit",
"hash": -3783722952712917000,
"line_mean": 31.6639344262,
"line_max": 79,
"alpha_frac": 0.582434128,
"autogenerated": false,
"ratio": 4.120992761116856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203426889116856,
"avg_score": null,
"num_lines": null
} |
"""A third example on how to modify the Seamless environment,
adding direct Cython support via an ipy template
The graph is then saved and re-loaded
"""
import traceback
from seamless.highlevel import Context, Cell, Transformer
ctx = Context()
env = ctx.environment
# Define a transformer in IPython format that uses Cython magic
ctx.tf = Transformer()
ctx.tf.a = 123
ctx.tf.language = "ipython"
ctx.tf.code = """
%load_ext Cython
%%cython
from libc.math cimport log
def func(int i):
cdef int n
cdef int nn
cdef double s = 0
for n in range(i):
for nn in range(i):
s += log(n*nn+1)/10000.0
return s
result = func(a)
"""
ctx.compute()
print(ctx.tf.result.value)
# For good measure, define an execution environment
# This should give no problem, as Cython is installed in the Seamless Docker image
ctx.tf.environment.set_conda("""
dependencies:
- cython
""", "yaml")
ctx.tf.environment.set_which(["cython"], format="plain")
ctx.compute()
print(ctx.tf.status)
print(ctx.tf.result.value)
# Now set the transformer as pure Cython code
# - We must call it "transform"
# - The argument must be "a", the pin name
# - Cython arguments with a C type cannot be both positional and keyword
# Therefore, it must be declared as keyword-only
ctx.tf.code = """
from libc.math cimport log
def transform(*, int a):
cdef int n
cdef int nn
cdef double s = 0
for n in range(a):
for nn in range(a):
s += log(n*nn+1)/10000.0
return s
"""
# Set the language as Cython
# => fail, unknown language
try:
ctx.tf.language = "cython"
except KeyError as exc:
traceback.print_exc(limit=0)
print()
# Have a look how languages are defined...
languages = env.get_languages("cson")
print("\n".join(languages.splitlines()[:10]))
print()
# Create a new language "cython"
languages = env.get_languages("plain")
languages["cython"] = {
"extension": "pyx",
"mode": "interpreted",
}
env.set_languages(languages, "plain")
# Set the language as cython => success
ctx.tf.language = "cython"
# Seamless will refuse to translate a graph
# that contains unimplemented interpreted languages
try:
ctx.translate()
except NotImplementedError as exc:
traceback.print_exc(limit=0)
print()
#### help(env.set_ipy_template) # for interactive use
# TODO: make sure that PINS is documented
def wrap_cython(code, parameters):
    """IPython-template generator for the custom 'cython' language.

    Wraps the user's Cython source in an IPython cell that compiles it via
    the %%cython magic and calls the mandatory 'transform' function with
    the pin values (PINS).
    ``parameters`` is required by the ipy-template signature but unused
    here -- presumably the pin parameters; confirm against
    env.set_ipy_template.
    """
    # The only '{}' in the template is the placeholder below, so a plain
    # str.format with the repr of the code is safe.
    tmpl = """
get_ipython().run_line_magic("load_ext", "Cython")
get_ipython().run_cell_magic("cython", "", {})
if "transform" not in globals():
    raise Exception("Cython code must define a function 'transform'")
result = transform(**PINS)
"""
    return tmpl.format(repr(code))
env.set_ipy_template("cython", wrap_cython)
# Define an environment for the Cython code generator
from seamless.highlevel.Environment import Environment
tmpl_env = Environment()
tmpl_env.set_conda("""
dependencies:
- cython
""", "yaml")
tmpl_env.set_which(["cython"], format="plain")
env.set_ipy_template_environment("cython", tmpl_env)
ctx.compute()
print(ctx.tf.status)
print(ctx.tf.exception)
print(ctx.tf.result.value)
ctx.save_graph("environment3.seamless")
ctx.save_zip("environment3.zip") | {
"repo_name": "sjdv1982/seamless",
"path": "tests/highlevel/environment3.py",
"copies": "1",
"size": "3192",
"license": "mit",
"hash": -7037300985390984000,
"line_mean": 24.1417322835,
"line_max": 82,
"alpha_frac": 0.6948621554,
"autogenerated": false,
"ratio": 3.363540569020021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4558402724420021,
"avg_score": null,
"num_lines": null
} |
"""A thorough test of polling PAIR sockets."""
#-----------------------------------------------------------------------------
# Copyright (c) 2010 Brian Granger
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import time
import zmq
print "Running polling tests for PAIR sockets..."
addr = 'tcp://127.0.0.1:5555'
ctx = zmq.Context()
s1 = ctx.socket(zmq.PAIR)
s2 = ctx.socket(zmq.PAIR)
s1.bind(addr)
s2.connect(addr)
# Sleep to allow sockets to connect.
time.sleep(1.0)
poller = zmq.Poller()
poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
# Now make sure that both are send ready.
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT
assert socks[s2] == zmq.POLLOUT
# Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
s1.send('msg1')
s2.send('msg2')
time.sleep(1.0)
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT|zmq.POLLIN
assert socks[s2] == zmq.POLLOUT|zmq.POLLIN
# Make sure that both are in POLLOUT after recv.
s1.recv()
s2.recv()
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT
assert socks[s2] == zmq.POLLOUT
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
time.sleep(1.0)
print "Finished." | {
"repo_name": "caidongyun/pyzmq",
"path": "examples/poll/pair.py",
"copies": "10",
"size": "1400",
"license": "bsd-3-clause",
"hash": 5877415166944736000,
"line_mean": 24.0178571429,
"line_max": 78,
"alpha_frac": 0.6392857143,
"autogenerated": false,
"ratio": 2.9723991507430996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003144752829812311,
"num_lines": 56
} |
"""A thorough test of polling REQ/REP sockets."""
#-----------------------------------------------------------------------------
# Copyright (c) 2010 Brian Granger
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import time
import zmq
print "Running polling tests for REQ/REP sockets..."
addr = 'tcp://127.0.0.1:5555'
ctx = zmq.Context()
s1 = ctx.socket(zmq.REP)
s2 = ctx.socket(zmq.REQ)
s1.bind(addr)
s2.connect(addr)
# Sleep to allow sockets to connect.
time.sleep(1.0)
poller = zmq.Poller()
poller.register(s1, zmq.POLLIN|zmq.POLLOUT)
poller.register(s2, zmq.POLLIN|zmq.POLLOUT)
# Make sure that s1 is in state 0 and s2 is in POLLOUT
socks = dict(poller.poll())
assert not socks.has_key(s1)
assert socks[s2] == zmq.POLLOUT
# Make sure that s2 goes immediately into state 0 after send.
s2.send('msg1')
socks = dict(poller.poll())
assert not socks.has_key(s2)
# Make sure that s1 goes into POLLIN state after a time.sleep().
time.sleep(0.5)
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLIN
# Make sure that s1 goes into POLLOUT after recv.
s1.recv()
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT
# Make sure s1 goes into state 0 after send.
s1.send('msg2')
socks = dict(poller.poll())
assert not socks.has_key(s1)
# Wait and then see that s2 is in POLLIN.
time.sleep(0.5)
socks = dict(poller.poll())
assert socks[s2] == zmq.POLLIN
# Make sure that s2 is in POLLOUT after recv.
s2.recv()
socks = dict(poller.poll())
assert socks[s2] == zmq.POLLOUT
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
time.sleep(1.0)
print "Finished."
| {
"repo_name": "Mustard-Systems-Ltd/pyzmq",
"path": "examples/poll/reqrep.py",
"copies": "10",
"size": "1776",
"license": "bsd-3-clause",
"hash": 833007640773809900,
"line_mean": 24.014084507,
"line_max": 78,
"alpha_frac": 0.6537162162,
"autogenerated": false,
"ratio": 2.9748743718592965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8628590588059296,
"avg_score": null,
"num_lines": null
} |
"""A thread-based worker pool."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import threading
import time
import socket
from six.moves import queue
__all__ = ('WorkerThread', 'ThreadPool')
class TrueyZero:
    """Additive identity (acts like the integer 0) that still evaluates
    as True, so it survives ``and``/``or`` chains where a falsy 0 would
    short-circuit."""
    def __add__(self, value):
        # x + TrueyZero() == x, either way around.
        return value
    __radd__ = __add__
# Shared singleton used by the worker-thread stats lambdas.
trueyzero = TrueyZero()
# Sentinel placed on the request queue to tell one worker to shut down.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.
    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """
    conn = None
    """The current connection pulled off the Queue, or None."""
    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""
    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""
    def __init__(self, server):
        """Initialize WorkerThread instance.
        Args:
            server (cheroot.server.HTTPServer): web server object
                receiving this request
        """
        self.ready = False
        self.server = server
        # Totals accumulated from completed connections; while a connection
        # is live, the stats lambdas below add its in-flight counters
        # (trueyzero keeps the and/or chain from short-circuiting on 0).
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        self.stats = {
            'Requests': lambda s: self.requests_seen + (
                self.start_time is None
                and trueyzero
                or self.conn.requests_seen
            ),
            'Bytes Read': lambda s: self.bytes_read + (
                self.start_time is None
                and trueyzero
                or self.conn.rfile.bytes_read
            ),
            'Bytes Written': lambda s: self.bytes_written + (
                self.start_time is None
                and trueyzero
                or self.conn.wfile.bytes_written
            ),
            'Work Time': lambda s: self.work_time + (
                self.start_time is None
                and trueyzero
                or time.time() - self.start_time
            ),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (
                s['Work Time'](s) or 1e-6
            ),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (
                s['Work Time'](s) or 1e-6
            ),
        }
        threading.Thread.__init__(self)
    def run(self):
        """Process incoming HTTP connections.
        Retrieves incoming connections from thread pool.
        """
        # Thread.name (available since Python 2.6) replaces the camelCase
        # getName() alias, which is deprecated in modern Python 3.
        self.server.stats['Worker Threads'][self.name] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return
                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    conn.close()
                    if self.server.stats['Enabled']:
                        # Fold the finished connection's counters into the
                        # totals and mark this worker idle again.
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                self.conn = None
        except (KeyboardInterrupt, SystemExit) as ex:
            # Let the server decide how to unwind on interrupt.
            self.server.interrupt = ex
class ThreadPool:
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(
self, server, min=10, max=-1, accepted_queue_size=-1,
accepted_queue_timeout=10,
):
"""Initialize HTTP requests queue instance.
Args:
server (cheroot.server.HTTPServer): web server object
receiving this request
min (int): minimum number of worker threads
max (int): maximum number of worker threads
accepted_queue_size (int): maximum number of active
requests in queue
accepted_queue_timeout (int): timeout for putting request
into queue
"""
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue(maxsize=accepted_queue_size)
self._queue_put_timeout = accepted_queue_timeout
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName('CP Server ' + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
@property
def idle(self): # noqa: D401; irrelevant for properties
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
def put(self, obj):
"""Put request into queue.
Args:
obj (cheroot.server.HTTPConnection): HTTP connection
waiting to be processed
"""
self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not all(worker.ready for worker in workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName('CP Server ' + worker.getName())
worker.start()
return worker
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
"""Terminate all worker threads.
Args:
timeout (int): time to wait for threads to stop gracefully
"""
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout is not None and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (
AssertionError,
# Ignore repeated Ctrl-C.
# See
# https://github.com/cherrypy/cherrypy/issues/691.
KeyboardInterrupt,
):
pass
@property
def qsize(self):
"""Return the queue size."""
return self._queue.qsize()
| {
"repo_name": "Southpaw-TACTIC/TACTIC",
"path": "3rd_party/python3/site-packages/cheroot/workers/threadpool.py",
"copies": "3",
"size": "9345",
"license": "epl-1.0",
"hash": -7128710743203117000,
"line_mean": 32.8586956522,
"line_max": 78,
"alpha_frac": 0.531835206,
"autogenerated": false,
"ratio": 4.665501747378932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 276
} |
"""A thread-based worker pool.
.. spelling::
joinable
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import collections
import threading
import time
import socket
import warnings
from six.moves import queue
from jaraco.functools import pass_none
__all__ = ('WorkerThread', 'ThreadPool')
class TrueyZero:
    """Additive identity that is nevertheless truthy.

    Adding an instance to any value, from either side, returns that value
    unchanged (like the integer 0 would), but unlike 0 the instance
    evaluates as True in a boolean context.
    """

    def __add__(self, other):
        return other

    def __radd__(self, other):
        return other
# Shared singleton used by the WorkerThread.stats lambdas as a truthy zero.
trueyzero = TrueyZero()
# Sentinel placed on the request queue to tell a worker thread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.
    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """
    conn = None
    """The current connection pulled off the Queue, or None."""
    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""
    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""
    def __init__(self, server):
        """Initialize WorkerThread instance.
        Args:
            server (cheroot.server.HTTPServer): web server object
                receiving this request
        """
        self.ready = False
        self.server = server
        # Cumulative per-thread counters, folded in after each request.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is a lambda so it can be sampled live.  While a request
        # is in flight (start_time is not None) the current connection's
        # counters are added on top of the totals; otherwise ``trueyzero``
        # acts as a truthy 0 so the ``and``/``or`` chain adds nothing.
        self.stats = {
            'Requests': lambda s: self.requests_seen + (
                self.start_time is None
                and trueyzero
                or self.conn.requests_seen
            ),
            'Bytes Read': lambda s: self.bytes_read + (
                self.start_time is None
                and trueyzero
                or self.conn.rfile.bytes_read
            ),
            'Bytes Written': lambda s: self.bytes_written + (
                self.start_time is None
                and trueyzero
                or self.conn.wfile.bytes_written
            ),
            'Work Time': lambda s: self.work_time + (
                self.start_time is None
                and trueyzero
                or time.time() - self.start_time
            ),
            # Throughputs divide by work time, guarding against zero.
            'Read Throughput': lambda s: s['Bytes Read'](s) / (
                s['Work Time'](s) or 1e-6
            ),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (
                s['Work Time'](s) or 1e-6
            ),
        }
        threading.Thread.__init__(self)
    def run(self):
        """Process incoming HTTP connections.
        Retrieves incoming connections from thread pool.
        """
        self.server.stats['Worker Threads'][self.name] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                # The shutdown sentinel makes this worker exit its loop.
                if conn is _SHUTDOWNREQUEST:
                    return
                self.conn = conn
                is_stats_enabled = self.server.stats['Enabled']
                if is_stats_enabled:
                    self.start_time = time.time()
                keep_conn_open = False
                try:
                    keep_conn_open = conn.communicate()
                finally:
                    # Keep-alive connections go back to the server's pool;
                    # everything else is closed here.
                    if keep_conn_open:
                        self.server.put_conn(conn)
                    else:
                        conn.close()
                    if is_stats_enabled:
                        # Fold this request's counters into the totals.
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                self.conn = None
        except (KeyboardInterrupt, SystemExit) as ex:
            # Surface the interruption to the server's main loop.
            self.server.interrupt = ex
class ThreadPool:
    """A Request Queue for an HTTPServer which pools threads.
    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """
    def __init__(
        self, server, min=10, max=-1, accepted_queue_size=-1,
        accepted_queue_timeout=10,
    ):
        """Initialize HTTP requests queue instance.
        Args:
            server (cheroot.server.HTTPServer): web server object
                receiving this request
            min (int): minimum number of worker threads
            max (int): maximum number of worker threads (<= 0 means
                unbounded)
            accepted_queue_size (int): maximum number of active
                requests in queue
            accepted_queue_timeout (int): timeout for putting request
                into queue
        """
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = queue.Queue(maxsize=accepted_queue_size)
        self._queue_put_timeout = accepted_queue_timeout
        # Expose the queue's get() directly; workers call pool.get().
        self.get = self._queue.get
        # One entry per shutdown request that has not yet killed a worker.
        self._pending_shutdowns = collections.deque()
    def start(self):
        """Start the pool of threads."""
        for _ in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            # Fix: assign a plain string.  The original wrapped the
            # format() call in parentheses with a trailing comma, which
            # made ``worker.name`` a 1-tuple instead of a string.
            worker.name = 'CP Server {worker_name!s}'.format(
                worker_name=worker.name,
            )
            worker.start()
        # Block until every worker is actually polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)
    @property
    def idle(self):  # noqa: D401; irrelevant for properties
        """Number of worker threads which are idle. Read-only."""
        idles = len([t for t in self._threads if t.conn is None])
        # Workers already marked for shutdown are not really available.
        return max(idles - len(self._pending_shutdowns), 0)
    def put(self, obj):
        """Put request into queue.
        Args:
            obj (:py:class:`~cheroot.server.HTTPConnection`): HTTP connection
                waiting to be processed
        """
        self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
    def _clear_dead_threads(self):
        # Remove any dead threads from our list
        for t in [t for t in self._threads if not t.is_alive()]:
            self._threads.remove(t)
            # A dead worker may have consumed one pending shutdown request.
            try:
                self._pending_shutdowns.popleft()
            except IndexError:
                pass
    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        if self.max > 0:
            budget = max(self.max - len(self._threads), 0)
        else:
            # self.max <= 0 indicates no maximum
            budget = float('inf')
        n_new = min(amount, budget)
        workers = [self._spawn_worker() for _ in range(n_new)]
        # Wait for the new workers to start polling before returning.
        while not all(worker.ready for worker in workers):
            time.sleep(.1)
        self._threads.extend(workers)
    def _spawn_worker(self):
        # Create, name and start a single worker thread.
        worker = WorkerThread(self.server)
        # Fix: assign a plain string (see start() for details).
        worker.name = 'CP Server {worker_name!s}'.format(
            worker_name=worker.name,
        )
        worker.start()
        return worker
    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        amount -= len(self._pending_shutdowns)
        self._clear_dead_threads()
        if amount <= 0:
            return
        # calculate the number of threads above the minimum
        n_extra = max(len(self._threads) - self.min, 0)
        # don't remove more than amount
        n_to_remove = min(amount, n_extra)
        # put shutdown requests on the queue equal to the number of threads
        # to remove. As each request is processed by a worker, that worker
        # will terminate and be culled from the list.
        for _ in range(n_to_remove):
            self._pending_shutdowns.append(None)
            self._queue.put(_SHUTDOWNREQUEST)
    def stop(self, timeout=5):
        """Terminate all worker threads.
        Args:
            timeout (int): time to wait for threads to stop gracefully
        """
        # for compatability, negative timeouts are treated like None
        # TODO: treat negative timeouts like already expired timeouts
        if timeout is not None and timeout < 0:
            timeout = None
            # Fix: the warnings module has no ``warning`` function; the
            # original ``warnings.warning(...)`` raised AttributeError
            # whenever a negative timeout was passed.
            warnings.warn(
                'In the future, negative timeouts to Server.stop() '
                'will be equivalent to a timeout of zero.',
                stacklevel=2,
            )
        if timeout is not None:
            endtime = time.time() + timeout
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)
        ignored_errors = (
            # TODO: explain this exception.
            AssertionError,
            # Ignore repeated Ctrl-C. See cherrypy#691.
            KeyboardInterrupt,
        )
        for worker in self._clear_threads():
            remaining_time = timeout and endtime - time.time()
            try:
                worker.join(remaining_time)
                if worker.is_alive():
                    # Timeout exhausted; forcibly shut down the socket.
                    self._force_close(worker.conn)
                    worker.join()
            except ignored_errors:
                pass
    @staticmethod
    @pass_none
    def _force_close(conn):
        # ``pass_none`` turns a None conn into a no-op.
        if conn.rfile.closed:
            return
        try:
            try:
                conn.socket.shutdown(socket.SHUT_RD)
            except TypeError:
                # pyOpenSSL sockets don't take an arg
                conn.socket.shutdown()
        except OSError:
            # shutdown sometimes fails (race with 'closed' check?)
            # ref #238
            pass
    def _clear_threads(self):
        """Clear self._threads and yield all joinable threads."""
        # threads = pop_all(self._threads)
        threads, self._threads[:] = self._threads[:], []
        return (
            thread
            for thread in threads
            if thread is not threading.current_thread()
        )
    @property
    def qsize(self):
        """Return the queue size."""
        return self._queue.qsize()
| {
"repo_name": "cherrypy/cheroot",
"path": "cheroot/workers/threadpool.py",
"copies": "1",
"size": "10586",
"license": "bsd-3-clause",
"hash": 3695387501319372300,
"line_mean": 31.1762917933,
"line_max": 78,
"alpha_frac": 0.5483657661,
"autogenerated": false,
"ratio": 4.500850340136054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5549216106236055,
"avg_score": null,
"num_lines": null
} |
"""A threading based handler.
The :class:`SequentialThreadingHandler` is intended for regular Python
environments that use threads.
.. warning::
Do not use :class:`SequentialThreadingHandler` with applications
using asynchronous event loops (like gevent). Use the
:class:`~kazoo.handlers.gevent.SequentialGeventHandler` instead.
"""
from __future__ import absolute_import
from collections import defaultdict
import errno
from itertools import chain
import logging
import select
import socket
import threading
import time
import six
import kazoo.python2atexit as python2atexit
from kazoo.handlers import utils
try:
import Queue
except ImportError: # pragma: nocover
import queue as Queue
# sentinel objects
# Placed on a worker queue to tell its thread to exit.
_STOP = object()
log = logging.getLogger(__name__)
# select.epoll exists only on Linux; used to pick the select implementation.
_HAS_EPOLL = hasattr(select, "epoll")
def _to_fileno(obj):
    """Return *obj* as a non-negative integer file descriptor.

    Accepts either an integer fd or any object exposing ``fileno()``.
    Raises TypeError for anything else and ValueError for negative fds.
    """
    if isinstance(obj, six.integer_types):
        descriptor = int(obj)
    else:
        if not hasattr(obj, "fileno"):
            raise TypeError(
                "argument must be an int, or have a fileno() method.")
        descriptor = obj.fileno()
        if not isinstance(descriptor, six.integer_types):
            raise TypeError("fileno() returned a non-integer")
        descriptor = int(descriptor)
    if descriptor < 0:
        raise ValueError(
            "file descriptor cannot be a negative integer (%d)" % (descriptor,)
        )
    return descriptor
class KazooTimeoutError(Exception):
    """Raised (via AsyncResult) when a wait exceeds its timeout."""
    pass
class AsyncResult(utils.AsyncResult):
    """A one-time event that stores a value or an exception"""
    def __init__(self, handler):
        # Use a plain threading.Condition for signalling, and raise
        # KazooTimeoutError when a timed wait expires.
        super(AsyncResult, self).__init__(handler,
                                          threading.Condition,
                                          KazooTimeoutError)
class SequentialThreadingHandler(object):
    """Threading handler for sequentially executing callbacks.
    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially. These are split into two
    queues, one for watch events and one for async result completion
    callbacks.
    Each queue type has a thread worker that pulls the callback event
    off the queue and runs it in the order the client sees it.
    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.
    Watch and completion callbacks should avoid blocking behavior as
    the next callback of that type won't be run until it completes. If
    you need to block, spawn a new thread and return immediately so
    callbacks can proceed.
    .. note::
        Completion callbacks can block to wait on Zookeeper calls, but
        no other completion callbacks will execute until the callback
        returns.
    """
    name = "sequential_threading_handler"
    timeout_exception = KazooTimeoutError
    sleep_func = staticmethod(time.sleep)
    queue_impl = Queue.Queue
    queue_empty = Queue.Empty
    def __init__(self):
        """Create a :class:`SequentialThreadingHandler` instance"""
        self.callback_queue = self.queue_impl()
        self.completion_queue = self.queue_impl()
        self._running = False
        # Guards start()/stop() so they are safe to call concurrently.
        self._state_change = threading.Lock()
        self._workers = []
    @property
    def running(self):
        """Whether the worker threads have been started."""
        return self._running
    def _create_thread_worker(self, queue):
        # Spawn a daemon thread that runs queued callables one at a time
        # until the _STOP sentinel arrives.
        def _thread_worker():  # pragma: nocover
            while True:
                try:
                    func = queue.get()
                    try:
                        if func is _STOP:
                            break
                        func()
                    except Exception:
                        # Never let one bad callback kill the worker.
                        log.exception("Exception in worker queue thread")
                    finally:
                        queue.task_done()
                        del func  # release before possible idle
                except self.queue_empty:
                    continue
        t = self.spawn(_thread_worker)
        return t
    def start(self):
        """Start the worker threads."""
        with self._state_change:
            if self._running:
                return
            # Spawn our worker threads, we have
            # - A callback worker for watch events to be called
            # - A completion worker for completion events to be called
            for queue in (self.completion_queue, self.callback_queue):
                w = self._create_thread_worker(queue)
                self._workers.append(w)
            self._running = True
            python2atexit.register(self.stop)
    def stop(self):
        """Stop the worker threads and empty all queues."""
        with self._state_change:
            if not self._running:
                return
            self._running = False
            # One _STOP per worker; each worker consumes exactly one.
            for queue in (self.completion_queue, self.callback_queue):
                queue.put(_STOP)
            self._workers.reverse()
            while self._workers:
                worker = self._workers.pop()
                worker.join()
            # Clear the queues
            self.callback_queue = self.queue_impl()
            self.completion_queue = self.queue_impl()
            python2atexit.unregister(self.stop)
    def select(self, *args, **kwargs):
        """Drop-in for select.select that uses epoll for fds above 1023."""
        # if we have epoll, and select is not expected to work
        # use an epoll-based "select". Otherwise don't touch
        # anything to minimize changes
        if _HAS_EPOLL:
            # if the highest fd we've seen is > 1023
            if max(map(_to_fileno, chain.from_iterable(args[:3]))) > 1023:
                return self._epoll_select(*args, **kwargs)
        return self._select(*args, **kwargs)
    def _select(self, *args, **kwargs):
        timeout = kwargs.pop('timeout', None)
        # either the time to give up, or None
        # NOTE(review): a timeout of 0 is treated like None (no deadline)
        # by this truthiness test — confirm intended.
        end = (time.time() + timeout) if timeout else None
        while end is None or time.time() < end:
            if end is not None:
                # make a list, since tuples aren't mutable
                args = list(args)
                # set the timeout to the remaining time
                # NOTE(review): assumes the select timeout was passed
                # positionally as args[3] — verify against callers.
                args[3] = end - time.time()
            try:
                return select.select(*args, **kwargs)
            except select.error as ex:
                # if the system call was interrupted, we'll retry until timeout
                # in Python 3, system call interruptions are a native exception
                # in Python 2, they are not
                errnum = ex.errno if isinstance(ex, OSError) else ex[0]
                if errnum == errno.EINTR:
                    continue
                raise
        # if we hit our timeout, lets return as a timeout
        return ([], [], [])
    def _epoll_select(self, rlist, wlist, xlist, timeout=None):
        """epoll-based drop-in replacement for select to overcome select
        limitation on a maximum filehandle value
        """
        # epoll uses -1 (not None) to mean "wait forever".
        if timeout is None:
            timeout = -1
        eventmasks = defaultdict(int)
        rfd2obj = defaultdict(list)
        wfd2obj = defaultdict(list)
        xfd2obj = defaultdict(list)
        read_evmask = select.EPOLLIN | select.EPOLLPRI  # Just in case
        # Map each object to its fd and accumulate the event mask per fd.
        def store_evmasks(obj_list, evmask, fd2obj):
            for obj in obj_list:
                fileno = _to_fileno(obj)
                eventmasks[fileno] |= evmask
                fd2obj[fileno].append(obj)
        store_evmasks(rlist, read_evmask, rfd2obj)
        store_evmasks(wlist, select.EPOLLOUT, wfd2obj)
        store_evmasks(xlist, select.EPOLLERR, xfd2obj)
        poller = select.epoll()
        for fileno in eventmasks:
            poller.register(fileno, eventmasks[fileno])
        try:
            events = poller.poll(timeout)
            revents = []
            wevents = []
            xevents = []
            # Translate fds back into the caller's original objects.
            for fileno, event in events:
                if event & read_evmask:
                    revents += rfd2obj.get(fileno, [])
                if event & select.EPOLLOUT:
                    wevents += wfd2obj.get(fileno, [])
                if event & select.EPOLLERR:
                    xevents += xfd2obj.get(fileno, [])
        finally:
            poller.close()
        return revents, wevents, xevents
    def socket(self):
        """Create a TCP socket via the shared utils helper."""
        return utils.create_tcp_socket(socket)
    def create_connection(self, *args, **kwargs):
        """Create and connect a TCP socket via the shared utils helper."""
        return utils.create_tcp_connection(socket, *args, **kwargs)
    def create_socket_pair(self):
        """Create a connected pair of sockets."""
        return utils.create_socket_pair(socket)
    def event_object(self):
        """Create an appropriate Event object"""
        return threading.Event()
    def lock_object(self):
        """Create a lock object"""
        return threading.Lock()
    def rlock_object(self):
        """Create an appropriate RLock object"""
        return threading.RLock()
    def async_result(self):
        """Create a :class:`AsyncResult` instance"""
        return AsyncResult(self)
    def spawn(self, func, *args, **kwargs):
        """Run ``func`` in a new daemon thread and return the thread."""
        t = threading.Thread(target=func, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t
    def dispatch_callback(self, callback):
        """Dispatch to the callback object
        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialThreadingHandler`.
        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/kazoo-2.8.0/kazoo/handlers/threading.py",
"copies": "3",
"size": "9459",
"license": "apache-2.0",
"hash": 1722115173394408400,
"line_mean": 31.5051546392,
"line_max": 79,
"alpha_frac": 0.589597209,
"autogenerated": false,
"ratio": 4.405682347461575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6495279556461575,
"avg_score": null,
"num_lines": null
} |
"""A thread-safe way to interrupt a Hub waiting on IO."""
import os
import errno
from greennet import get_hub
class Trigger(object):
    """Thread-safe way to wake a Hub that is waiting on IO.

    A pipe is created: the hub waits for the read end (``_gun``) to
    become readable, and any thread can make that happen by writing a
    single byte to the write end via :meth:`pull`.
    """
    __slots__ = ('hub', '_gun', '_trigger', '_closed')
    def __init__(self, hub=None):
        self.hub = get_hub() if hub is None else hub
        self._gun, self._trigger = os.pipe()
        self._closed = False
    def wait(self, timeout=None):
        """Block until the trigger is pulled (or ``timeout`` elapses)."""
        if self._closed:
            raise IOError(errno.EBADF, os.strerror(errno.EBADF))
        self.hub.poll(self._gun, read=True, timeout=timeout)
        # Consume the wake-up byte so the pipe does not fill up.
        os.read(self._gun, 1)
    def pull(self):
        """Wake a waiter by writing one byte into the pipe."""
        if self._closed:
            raise IOError(errno.EBADF, os.strerror(errno.EBADF))
        while True:
            try:
                # b'x' (instead of 'x') keeps this working on Python 3;
                # on Python 2 the two literals are identical.
                os.write(self._trigger, b'x')
            # ``except ... as err`` replaces the Python-2-only comma
            # syntax and works on Python 2.6+ and Python 3.
            except (IOError, OSError) as err:
                if err.args[0] == errno.EINTR:
                    continue
                elif err.args[0] == errno.EAGAIN:
                    # Pipe already full: a wake-up is pending anyway.
                    return
                raise
            return
    def close(self):
        """Close both pipe ends; later wait()/pull() raise EBADF."""
        self._closed = True
        for fd in self._gun, self._trigger:
            try:
                os.close(fd)
            # Fix: os.close raises OSError, which the original
            # ``except IOError`` clause did not catch on Python 2
            # (IOError and OSError are distinct there).
            except (IOError, OSError):
                pass
        del self._gun, self._trigger
if __name__ == '__main__':
    # Running this module directly executes its doctests.
    import doctest
    doctest.testmod()
| {
"repo_name": "dhain/greennet",
"path": "greennet/trigger.py",
"copies": "1",
"size": "1319",
"license": "mit",
"hash": -4995111651672118000,
"line_mean": 24.862745098,
"line_max": 64,
"alpha_frac": 0.506444276,
"autogenerated": false,
"ratio": 3.984894259818731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49913385358187307,
"avg_score": null,
"num_lines": null
} |
# A tile
class tile():
    """One map tile: its x/y position within the tileset plus the image
    drawn for it on the canvas."""

    def __init__(self):
        # Fresh tiles have no tileset position and no image yet.
        self.x = 0
        self.y = 0
        self.image = None
# A level
class level():
    """A tile level: its dimensions (in tiles) and its tile list."""
    # Class-level defaults; instances overwrite these with their own values.
    # NOTE(review): levelTiles is a mutable class attribute, so it is shared
    # between instances until assigned on an instance — confirm intended.
    levelSizeX = 0
    levelSizeY = 0
    levelTiles = []
    def setLevelSize(self, x, y):
        """Record the level dimensions (in tiles) on this instance.

        Bug fix: the previous version assigned to *local* variables
        ``levelSizeX``/``levelSizeY``, so the requested size was
        silently discarded.
        """
        self.levelSizeX = x
        self.levelSizeY = y
# Class for manipulating tiles: owns the level model and the Tk canvas
# it is rendered onto.
class levelManipulation():
    def __init__(self):
        self.levelz = level()
        self.canvas = None
    # Snap a pixel coordinate down to the top-left corner of its 16x16 cell.
    def determinePos(self, x, y):
        fixedX = (x - x%16)
        fixedY = (y- y%16)
        return (fixedX, fixedY)
    # Create a new level of sizeX * sizeY empty tiles and size the canvas
    # (16 pixels per tile) to match.
    def createLevel(self, sizeX, sizeY, widget):
        self.tiles = []
        # Loops through adding all the tiles
        for total in range(0, sizeX*sizeY):
            self.tiles.append(tile())
        self.levelz.levelTiles = self.tiles
        self.levelz.levelSizeX = sizeX
        self.levelz.levelSizeY = sizeY
        self.canvas = widget
        # Set the canvas size to size of the level
        self.canvas.configure(width = self.levelz.levelSizeX * 16, height = self.levelz.levelSizeY * 16)
        # Set the scrollable size
        self.canvas.configure(scrollregion=(0, 0, self.levelz.levelSizeX * 16, self.levelz.levelSizeY * 16))
    # Redraw every tile, left to right, wrapping to a new row at the end
    # of each level row.
    def displayLevel(self):
        posX = 0
        posY = 0
        # Checks when to start positioning tiles from right to left.
        # NOTE(review): the row-wrap test compares against the hard-coded
        # constant 49 instead of levelSizeX - 1, so it only renders
        # correctly for 50-tile-wide levels — confirm and parameterize.
        for i in range(0, len(self.levelz.levelTiles)):
            if (i % self.levelz.levelSizeX == 49):
                #self.canvas.create_rectangle(posX, posY, posX+16, posY+16, fill="red", width=0)
                self.canvas.create_image(posX, posY, image=self.levelz.levelTiles[i].image, anchor="nw")
                posY += 16
                posX = 0
            else:
                self.canvas.create_image(posX, posY, image=self.levelz.levelTiles[i].image, anchor="nw")
                #self.canvas.create_rectangle(posX, posY, posX+16, posY+16, fill="red", width=0)
                posX += 16
    # NOTE(review): the index arithmetic below deserves a cleanup pass.
    # Draw the tile on the canvas and add it to the list
    # The parameters are the following
    # widget - The widget to draw the image on
    # x - The x cordinate to place the image
    # y - The y cordinate to place the image
    # selectedTile - the image of the tile to place
    # imageLocX - the x cordinate of the image on the tileset (This is added to the array)
    # imageLocY - the y cordinate of the image on the tileset (This is added to the array)
    def drawTile(self, widget, x, y, selectedTile, imageLocX, imageLocY):
        newX, newY = self.determinePos(x, y)
        widget.copyImage = selectedTile
        # Add the tile to the array in the correct position using simple algoriphm to find where to set it
        # We know the level size in the x direction so we can use a similar way to find where to place tile in array
        # This may be removed in future and replaced with a better technique, since this is probably a bad way of doing it
        # Now check for correct position
        newX, newY = self.determinePos(newX, newY)
        # Simple check to make sure tile is actually allowed to be place in this level
        if (0 <= newX < self.levelz.levelSizeX*16 and 0 <=newY < self.levelz.levelSizeY*16 ):
            # Than devide that by 16 since it's 16x16
            divnewX = newX / 16
            divnewY = newY / 16
            # Than we muliply the x + y*xmax since y is xtotal*y tiles down
            # NOTE(review): on Python 3 `/` yields floats here; the int()
            # casts below compensate — consider `//` instead.
            self.levelz.levelTiles[int(divnewX + self.levelz.levelSizeX*divnewY)].x = imageLocX
            #print (self.levelz.levelTiles[int(divnewX + self.levelz.levelSizeX*divnewY)].x)
            self.levelz.levelTiles[int(divnewX + self.levelz.levelSizeX*divnewY)].y = imageLocY
            self.levelz.levelTiles[int(divnewX + self.levelz.levelSizeX*divnewY)].image = selectedTile
            # Display the level
            #self.displayLevel()
            self.canvas.create_image(newX, newY, image=selectedTile, anchor="nw")
| {
"repo_name": "tacocats/2DEngine",
"path": "tools/tile_editor/scene_module.py",
"copies": "1",
"size": "4107",
"license": "mit",
"hash": 6780308149456524000,
"line_mean": 39.2647058824,
"line_max": 122,
"alpha_frac": 0.6177258339,
"autogenerated": false,
"ratio": 3.5527681660899653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46704939999899653,
"avg_score": null,
"num_lines": null
} |
# The usual caveats re microscheduler time periods apply: if you need millisecond accuracy
# (or better) use a hardware timer. Times can easily be -0 +20mS or more, depending on other threads.
from usched import Sched, microsWhen, seconds, after, microsUntil, Timeout, wait
# Probe objects whose types are captured so _future() can classify a
# callback at run time as a plain function, a bound method, or a
# generator function (a scheduler "thread").
def _f(): pass
FunctionType = type(_f) # Function or lambda
def _g():
    yield 1
ThreadType = type(_g) # differs from type library. type(_g) != type(_g())
class _C:
    def _m(self): pass
MethodType = type(_C()._m)
class Delay(object):
    """Software retriggerable delay: trigger() arms (or re-arms) a
    countdown, and when it expires without being stopped the optional
    callback runs once."""
    def __init__(self, objSched, callback=None, callback_args=()):
        self.objSched = objSched
        self.callback = callback
        self.callback_args = callback_args
        self._running = False
    def stop(self):
        """Cancel the delay; the callback will not fire."""
        self._running = False
    def trigger(self, duration):
        """(Re)start the countdown for ``duration`` seconds."""
        self.tstop = microsWhen(seconds(duration)) # Update end time
        if not self._running: # Start a thread which stops the
            self.objSched.add_thread(self.killer()) # delay after its period has elapsed
            self._running = True
    def running(self):
        """Return True while the countdown is armed."""
        return self._running
    def killer(self):
        """Watchdog thread: sleeps until the (possibly moving) deadline,
        then fires the callback unless the delay was stopped."""
        to = Timeout(1) # Initial value is arbitrary
        while not after(self.tstop): # Might have been retriggered
            yield to._ussetdelay(microsUntil(self.tstop))
        if self._running and self.callback is not None:
            self.callback(*self.callback_args)
        self._running = False
def _future(objSched, time_to_run, callback, callback_args):
    """Thread: wait ``time_to_run`` seconds, then run the callback.

    Plain functions and bound methods are called directly; a generator
    function is instantiated and handed to the scheduler as a new thread.
    """
    yield # No initialisation to do
    yield from wait(time_to_run)
    t = type(callback)
    if t is FunctionType or t is MethodType:
        callback(*callback_args)
    elif t is ThreadType: # Generator function (thread)
        objSched.add_thread(callback(*callback_args))
    else:
        raise ValueError('future() received an invalid callback')
def future(objSched, time_to_run, callback, callback_args=()):
    """Schedule ``callback`` to run ``time_to_run`` seconds from now."""
    objSched.add_thread(_future(objSched, time_to_run, callback, callback_args))
| {
"repo_name": "peterhinch/Micropython-scheduler",
"path": "delay.py",
"copies": "1",
"size": "2764",
"license": "mit",
"hash": -1824798438218552600,
"line_mean": 41.5230769231,
"line_max": 102,
"alpha_frac": 0.6512301013,
"autogenerated": false,
"ratio": 4.100890207715134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252120309015134,
"avg_score": null,
"num_lines": null
} |
"""A time estimator by running TensorFlow operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import tensorflow as tf
import numpy as np
from six.moves import range
from paleo.profilers.base import BaseProfiler, TimeMeasure
class TensorFlowProfiler(BaseProfiler):
def __init__(self, options, device='/gpu:0'):
super(TensorFlowProfiler, self).__init__('TensorFlowProfiler', options)
self._device = device
self._logger.info('TensorFlow version: %s' % tf.__version__)
def profile(self, layer):
graph = tf.Graph()
ops, bwd_ops = None, None
if layer.layertype == 'conv2d':
ops, bwd_ops = self._ops_conv2d(layer, graph)
elif layer.layertype == 'innerproduct':
ops, bwd_ops = self._ops_innerproduct(layer, graph)
elif layer.layertype == 'pool2d':
ops, bwd_ops = self._ops_pool2d(layer, graph)
elif layer.layertype == 'dropout':
ops, bwd_ops = self._ops_dropout(layer, graph)
elif layer.layertype == 'concat':
ops, bwd_ops = self._ops_concat(layer, graph)
elif layer.layertype == 'reshape':
ops, bwd_ops = self._ops_reshape(layer, graph)
else:
self._logger.warning('Unimplemented \'%s\'' % layer.layertype)
return self._execute(ops, bwd_ops, graph)
def profile_full_pass(self, layers):
graph, end_points, variables = self._compose_full_graph(layers)
# Forward pass.
if layers[-1].layertype in ['softmax', 'sigmoid']:
last_op = end_points[layers[-2].name]
loss_op = end_points[layers[-1].name]
else:
last_op = end_points[layers[-1].name]
loss_op = None
forward_time = self._execute(last_op, None, graph)
# Backward pass.
softmax_time = TimeMeasure()
backward_time = TimeMeasure()
if loss_op is not None:
softmax_time = self._execute(loss_op, None, graph)
with graph.as_default():
grad_op = tf.gradients(loss_op, variables)
backward_time = self._execute(grad_op, None, graph)
backward_time = backward_time - softmax_time
softmax_time = softmax_time - forward_time
return forward_time, softmax_time, backward_time
def _compose_full_graph(self, layers):
graph = tf.Graph()
end_points = dict() # collects out tensors for each layer
variables = [None] # collects trainable variables
for layer in layers:
if layer.layertype == 'conv2d':
ops, _ = self._ops_conv2d(layer, graph, end_points, variables)
elif layer.layertype == 'deconv2d':
ops, _ = self._ops_deconv2d(layer, graph, end_points,
variables)
elif layer.layertype == 'innerproduct':
ops, _ = self._ops_innerproduct(layer, graph, end_points,
variables)
elif layer.layertype == 'pool2d':
ops, _ = self._ops_pool2d(layer, graph, end_points)
elif layer.layertype == 'upsampling2d':
ops, _ = self._ops_upsampling2d(layer, graph, end_points)
elif layer.layertype == 'dropout':
ops, _ = self._ops_dropout(layer, graph, end_points)
elif layer.layertype == 'concat':
ops, _ = self._ops_concat(layer, graph, end_points)
elif layer.layertype == 'reshape':
ops, _ = self._ops_reshape(layer, graph, end_points)
elif layer.layertype == 'softmax':
ops, _ = self._ops_softmax(layer, graph, end_points)
elif layer.layertype == 'sigmoid':
ops, _ = self._ops_sigmoid(layer, graph, end_points)
elif layer.layertype == 'input':
# skip data/input layer.
continue
else:
raise NotImplementedError('Cannot create ops for layer %s [%s]'
% (layer.name, layer.layertype))
end_points[layer.name] = ops
return graph, end_points, variables[1:]
def _get_inputs(self, layer, end_points=None):
if end_points is None or layer.parents[0] == 'data':
# Isolation mode: inputs for the layer are random constants.
inputs = tf.constant(
2 * np.random.random_sample(layer.inputs) - 1,
dtype=tf.float32,
name="fake_inputs")
return inputs
else:
# Chain mode: get inputs from parent layer outputs.
inputs = [end_points[p] for p in layer.parents]
if len(inputs) == 1:
return inputs[0]
return inputs
def _get_variable(self, shape, name='constant'):
return tf.Variable(
tf.truncated_normal(
shape, dtype=tf.float32, stddev=1e-1),
name='rand_{}'.format(name))
def _get_fake_targets(self, batch_size, num_classes):
labels = np.random.randint(0, num_classes, batch_size)
return tf.constant(labels, dtype=tf.int32, name='fake_targets')
def _ops_conv2d(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
filters = self._get_variable(layer.filters, name='filters')
if variables:
variables.append(filters)
conv = None
if self.options.direction == 'forward':
conv = tf.nn.conv2d(
inputs, filters, layer.strides, padding=layer.padding)
bwd_inputs_op, bwd_filter_op = None, None
if self.options.direction == 'backward':
if self.options.gradient_wrt == 'data' and layer.backprop:
bwd_inputs_op = tf.nn.conv2d_backprop_input(
layer.inputs,
filters,
self._get_variable(
layer.outputs, name='outputs'),
layer.strides,
layer.padding)
elif self.options.gradient_wrt == 'filter':
bwd_filter_op = tf.nn.conv2d_backprop_filter(
inputs, layer.filters,
self._get_variable(layer.outputs, 'outputs'),
layer.strides, layer.padding)
return conv, [bwd_inputs_op, bwd_filter_op]
def _ops_deconv2d(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
filters = self._get_variable(layer.filters, name='filters')
if variables:
variables.append(filters)
deconv = tf.nn.conv2d_transpose(
inputs,
filters,
output_shape=layer.outputs,
strides=layer.strides)
return deconv, None
def _ops_innerproduct(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
weights = self._get_variable(layer.weights, name='weights')
if variables:
variables.append(weights)
innerprod = tf.matmul(inputs, weights)
return innerprod, None
def _ops_pool2d(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
if layer.pool_type == 'max':
pool_op = tf.nn.max_pool
elif layer.pool_type == 'avg':
pool_op = tf.nn.avg_pool
else:
raise NotImplementedError('Invalid pool type: %s' %
layer.pool_type)
pool = pool_op(
inputs, layer.kernel, layer.strides, padding=layer.padding)
return pool, None
def _ops_upsampling2d(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
upsampling = tf.image.resize_nearest_neighbor(
inputs, layer.outputs[1:3])
return upsampling, None
def _ops_dropout(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
dropout = tf.nn.dropout(inputs, layer.keep_prob)
return dropout, None
def _ops_concat(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
if end_points:
inputs = self._get_inputs(layer, end_points)
else:
inputs = [tf.Variable(tf.random_normal(inp))
for inp in layer.inputs]
concat = tf.concat(layer.dim, inputs)
return concat, None
def _ops_reshape(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
reshape = tf.reshape(inputs, layer.outputs)
return reshape, None
def _ops_softmax(self, layer, graph, end_points=None):
# For simplicity, here combine softmax and loss
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.squeeze(inputs), self._get_fake_targets(
layer.outputs[0], layer.outputs[1])))
return loss, None
def _ops_sigmoid(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(inputs, tf.zeros(
layer.outputs)))
return loss, None
def _execute(self, layer_ops, bwd_ops, graph):
    """Run the requested forward/backward ops and time them.

    Args:
        layer_ops: forward op (or list of ops) to time, or None.
        bwd_ops: (grad_wrt_data, grad_wrt_filter) ops, entries may be None.
        graph: the tf.Graph the ops belong to.

    Returns:
        TimeMeasure with the mean wall-clock time (ms) over the timed
        iterations; an empty TimeMeasure when there is nothing to run.
    """
    with graph.as_default():
        with tf.device(self._device):
            # Disable graph optimization (opt_level L0) so we time the ops
            # as constructed, not an optimized rewrite.
            config = tf.ConfigProto(
                allow_soft_placement=False,
                log_device_placement=(
                    self._logger.getEffectiveLevel() == logging.DEBUG),
                graph_options=tf.GraphOptions(
                    optimizer_options=tf.OptimizerOptions(
                        opt_level=tf.OptimizerOptions.L0)))
            ops_to_run = None
            if self.options.direction == 'forward':
                if layer_ops is None:
                    return TimeMeasure()
                if isinstance(layer_ops, list):
                    target_fwd_op = [tf.group(op) for op in layer_ops]
                else:
                    # tf.shape forces the op to actually produce its output.
                    shape = tf.shape(layer_ops)
                    target_fwd_op = tf.group(shape)
                ops_to_run = target_fwd_op
            elif self.options.direction == 'backward':
                if bwd_ops is None:
                    return TimeMeasure()
                else:
                    if self.options.gradient_wrt == 'data':
                        target = bwd_ops[0]
                    elif self.options.gradient_wrt == 'filter':
                        target = bwd_ops[1]
                    else:
                        # Bug fix: the two adjacent string literals were
                        # missing a separating space ("twobackward").
                        self._logger.warning(
                            'TensorFlowProfiler cannot run two '
                            'backward ops for now.')
                        return TimeMeasure()
                    if target is None:
                        return TimeMeasure()
                    target_bwd_op = tf.group(tf.shape(target))
                    ops_to_run = target_bwd_op
            # NOTE(review): if options.direction is neither 'forward' nor
            # 'backward', ops_to_run stays None and sess.run below would
            # raise -- presumably callers validate direction upstream.
            init = tf.initialize_all_variables()
            # Create a session and initialize variables.
            with tf.Session(config=config) as sess:
                # writer = tf.train.SummaryWriter('logs/', sess.graph)
                sess.run(init)
                # Run the ops; warm-up iterations are excluded from timing.
                durations = []
                for i in range(self.options.num_warmup +
                               self.options.num_iter):
                    start_time = time.time()
                    sess.run(ops_to_run)
                    duration = time.time() - start_time
                    if i >= self.options.num_warmup:
                        # Measure time in milliseconds.
                        durations.append(duration * (10**3))
                mean_time = np.mean(durations)
            tf.reset_default_graph()
            return TimeMeasure(total_time=mean_time)
| {
"repo_name": "TalwalkarLab/paleo",
"path": "paleo/profilers/tensorflow_profiler.py",
"copies": "1",
"size": "13589",
"license": "apache-2.0",
"hash": 1647011751058073600,
"line_mean": 41.465625,
"line_max": 79,
"alpha_frac": 0.5143866362,
"autogenerated": false,
"ratio": 4.344309462915601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 320
} |
"""A timer for APOGEE exposures
History:
2013-08-20: EM: moved to STUI
2013-03-11: EM: changed voice to mac system Glass.wav
2013-04-20: EM: multiple refinement, added format and colors,
changed time left for None if no exposures left in sop sequence.
2013-04-22: EM:
changed colors to self.fgList=["black", "ForestGreen","OrangeRed"]
changed name from Timer to apogeeTimer
added check button for sound on / off, default on.
2012-05-17 EM: change label text to just "apogeeTimer"
"""
import os
import Tkinter
import RO.OS
import RO.Wdg
import TUI
import TUI.Models
SoundsDir = RO.OS.getResourceDir(TUI, "Sounds")
SoundFileName = "Glass.wav"
class ScriptClass(object):
    """STUI script window: a countdown timer for the running APOGEE
    exposure sequence, with an optional alert sound near the end.
    """
    def __init__(self, sr, ):
        # sr: the STUI ScriptRunner hosting this script window.
        self.sr = sr
        # Label colors indexed by state: [idle/none, running, alert].
        self.fgList = ["DarkGrey", "ForestGreen", "Brown"]
        soundFilePath = os.path.join(SoundsDir, SoundFileName)
        self.soundPlayer = RO.Wdg.SoundPlayer(soundFilePath)
        self.sopModel = TUI.Models.getModel("sop")
        self.apogeeModel = TUI.Models.getModel("apogee")
        self.alertTime = 5.0 # min
        # alert is True while we still owe the user a sound for this sequence.
        self.alert = True
        self.name=" APOGEE Timer: "
        sr.master.winfo_toplevel().wm_resizable(True, True)
        # Top row: text label + sound on/off checkbox.
        F1 = Tkinter.Frame(sr.master)
        gr = RO.Wdg.Gridder(F1)
        F1.grid(row=0, column=0, sticky="ns")
        self.labWdg = RO.Wdg.Label(master=F1, text =self.name, fg=self.fgList[0])
        self.labWdg.grid(row=0, column=0, sticky="ew")
        self.checkWdg = RO.Wdg.Checkbutton(master=F1, text = "", defValue=True, helpText ="Play sound",)
        self.checkWdg.grid(row=0, column=1, sticky="ew")
        # Second row: progress bar showing minutes remaining.
        self.expTimer = RO.Wdg.ProgressBar(master = sr.master, valueFormat = "%4.1f", label = None, )
        self.expTimer.grid(row=1, column=0, sticky="ew")
        sr.master.rowconfigure(0, weight=1)
        sr.master.rowconfigure(1, weight=1)
        sr.master.columnconfigure(0, weight=1)
        self.record()
        # Refresh whenever sop sequence state, sop run state, or an
        # up-the-ramp read completes.
        self.sopModel.doApogeeScience_sequenceState.addCallback(self.seqState,callNow=False)
        self.sopModel.doApogeeScienceState.addCallback(self.sopState,callNow=False)
        self.apogeeModel.utrReadState.addCallback(self.utrState,callNow=False)
    def sopState (self, keyVar):
        """Callback: sop doApogeeScience run state changed."""
        if not keyVar.isGenuine: return
        self.record()
    def seqState(self, keyVar):
        """Callback: sop exposure-sequence state changed."""
        if not keyVar.isGenuine: return
        self.record()
    def utrState(self, keyVar):
        """Callback: refresh only when an up-the-ramp read finishes."""
        if not keyVar.isGenuine: return
        if keyVar[1] == "Done":
            self.record()
    def setNone(self, state):
        """Show an idle display (no running sequence) with the given state text."""
        self.labWdg.set("%s %s " % (self.name,state))
        # Tiny non-zero max keeps the progress bar valid while showing empty.
        self.expTimer.setValue(newValue=0.0, newMin=0.0, newMax=0.0001)
        self.labWdg.config(fg=self.fgList[0])
        return
    def record(self,):
        """Recompute time remaining from sop + apogee keywords and update widgets."""
        self.state=self.sopModel.doApogeeScienceState[0]
        if self.state!='running':
            self.setNone(self.state)
            return
        #sop
        self.sq, self.indState = self.sopModel.doApogeeScience_sequenceState[0:2]
        self.seqCount = self.sopModel.doApogeeScience_seqCount[0]
        # Exposure times converted from seconds to minutes.
        self.sopExp = self.sopModel.doApogeeScience_expTime[0] / 60.0
        self.sqTotal = len(self.sq) * self.sopExp
        self.sqOver = (self.indState) * self.sopExp
        #apogee
        self.utr2, self.utr3 =self.apogeeModel.utrReadState[2:4]
        self.utrExp=self.apogeeModel.utrReadTime[0]/60.
        self.utrTotal=self.utr3*self.utrExp
        self.utrOver=self.utr2*self.utrExp
        # total time estimation: prefer the sop sequence estimate when it is
        # still ahead of what has completed, else fall back to the UTR reads.
        if self.sqTotal > self.sqOver:
            total = self.sqTotal
            self.timeLeft = self.sqTotal - self.sqOver - self.utrOver
        else:
            total = self.utrTotal
            self.timeLeft = self.utrTotal - self.utrOver
        self.labWdg.set("%s %5.1f min " % (self.name,self.timeLeft))
        if self.timeLeft > self.alertTime:
            self.alert = True
            fgInd = 1
        elif 0 < self.timeLeft <= self.alertTime:
            # Play the alert sound once (twice in a row) per sequence.
            if self.alert and self.checkWdg.getBool():
                self.soundPlayer.play()
                self.soundPlayer.play();
                self.alert = False
            fgInd = 2;
        else:
            fgInd = 0
        self.labWdg.config(fg=self.fgList[fgInd])
        self.expTimer.setValue(newValue=self.timeLeft, newMin=0, newMax=total)
        return
    def run(self, sr):
        """ScriptRunner entry point: just refresh the display."""
        self.record()
    def end(self, sr):
        """ScriptRunner exit hook; callbacks are intentionally left attached."""
        pass
        # self.sopModel.doApogeeScience_sequenceState.removeCallback(self.seqState)
        # self.apogeeModel.utrReadState.removeCallback(self.utrState)
# Key("doApogeeScience_expTime", Float(help="exposure time", units="sec"), Float(help="default", units="sec")),
# Key("doApogeeScience_sequenceState", String(help="full exposure sequence. Basically ditherSeq * seqCount"),
# Int(help="index of running exposure")),
| {
"repo_name": "r-owen/stui",
"path": "TUI/Scripts/APOGEE/Timer.py",
"copies": "1",
"size": "4828",
"license": "bsd-3-clause",
"hash": -2246721755276038700,
"line_mean": 36.1384615385,
"line_max": 114,
"alpha_frac": 0.6371168186,
"autogenerated": false,
"ratio": 3.1148387096774193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9179586653046126,
"avg_score": 0.014473775046258823,
"num_lines": 130
} |
"""A timer for BOSS exposures
History:
2013-04-20 EM: bossTimer.py
2013-04-20 EM: added color and sound
2013-04-22 EM: added check button on/off sound, default on.
2012-04-24 EM: added ProgressBar
2012-05-17 EM: cut label text to just "bossTimer"
2013-08-20 EM: moved to STUI
2014-03-05 changed keyword name sopModel.doScience to sopModel.doBossScience for new sop
2014-11-05 EM fixed bug with initial keyword value before connection.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
2015-11-05 ROwen Stop using dangerous bare "except:"
"""
import os.path
import time
import Tkinter
import RO.Astro.Tm
import RO.Comm
import RO.OS
import RO.Wdg
import TUI.Models
import TUI.PlaySound
SoundsDir = RO.OS.getResourceDir(TUI, "Sounds")
SoundFileName = "Glass.wav"
class ScriptClass(object):
    """STUI script window: a countdown timer for the running BOSS
    exposure sequence, with an optional alert sound near the end.
    """
    def __init__(self, sr, ):
        sr.master.winfo_toplevel().wm_resizable(True, True)
        # Label colors indexed by state: [idle/none, running, alert].
        self.fgList = ["DarkGrey", "ForestGreen", "Brown"]
        soundFilePath = os.path.join(SoundsDir, SoundFileName)
        self.soundPlayer = RO.Wdg.SoundPlayer(soundFilePath)
        self.sr = sr
        # Top row: text label + sound on/off checkbox.
        F1 = Tkinter.Frame(sr.master)
        F1.grid(row=0, column=0, sticky="sn")
        self.labWdg = RO.Wdg.Label(master=F1, text=" ", fg=self.fgList[0])
        self.labWdg.grid(row=0, column=0, sticky="ns")
        self.checkWdg = RO.Wdg.Checkbutton(master=F1, text="", defValue=True, helpText="Play sound",)
        self.checkWdg.grid(row=0, column=1, sticky="we")
        # Second row: progress bar showing minutes remaining.
        self.expTimer = RO.Wdg.ProgressBar(master=sr.master, valueFormat="%5.2f", label=None)
        self.expTimer.grid(row=1, column=0, sticky="ew")
        sr.master.rowconfigure(0, weight=1)
        sr.master.rowconfigure(1, weight=1)
        sr.master.columnconfigure(0, weight=1)
        self.minAlert = 300.0/60.0   # play the alert when < 5 minutes remain
        self.secEnd = None           # TAI seconds at which the sequence ends
        self.alert = True            # True while a sound is still owed
        self.fooTimer = RO.Comm.Generic.Timer()
        self.wait = 1                # refresh period, seconds
        # self.fooTimer.start(self.wait, foo) # schedule self again
        self.foo()
        self.sopModel = TUI.Models.getModel("sop")
        self.nExp0, self.nExp1 = self.sopModel.doBossScience_nExp[0:2]
        self.expTotal = sr.getKeyVar(self.sopModel.doBossScience_expTime, ind=0, defVal=900)
        # I evaluated the time of reading out as 80 sec
        self.expTotal = self.expTotal + 80
        self.sopModel.doBossScience_nExp.addCallback(self.doScience_nExp, callNow=True)

    def getTAITimeStr(self,):
        """Return (TAI time string "HH:MM:SS", current python seconds)."""
        self.currPythonSeconds = RO.Astro.Tm.getCurrPySec()
        self.currTAITuple = time.gmtime(self.currPythonSeconds - RO.Astro.Tm.getUTCMinusTAI())
        self.taiTimeStr = time.strftime("%H:%M:%S", self.currTAITuple)
        return self.taiTimeStr, self.currPythonSeconds

    def doScience_nExp(self, keyVar):
        """Callback: the number of done or scheduled sop exposures changed.

        Recomputes self.secEnd, the predicted TAI end time of the sequence.
        """
        sr = self.sr
        self.expTotal = sr.getKeyVar(self.sopModel.doBossScience_expTime, ind=0, defVal=900)
        self.expTotal = self.expTotal + 80
        # Bug fix: remember the previous counts BEFORE overwriting them.
        # The original assigned self.nExp0/nExp1 first and then compared
        # keyVar against the just-assigned values, so the "next exposure"
        # and "count modified" branches below were unreachable.
        prevExp0, prevExp1 = self.nExp0, self.nExp1
        self.nExp0, self.nExp1 = keyVar[0:2]
        if keyVar[0] == keyVar[1]:           # end of sequence
            self.secEnd = None
        elif keyVar[0] != prevExp0:          # begin seq, or next exposure
            tai, sec = self.getTAITimeStr()
            self.secEnd = sec + (self.nExp1 - self.nExp0) * self.expTotal
        elif keyVar[1] != prevExp1:          # total count modified in progress
            self.secEnd = self.secEnd + (keyVar[1] - prevExp1) * self.expTotal
        else:
            tai, sec = self.getTAITimeStr()
            self.secEnd = sec + (self.nExp1 - self.nExp0) * self.expTotal
        try:
            newValue = (self.nExp1 - self.nExp0) * self.expTotal / 60.
            newMax = self.nExp1 * self.expTotal / 60.
        except Exception:
            newValue = 0
            newMax = 900
        else:
            self.expTimer.setValue(newValue=newValue, newMin=0, newMax=newMax)
        self.foo()

    def foo(self):
        """Russel's timer: refresh the countdown display, then reschedule."""
        self.fooTimer.cancel()
        lab = " BOSS Timer: "
        if self.secEnd is None:
            self.labWdg.set("%s None " % (lab))
            self.labWdg.config(fg=self.fgList[0])
        else:
            tai, sec = self.getTAITimeStr()
            self.minLeft = (self.secEnd - sec) / 60.0
            self.labWdg.set("%s %6.2f min " % (lab, self.minLeft))
            if self.minLeft > self.minAlert:
                fgInd = 1
                self.alert = True
            elif 0 < self.minLeft <= self.minAlert:
                fgInd = 2
                # Play the alert sound once (twice in a row) per sequence.
                if self.alert:
                    self.alert = False
                    if self.checkWdg.getBool():
                        self.soundPlayer.play()
                        self.soundPlayer.play()
            else:
                fgInd = 0
            self.labWdg.config(fg=self.fgList[fgInd])
            self.expTimer.setValue(newValue=self.minLeft)
        self.fooTimer.start(self.wait, self.foo)  # schedule self again

    def run(self, sr):
        """ScriptRunner entry point; the timer runs on its own."""
        pass

    def end(self, sr):
        """ScriptRunner exit hook: stop the refresh timer."""
        self.fooTimer.cancel()
| {
"repo_name": "r-owen/stui",
"path": "TUI/Scripts/BOSS/Timer.py",
"copies": "1",
"size": "5154",
"license": "bsd-3-clause",
"hash": -2323548263877116000,
"line_mean": 37.1777777778,
"line_max": 108,
"alpha_frac": 0.6076833527,
"autogenerated": false,
"ratio": 3.2152214597629443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9293221859147277,
"avg_score": 0.005936590663133427,
"num_lines": 135
} |
"""A time zone-aware DateTime field.
When saving, naive datetime objects are assumed to belong to the local time
zone and are converted to UTC. When loading from the database the naive datetime
objects are converted to UTC.
These field types require database support. MySQL 5 will not work.
"""
from datetime import datetime
from django.conf import settings
from django.db import models
from timezones.forms import TZDateTimeField as TZFormField, parse_tzdt
import pytz
# added by ARJ according to http://south.aeracode.org/wiki/MyFieldsDontWork
# Tell South (pre-Django-1.7 migrations) that TZDateTimeField can be
# introspected like its DateTimeField base, with no extra keyword arguments.
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^timezones\.fields\.TZDateTimeField"])
class TZDatetime(datetime):
    """A datetime subclass with a convenience converter to the local zone."""
    def aslocaltimezone(self):
        """Return this datetime expressed in settings.TIME_ZONE."""
        return self.astimezone(pytz.timezone(settings.TIME_ZONE))
class TZDateTimeField(models.DateTimeField):
    """A DateTimeField that treats naive datetimes as local time zone."""
    __metaclass__ = models.SubfieldBase

    def to_python(self, value):
        """Return a time zone-aware TZDatetime, or None.

        Parsing is delegated to parse_tzdt rather than Django's own
        DateTimeField parsing, which form validation re-implements anyway.
        """
        parsed = parse_tzdt(value)
        if parsed is None:
            return parsed
        return TZDatetime(parsed.year, parsed.month, parsed.day, parsed.hour,
            parsed.minute, parsed.second, parsed.microsecond,
            tzinfo=parsed.tzinfo)

    def formfield(self, **kwargs):
        """Use TZFormField by default; explicit form_class kwargs win."""
        kwargs.setdefault('form_class', TZFormField)
        return super(TZDateTimeField, self).formfield(**kwargs)
| {
"repo_name": "aarontropy/django-datebook",
"path": "timezones/fields.py",
"copies": "1",
"size": "1755",
"license": "bsd-3-clause",
"hash": -4751013411956499000,
"line_mean": 32.7692307692,
"line_max": 80,
"alpha_frac": 0.6866096866,
"autogenerated": false,
"ratio": 4.398496240601504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03133776513444852,
"num_lines": 52
} |
"""A tiny python command line browser for sterkinekor."""
from setuptools import setup, find_packages
from codecs import open
from os import path
from sterpy import *
here = path.abspath(path.dirname(__file__))

# Prefer a reStructuredText long_description (rendered by PyPI); fall back
# to the raw Markdown when pypandoc is not installed.
try:
    from pypandoc import convert
    read_md = lambda f: convert(f, 'rst')
except ImportError:
    print("warning: pypandoc module not found, could not convert Markdown to RST")
    # NOTE(review): this fallback never closes the file handle it opens.
    read_md = lambda f: open(f, 'r').read()

setup(
    name='ster-py',
    # NOTE(review): relies on the star import above binding the name
    # `sterpy` -- confirm the sterpy package re-exports its sterpy module.
    version=sterpy.__VERSION__,
    description='A python cli based sterkinekor browser, whatever, it needed to be done.',
    long_description=read_md('README.md'),
    url='https://github.com/spookyUnknownUser/ster-py',
    author='spookyUnknownUser',
    author_email='spookyUnknownUser@users.noreply.github.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='cli command python sterkinekor browser',
    packages=find_packages(),
    install_requires=['click', 'omdb', 'colorama', 'requests'],
    entry_points={
        'console_scripts': [
            'ster-py=sterpy:main',
        ],
    },
)
| {
"repo_name": "spookyUnknownUser/ster-py",
"path": "setup.py",
"copies": "1",
"size": "1299",
"license": "mit",
"hash": -2038899313034461400,
"line_mean": 29.2093023256,
"line_max": 90,
"alpha_frac": 0.6481909161,
"autogenerated": false,
"ratio": 3.700854700854701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4849045616954701,
"avg_score": null,
"num_lines": null
} |
# a tiny Tkinter calculator (improved v.1.2)
# tested with Python25 vegaseat 01apr2007
"""
calculator has a layout like this ...
< display >
7 8 9 * C
4 5 6 / M->
1 2 3 - ->M
0 . = + neg
"""
import Tkinter as tk
def click(key):
    """Handle a calculator button press.

    Keys: digits/operators append to the entry; '=' evaluates; 'C' clears;
    '->M' stores the current result in memory; 'M->' recalls it;
    'neg' toggles the sign of the current expression.
    Uses the module-level `entry`, `root` widgets and `memory` string.
    """
    global memory
    if key == '=':
        # Robustness fix: '=' on an empty entry used to raise IndexError
        # when indexing entry.get()[0] below.
        if not entry.get():
            return
        # avoid division by integer (Python 2 floor division)
        if '/' in entry.get() and '.' not in entry.get():
            entry.insert(tk.END, ".0")
        # guard against the bad guys abusing eval()
        str1 = "-+0123456789."
        if entry.get()[0] not in str1:
            entry.insert(tk.END, "first char not in " + str1)
            # Bug fix: originally fell through and eval'd the entry anyway
            # (now corrupted by the message just inserted).
            return
        # here comes the calculation part
        try:
            result = eval(entry.get())
            entry.insert(tk.END, " = " + str(result))
        except Exception:
            # Narrowed from a bare except; eval of a bad expression lands here.
            entry.insert(tk.END, "--> Error!")
    elif key == 'C':
        entry.delete(0, tk.END) # clear entry
    elif key == '->M':
        memory = entry.get()
        # extract the result (text after "= ")
        if '=' in memory:
            ix = memory.find('=')
            memory = memory[ix+2:]
        root.title('M=' + memory)
    elif key == 'M->':
        entry.insert(tk.END, memory)
    elif key == 'neg':
        if '=' in entry.get():
            entry.delete(0, tk.END)
        try:
            if entry.get()[0] == '-':
                entry.delete(0)
            else:
                entry.insert(0, '-')
        except IndexError:
            # empty entry: nothing to negate
            pass
    else:
        # previous calculation has been done, clear entry
        if '=' in entry.get():
            entry.delete(0, tk.END)
        entry.insert(tk.END, key)
root = tk.Tk()
root.title("Tiny Tk Calculator")

# Button labels, laid out row-major in a 5-column grid (see module docstring).
btn_list = [
    '7', '8', '9', '*', 'C',
    '4', '5', '6', '/', 'M->',
    '1', '2', '3', '-', '->M',
    '0', '.', '=', '+', 'neg' ]

# create all buttons with a loop
r = 1
c = 0
for b in btn_list:
    rel = 'ridge'
    # The default argument binds the CURRENT value of b, avoiding the
    # classic late-binding closure bug in loops.
    cmd = lambda x=b: click(x)
    tk.Button(root,text=b,width=5,relief=rel,command=cmd).grid(row=r,column=c)
    #print b, r, c # test
    c += 1
    if c > 4:
        # wrap to the next grid row after 5 buttons
        c = 0
        r += 1

# use Entry widget for an editable display
entry = tk.Entry(root, width=33, bg="yellow")
entry.grid(row=0, column=0, columnspan=5)
root.mainloop()
| {
"repo_name": "sniemi/SamPy",
"path": "sandbox/src1/calc.py",
"copies": "1",
"size": "2210",
"license": "bsd-2-clause",
"hash": 6566794073251775000,
"line_mean": 25.3095238095,
"line_max": 78,
"alpha_frac": 0.4936651584,
"autogenerated": false,
"ratio": 3.1258840169731257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9090635378044953,
"avg_score": 0.005782759465634366,
"num_lines": 84
} |
''' A tiny web browser '''
import sys
from PyQt4 import QtGui, QtCore, QtWebKit
class TinySurfer(QtGui.QMainWindow):
    ''' A tiny web browser written in PyQt4 '''
    def __init__(self, url):
        # url: a QtCore.QUrl to load as the start page.
        super(TinySurfer, self).__init__()
        ### Window elements ###
        # Small progress bar shown in the status bar while pages load.
        prog_bar = QtGui.QProgressBar()
        prog_bar.setMaximumWidth(120)
        self.web_view = QtWebKit.QWebView(loadProgress=prog_bar.setValue,
                                          loadFinished=prog_bar.hide,
                                          loadStarted=prog_bar.show,
                                          titleChanged=self.setWindowTitle)
        toolbar = self.addToolBar("") # Only returns a QToolbar object if given a title...
        # Back / Forward / Reload actions come straight from the web page.
        for action in (QtWebKit.QWebPage.Back, QtWebKit.QWebPage.Forward, QtWebKit.QWebPage.Reload):
            toolbar.addAction(self.web_view.pageAction(action))
        # URL bar: Enter navigates to the typed address.
        url_line = QtGui.QLineEdit(returnPressed=lambda: self.web_view.setUrl(QtCore.QUrl.fromUserInput(url_line.text())))
        url_line.setStyleSheet("font-size:15px;")
        toolbar.addWidget(url_line)
        # In-page search bar, hidden until Ctrl+F.
        search_bar = QtGui.QLineEdit(returnPressed=lambda: self.web_view.findText(search_bar.text()))
        search_bar.hide()
        status_bar = self.statusBar()
        status_bar.addPermanentWidget(search_bar)
        status_bar.addPermanentWidget(prog_bar)
        self.setCentralWidget(self.web_view)
        self.web_view.load(url)
        # Handle QWebView events
        self.web_view.urlChanged.connect(lambda u: url_line.setText(u.toString()))
        # NOTE(review): url_completer's return value is discarded when used
        # as a slot here, so the completer it builds is never attached to
        # url_line -- confirm intended behavior.
        self.web_view.urlChanged.connect(self.url_completer)
        self.web_view.statusBarMessage.connect(status_bar.showMessage)
        self.web_view.page().linkHovered.connect(lambda l: status_bar.showMessage(l, 3000))
        self.web_view.settings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
        # Keyboard shortcuts
        QtGui.QShortcut(QtGui.QKeySequence.Find, self, activated=lambda: (search_bar.show(), search_bar.setFocus()))
        QtGui.QShortcut("Esc", self, activated=lambda: (search_bar.hide(), self.web_view.setFocus()))
        QtGui.QShortcut("Ctrl+Q", self, activated=self.close)
        QtGui.QShortcut("Ctrl+F4", self, activated=self.close)
        QtGui.QShortcut(QtGui.QKeySequence.Refresh, self, activated=self.web_view.reload)
        QtGui.QShortcut(QtGui.QKeySequence.Back, self, activated=self.web_view.back)
        QtGui.QShortcut(QtGui.QKeySequence.Forward, self, activated=self.web_view.forward)
        QtGui.QShortcut(QtGui.QKeySequence.ZoomIn, self, activated=lambda: self.web_view.setZoomFactor(self.web_view.zoomFactor()+.2))
        QtGui.QShortcut(QtGui.QKeySequence.ZoomOut, self, activated=lambda: self.web_view.setZoomFactor(self.web_view.zoomFactor()-.2))
        QtGui.QShortcut("Ctrl+=", self, activated=lambda: self.web_view.setZoomFactor(1))
    def url_completer(self):
        ''' Autocomplete functionality for URL bar '''
        # Just use the history for now
        str_list = [QtCore.QString(i.url().toString()) for i in self.web_view.history().items()]
        return QtGui.QCompleter(QtCore.QStringList(str_list),
                                caseSensitivity=QtCore.Qt.CaseInsensitive)
if __name__ == "__main__":
    APP = QtGui.QApplication(sys.argv)
    # Optional first CLI argument is the start page; default to python.org.
    if len(sys.argv) > 1:
        URL = QtCore.QUrl.fromUserInput(sys.argv[1])
    else:
        URL = QtCore.QUrl('http://www.python.org')
    BROWSER = TinySurfer(URL)
    BROWSER.show()
    sys.exit(APP.exec_())
| {
"repo_name": "LucasRMehl/TinySurfer",
"path": "TinySurfer.py",
"copies": "1",
"size": "3533",
"license": "mit",
"hash": 6478411829416603000,
"line_mean": 47.397260274,
"line_max": 135,
"alpha_frac": 0.654118313,
"autogenerated": false,
"ratio": 3.616171954964176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747652044569164,
"avg_score": 0.004527644679002496,
"num_lines": 73
} |
"""atlang: An extensible static type system inside Python."""
import ast # Python standard library's abstract syntax module
import inspect # for accessing source code for functions
import textwrap # for stripping leading spaces
import cypy # helper functions
import six # Python 2-3 compatibility, e.g. metaclasses
# TODO: semver
class UsageError(Exception):
    """Raised when the atlang API is misused by client code."""
class TypeError(Exception):
    """A static type error detected during typechecking.

    Intentionally shadows the builtin TypeError within this module.
    `location` records where in the source the error occurred; it defaults
    to None because some call sites in this module only pass a message.
    """
    def __init__(self, message, location=None):
        # Bug fix: the original called Exception.__init__(message) without
        # `self`, which itself raised at construction time.
        Exception.__init__(self, message)
        self.location = location
    # TODO: error pretty-printing
class TypeFormationError(Exception):
    """Raised when a type is constructed incorrectly (e.g. bypassing tycon[idx])."""
class _TypeMetaclass(type):  # here, type is Python's "type" builtin
    """Metaclass giving tycons the ``tycon[idx]`` construction syntax."""
    def __getitem__(cls, idx):
        # An index containing ``...`` yields an IncompleteType; otherwise
        # a complete type is constructed and validated.
        if _contains_ellipsis(idx):
            return _construct_incty(cls, idx)
        return _construct_ty(cls, idx)
def _contains_ellipsis(idx):
if idx is Ellipsis: return True
elif isinstance(idx, tuple):
for item in idx:
if item is Ellipsis: return True
return False
def _construct_incty(tycon, inc_idx):
    # Validate an ellipsis-bearing index with the tycon's hook, then wrap
    # it as an IncompleteType (no concrete type is built yet).
    tycon.validate_inc_idx(inc_idx)
    return IncompleteType(tycon, inc_idx)
def _construct_ty(tycon, idx):
    # Validate the index, then instantiate the tycon; ok=True bypasses the
    # direct-construction guard in Type.__init__.
    tycon.validate_idx(idx)
    return tycon(idx, True)
@six.add_metaclass(_TypeMetaclass)
class Type(object):
    """Base class for atlang types.

    An atlang type is an instance of atlang.Type.
    An atlang tycon is a subclass of atlang.Type.
    """
    def __init__(self, idx, ok=False):
        # ok is an internal flag: only tycon[idx] (via _construct_ty) may
        # construct types; direct construction is rejected.
        if not ok:
            raise TypeFormationError(
                "Types should not be constructed directly. Use tycon[idx].")
        self.idx = idx

    @classmethod
    def validate_idx(cls, idx):
        # Hook: subclasses may reject malformed indices. Default: accept all.
        pass

    @classmethod
    def validate_inc_idx(cls, inc_idx):
        # Hook for incomplete (ellipsis-bearing) indices. Default: accept all.
        pass

    def __eq__(self, other):
        # Types are equal iff they share a tycon and have equal indices.
        # NOTE(review): __hash__ is not defined alongside __eq__ -- fine on
        # Python 2, but Type instances become unhashable on Python 3.
        return tycon(self) is tycon(other) and self.idx == other.idx

    def __ne__(self, other):
        return not self.__eq__(other)

    def ana_FunctionDef_TopLevel(self, tree, static_env):
        # Analysis hook: subclasses typecheck a function AST against self.
        raise NotImplementedError("ana_FunctionDef_TopLevel not implemented.")

    @classmethod
    def syn_idx_FunctionDef_TopLevel(self, tree, static_env):
        # Synthesis hook: subclasses compute the missing index from the AST.
        raise NotImplementedError("syn_idx_FunctionDef_TopLevel not implemented.")

    def __call__(self, f):
        # Only FnType (which overrides __call__) may be used as a decorator.
        # NOTE(review): this raises the module's own TypeError, whose
        # __init__ signature takes (message, location); only a message is
        # passed here -- confirm against atlang.TypeError's signature.
        raise TypeError("Non-FnType used as a top-level function decorator.")
class IncompleteType(object):
    """Represents an incomplete type, used for literal forms.

    An incomplete type is constructed by providing an index
    containing one or more ellipses (so the constructor need
    not typically be called directly):
        tycon[a, ..., b]
    """
    def __init__(self, tycon, inc_idx):
        # NOTE: the parameter name `tycon` shadows the module-level tycon()
        # helper within this scope.
        self.tycon = tycon
        self.inc_idx = inc_idx

    def __call__(self, f):
        # Using an incomplete function type as a decorator synthesizes the
        # missing index from the decorated function's AST.
        if issubclass(self.tycon, FnType):
            # NOTE(review): the local name `ast` shadows the imported ast module.
            (ast, static_env) = _reflect_func(f)
            idx = self.tycon.syn_idx_FunctionDef_TopLevel(ast, static_env)
            return Fn(ast, static_env, _construct_ty(self.tycon, idx))
        else:
            # NOTE(review): the module-level TypeError takes (message,
            # location); only a message is passed here.
            raise TypeError("Incomplete non-FnType used as a top-level function decorator.")
def tycon(ty):
    """Return the type constructor of a type or incomplete type.

    Raises UsageError for any other argument.
    """
    if isinstance(ty, Type):
        return type(ty)
    if isinstance(ty, IncompleteType):
        return ty.tycon
    raise UsageError("Argument to tycon is not a type or incomplete type.")
def is_tycon(x):
    """Indicates whether the provided value is a tycon."""
    # A tycon is any subclass of Type; x must be a class for issubclass.
    return issubclass(x, Type)
class FnType(Type):
    """Base class for atlang function types."""
    def __call__(self, f):
        # A complete FnType used as a decorator typechecks f's AST against
        # this type (ana_*), then wraps the function in Fn.
        (tree, static_env) = _reflect_func(f)
        self.ana_FunctionDef_TopLevel(tree, static_env)
        return Fn(tree, static_env, self)
class Fn(object):
    """All atlang functions are instances of Fn."""
    def __init__(self, tree, static_env, ty):
        # tree: the function's ast.FunctionDef node.
        # static_env: the StaticEnv (closure + globals) captured at decoration.
        # ty: the complete atlang type ascribed to the function.
        self.tree = tree
        self.static_env = static_env
        self.ty = ty
def _reflect_func(f):
    """Return (AST node, StaticEnv) reflecting the decorated function f."""
    src = textwrap.dedent(inspect.getsource(f))
    # ast.parse wraps everything in a Module; take the function node itself.
    fn_node = ast.parse(src).body[0]
    return (fn_node, StaticEnv.from_func(f))
class StaticEnv(object):
    """The static environment (closure + globals) captured from a function.

    Name lookups check the closure first, then fall back to the globals.
    """
    def __init__(self, closure, globals):
        self.closure = closure
        self.globals = globals

    def __getitem__(self, item):
        try:
            return self.closure[item]
        except KeyError:
            return self.globals[item]

    @classmethod
    def from_func(cls, f):
        """Capture f's closure and global namespace.

        Portability fix: uses f.__globals__ (Python 2.6+ and 3.x) instead of
        the Python-2-only f.func_globals, matching this module's stated
        2/3 compatibility (it imports six).
        """
        closure = _func_closure(f)
        globals = f.__globals__
        return cls(closure, globals)
def _func_closure(f):
closure = f.func_closure
if closure is None: return {}
else: return dict(zip(f.func_code.co_freevars, (c.cell_contents for c in closure)))
| {
"repo_name": "atlang/atlang",
"path": "atlang/__init__.py",
"copies": "1",
"size": "4395",
"license": "mit",
"hash": -7340469608900118000,
"line_mean": 28.3,
"line_max": 86,
"alpha_frac": 0.6919226394,
"autogenerated": false,
"ratio": 3.5047846889952154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46967073283952154,
"avg_score": null,
"num_lines": null
} |
import urllib
from types import MethodType
from atlas.error import AtlasError
from atlas.portability import urlopen, urlencode, get_content_type, load_response
from atlas.utils import *
BASE_URL = 'https://atlas.metabroadcast.com/3.0/%s.json'
class API(object):
    """Atlas API

    Attribute access dynamically synthesizes an endpoint query function,
    e.g. ``api.content(q='x')`` queries the 'content' endpoint.
    """
    def __getattribute__(self, name):
        # Intercepts EVERY attribute lookup (including dunder names) and
        # builds a fresh bound endpoint function for the requested name.
        fn = MethodType(makeFunc(name), self)
        # NOTE(review): this cached attribute is never read back, because
        # __getattribute__ above always rebuilds `fn`; it is also
        # double-bound (MethodType wrapping an already-bound method), so
        # calling the cached value would pass `self` twice. Consider using
        # __getattr__ instead so caching actually takes effect.
        setattr(self, name, MethodType(fn, self))
        return fn
def makeFunc(name):
    """Return a function that queries the Atlas `name` endpoint.

    The returned callable accepts keyword arguments that become URL query
    parameters; `from_` is translated to `from` (a reserved word in Python).
    It returns the decoded JSON payload, or None for non-JSON responses.
    """
    def call(self, **kw):
        """
        Makes a call to Atlas
        name = Endpoint for Atlas queries
        **kw = Query string arguments to be appended to the base URL
        """
        url = BASE_URL % name
        json = import_simplejson()
        if kw:
            if 'from_' in kw:
                kw['from'] = kw.pop('from_')
            url = url + '?' + urlencode(kw)
        try:
            response = urlopen(url)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            raise AtlasError("Atlas API IO error")
        mime_type = get_content_type(response.info())
        # Accept any application/*json content type (e.g. application/json).
        if (response and mime_type.startswith('application/') and mime_type.endswith('json')):
            result = json.loads(load_response(response))
            return result
        else:
            return None
    return call
| {
"repo_name": "micrypt/atlas-client",
"path": "atlas/api.py",
"copies": "1",
"size": "1362",
"license": "mit",
"hash": 8000419465201347000,
"line_mean": 27.9787234043,
"line_max": 94,
"alpha_frac": 0.5866372981,
"autogenerated": false,
"ratio": 3.982456140350877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069093438450877,
"avg_score": null,
"num_lines": null
} |
import os
import click
from ..core import Core
from ..errors import AdaLinkError
from ..programmers import JLink, STLink, RasPi2
class STLink_ATSAMD21G18(STLink):
    """ATSAMD21G18-specific STLink-based programmer.

    Adds a chip-erase wipe and programs via OpenOCD's load_image command
    (the flash write_image function doesn't work reliably because of
    OpenOCD bugs).
    """
    def __init__(self):
        # Call base STLink initializer and set it up to program the ATSAMD21G18.
        super(STLink_ATSAMD21G18, self).__init__(params='-f interface/stlink-v2.cfg ' \
            '-c "set CHIPNAME at91samd21g18; set ENDIAN little; set CPUTAPID 0x0bc11477; source [find target/at91samdXX.cfg]"')

    def wipe(self):
        """Erase the entire ATSAMD21G18 flash with OpenOCD's chip-erase."""
        commands = [
            'init',
            'reset init',
            'at91samd chip-erase',
            'exit'
        ]
        self.run_commands(commands)

    def program(self, hex_files=[], bin_files=[]):
        """Program and verify the given hex files and (file, address) bin files.

        Raises AdaLinkError if OpenOCD does not report a verification for
        every file programmed.
        """
        click.echo('WARNING: Make sure the provided hex/bin files are padded with ' \
            'at least 64 bytes of blank (0xFF) data! This will work around a cache bug with OpenOCD 0.9.0.')
        commands = [
            'init',
            'reset init'
        ]
        # Program each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('load_image {0} 0 ihex'.format(f))
        # Program each bin file at its explicit load address.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('load_image {0} 0x{1:08X} bin'.format(f, addr))
        # Verify each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('verify_image {0} 0 ihex'.format(f))
        # Verify each bin file.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('verify_image {0} 0x{1:08X} bin'.format(f, addr))
        commands.append('reset run')
        commands.append('exit')
        # Run commands.
        output = self.run_commands(commands)
        # Count OpenOCD's 'verified ...' output lines; they must match the
        # number of programmed files. Fixed for Python 3: len(filter(...))
        # fails there because filter() returns an iterator.
        verified = sum(1 for line in output.splitlines()
                       if line.startswith('verified '))
        if verified != (len(hex_files) + len(bin_files)):
            raise AdaLinkError('Failed to verify all files were programmed!')
class RasPi2_ATSAMD21G18(RasPi2):
    """ATSAMD21G18-specific Raspberry Pi 2 native programmer.

    Adds a chip-erase wipe and programs via OpenOCD's load_image command
    (the flash write_image function doesn't work reliably because of
    OpenOCD bugs).
    """
    def __init__(self):
        # Call base Raspi initializer and set it up to program the ATSAMD21G18.
        super(RasPi2_ATSAMD21G18, self).__init__(params='-f interface/raspberrypi2-native.cfg ' \
            '-c "transport select swd; set CHIPNAME at91samd21g18; adapter_nsrst_delay 100; adapter_nsrst_assert_width 100; source [find target/at91samdXX.cfg]"')

    def wipe(self):
        """Erase the entire ATSAMD21G18 flash with OpenOCD's chip-erase."""
        commands = [
            'init',
            'reset init',
            'at91samd chip-erase',
            'exit'
        ]
        self.run_commands(commands)

    def program(self, hex_files=[], bin_files=[]):
        """Program and verify the given hex files and (file, address) bin files.

        Raises AdaLinkError if OpenOCD does not report a verification for
        every file programmed.
        """
        click.echo('WARNING: Make sure the provided hex/bin files are padded with ' \
            'at least 64 bytes of blank (0xFF) data! This will work around a cache bug with OpenOCD 0.9.0.')
        commands = [
            'init',
            'reset init'
        ]
        # Program each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('load_image {0} 0 ihex'.format(f))
        # Program each bin file at its explicit load address.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('load_image {0} 0x{1:08X} bin'.format(f, addr))
        # Verify each hex file.
        for f in hex_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('verify_image {0} 0 ihex'.format(f))
        # Verify each bin file.
        for f, addr in bin_files:
            f = self.escape_path(os.path.abspath(f))
            commands.append('verify_image {0} 0x{1:08X} bin'.format(f, addr))
        commands.append('reset run')
        commands.append('exit')
        # Run commands.
        output = self.run_commands(commands)
        # Count OpenOCD's 'verified ...' output lines; they must match the
        # number of programmed files. Fixed for Python 3: len(filter(...))
        # fails there because filter() returns an iterator.
        verified = sum(1 for line in output.splitlines()
                       if line.startswith('verified '))
        if verified != (len(hex_files) + len(bin_files)):
            raise AdaLinkError('Failed to verify all files were programmed!')
class ATSAMD21G18(Core):
    """Atmel ATSAMD21G18 CPU."""
    # Note that the docstring above doubles as the CLI short help description.

    def __init__(self):
        # The base Core initializer MUST run.
        super(ATSAMD21G18, self).__init__()

    def list_programmers(self):
        """Return a list of the programmer names supported by this CPU."""
        return ['jlink', 'stlink', 'raspi2']

    def create_programmer(self, programmer):
        """Create and return the programmer instance used to program the core."""
        if programmer == 'jlink':
            return JLink('Cortex-M0 r0p1, Little endian',
                         params='-device ATSAMD21G18 -if swd -speed 1000')
        if programmer == 'stlink':
            return STLink_ATSAMD21G18()
        if programmer == 'raspi2':
            return RasPi2_ATSAMD21G18()

    def info(self, programmer):
        """Display info about the device."""
        click.echo('Not implemented!')
| {
"repo_name": "adafruit/Adafruit_Adalink",
"path": "adalink/cores/atsamd21g18.py",
"copies": "1",
"size": "6600",
"license": "mit",
"hash": 9126105905442820000,
"line_mean": 41.8571428571,
"line_max": 162,
"alpha_frac": 0.6160606061,
"autogenerated": false,
"ratio": 3.658536585365854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47745971914658536,
"avg_score": null,
"num_lines": null
} |
# Atmel Programmer Case Design (For USBAsp v2)
# Gustave Granroth 03/28/2014
# Imports
import math
import os
import subprocess
# Standard conversion factors
INtoMM = 25.4  # inches -> millimetres
DEGtoRAD = math.pi/180  # degrees -> radians
# Command storage holders (for block-applications of geometry)
# NOTE(review): 'global' at module scope is a no-op statement; itemCount is
# already a module-level name here.
global itemCount
itemCount = 1 # The item count we are at ... we combine all items together.
# Create our object and prepare for using BRL-CAD
objName = 'ProgrammerCase';
os.chdir(r"C:\users\<INSERT YOUR BINARY PATH HERE>\Documents\BRLCAD 7.24.0\bin")
# Delete any previous database via the Windows shell.  shell=True is tolerable
# only because objName is a hard-coded constant, never user input.
subprocess.call("del " + objName + ".g", shell=True)
header = "mged.exe -c " + objName + ".g "  # prefix for every mged batch call
commandQueue = []  # pending mged commands, flushed in batches by FlushList()
## Start of Library Code ##
# Creates an arbitrary 6-point shape.
def Arb6(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, x5, y5, z5, x6, y6, z6):
    """Queue an arbitrary 6-point solid ('arb6') and return its item index."""
    global itemCount
    corners = (x1, y1, z1, x2, y2, z2, x3, y3, z3,
               x4, y4, z4, x5, y5, z5, x6, y6, z6)
    command = 'in shape{0}.s arb6 '.format(itemCount)
    command += ' '.join(str(value) for value in corners)
    commandQueue.append(command + ' \n')
    itemCount += 1
    return itemCount - 1
# Creates an arbitrary 8-pointed shape.
def Arb8(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, x5, y5, z5, x6, y6, z6, x7, y7, z7, x8, y8, z8):
    """Queue an arbitrary 8-point solid ('arb8') and return its item index."""
    global itemCount
    corners = (x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4,
               x5, y5, z5, x6, y6, z6, x7, y7, z7, x8, y8, z8)
    command = 'in shape{0}.s arb8 '.format(itemCount)
    command += ' '.join(str(value) for value in corners)
    commandQueue.append(command + ' \n')
    itemCount += 1
    return itemCount - 1
# Creates a triangular wedge growing in the z-direction
def Wedge(x1, y1, x2, y2, x3, y3, height, zOffset):
    """Queue a triangular wedge extruded in +z from zOffset by `height`."""
    top = height + zOffset
    return Arb6(x1, y1, zOffset,
                x2, y2, zOffset,
                x2, y2, top,
                x1, y1, top,
                x3, y3, zOffset,
                x3, y3, top)
# Creates a smooth wedge at x1, y1 pointing towards x2, y2 with height 'height', width 'width'
def SmoothWedge(x1, y1, z1, x2, y2, height, width):
    """Queue a smooth wedge ('rpc') at (x1, y1, z1) aimed towards (x2, y2)."""
    global itemCount
    command = 'in shape{0}.s rpc {1} {2} {3} 0 0 {4} {5} {6} 0 {7} \n'.format(
        itemCount, x1, y1, z1, height, x2, y2, width)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# Creates a cylinder pointing in the z direction
def Cylinder(x, y, z, r, h):
    """Queue a cylinder ('rcc') of radius r and height h pointing in +z."""
    global itemCount
    command = 'in shape{0}.s rcc {1} {2} {3} 0 0 {4} {5} \n'.format(
        itemCount, x, y, z, h, r)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# Create an "arbitrary cylinder" pointing from two points with radius r
def ArbCylinder(x, y, z, xx, yy, zz, r):
    """Queue a cylinder ('rcc') from (x, y, z) along vector (xx, yy, zz)."""
    global itemCount
    command = 'in shape{0}.s rcc {1} {2} {3} {4} {5} {6} {7} \n'.format(
        itemCount, x, y, z, xx, yy, zz, r)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# Create a sphere
def Sphere(x, y, z, r):
    """Queue a sphere ('sph') centered at (x, y, z) with radius r."""
    global itemCount
    command = 'in shape{0}.s sph {1} {2} {3} {4} \n'.format(
        itemCount, x, y, z, r)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# Create a x-y-z oriented cube
def Cube(x, y, z, xw, yw, zw):
    """Queue an axis-aligned box at (x, y, z) with extents (xw, yw, zw)."""
    xHigh = x + xw
    yHigh = y + yw
    zHigh = z + zw
    return Arb8(x, y, z, xHigh, y, z, xHigh, y, zHigh, x, y, zHigh,
                x, yHigh, z, xHigh, yHigh, z, xHigh, yHigh, zHigh, x, yHigh, zHigh)
## Start of library manipulation code ##
# Clear the command list as a block.
def FlushList(text):
    """Send all queued geometry commands to mged as one batch, then report.

    :param text: progress message printed after the flush.
    """
    global commandQueue
    # Bug fix: the original guard was `len(commandQueue) >= 0`, which is a
    # tautology and spawned an mged process even when nothing was queued.
    if commandQueue:
        subprocess.call(header + ''.join(commandQueue))
    commandQueue = []
    print(text)
## Start of CSG operation code ##
# Performs the difference of one item to another item
def Difference(total, removal):
    """Queue a CSG subtraction of item `removal` from item `total`."""
    global itemCount
    command = 'comb shape{0}.s u shape{1}.s - shape{2}.s \n'.format(
        itemCount, total, removal)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# Unions two items together.
def Union(first, second):
    """Queue a CSG union of items `first` and `second`."""
    global itemCount
    command = 'comb shape{0}.s u shape{1}.s u shape{2}.s \n'.format(
        itemCount, first, second)
    commandQueue.append(command)
    itemCount += 1
    return itemCount - 1
# A very simple pipe (no fancy bend-radius handling).
def Spipe(x, y, z, r1, r2, h):
    """Queue a hollow pipe: outer cylinder (r2) minus inner cylinder (r1).

    Bug fix: the original called lowercase `difference` and `cylinder`,
    which do not exist (the helpers are `Difference` and `Cylinder`), so
    calling Spipe raised NameError.
    """
    # The inner cylinder is extended 0.1 past each end so the subtraction
    # does not leave coincident faces.
    return Difference(Cylinder(x, y, z, r2, h), Cylinder(x, y, z - 0.1, r1, h + 0.2))
## Start of application code ##
# Dimensions measured from the board, in inches, converted via INtoMM
# (presumably a USBAsp v2 per the file header — TODO confirm against hardware).
wallThickness = 1.2; # mm
length = INtoMM*1.724
width = INtoMM*0.743
jumpWidth = INtoMM*0.095
jumpSep = INtoMM*0.078
jumpOffset = INtoMM*1.206
usbWidthLowX = INtoMM*0.135
usbWidthHighX = INtoMM*0.125
usbWidth = INtoMM*0.474
usbExtLength = INtoMM*0.163
lightWidthOffset = INtoMM*0.369
lightWidth = INtoMM*0.170
lightLength = INtoMM*0.160
usbHeight = INtoMM*0.20
underHeight = INtoMM*0.080
smallUpDown = INtoMM*0.050
intoDistance = INtoMM*0.100
# Main cube and cutouts
# Outer shell, then three cavities removed from it (lip + top slot + interior).
mainCube = Cube(0, -wallThickness, -wallThickness,
    length + wallThickness, width + 2*wallThickness, usbHeight + underHeight + 2*wallThickness)
smallCutout = Cube(-wallThickness, 0, underHeight - smallUpDown,
    length + wallThickness, intoDistance + wallThickness, smallUpDown*2)
smallCutoutTop = Cube(-wallThickness, -2*wallThickness, smallUpDown*2 + wallThickness,
    length + 2*wallThickness, intoDistance + wallThickness, usbHeight + underHeight - smallUpDown*2)
insideCutout = Cube(-wallThickness, intoDistance, 0,
    length + wallThickness, width - intoDistance, underHeight + usbHeight)
mainCutouts = Difference(Difference(Difference(mainCube, smallCutout), smallCutoutTop), insideCutout)
# Light Hole and slider hole
# Openings for the status LED and the jumper/slider on top of the board.
lightHole = Cube(length - jumpOffset, width - (lightWidthOffset + lightWidth), underHeight,
    lightLength, lightWidth, usbHeight + 2*wallThickness)
sliderHole = Cube(-wallThickness, width - (jumpWidth + jumpSep), underHeight,
    length - jumpOffset, jumpWidth, usbHeight + 2*wallThickness)
specialCutouts = Difference(Difference(mainCutouts, lightHole), sliderHole)
# USB Hole
# External collar around the USB plug, with a through-hole for the connector.
usbHolder = Cube(length, usbWidthLowX - wallThickness, 0,
    usbExtLength, usbWidth + 2*wallThickness, usbHeight + underHeight)
usbHole = Cube(length - wallThickness, usbWidthLowX, wallThickness,
    usbExtLength + 2*wallThickness, usbWidth, usbHeight + underHeight - 2*wallThickness)
#Final object
finObject = Difference(Union(specialCutouts, usbHolder), usbHole)
FlushList('Done with describing the programmer')
# Group all combinations and apply a region specifier for rendering
subprocess.call(header + 'r region1.r u shape' + str(finObject) + '.s \n');
# For fancy transparency (but slower) use "plastic {tr 0.5 re 0.2}"
subprocess.call(header + 'mater region1.r plastic 0 255 0 0\n')
# At this point, open mged to check your results using your .g input file
# Call 'draw region1.r', 'ae 45 45' 'rt -s 900'
# l-click, zoom out , r-click, zoom in
# 'exit' to quit.
"repo_name": "GuMiner/Python-BRLCAD-Interface",
"path": "example-programmer.py",
"copies": "1",
"size": "7464",
"license": "mit",
"hash": 6867379496540566000,
"line_mean": 34.2122641509,
"line_max": 105,
"alpha_frac": 0.6038317256,
"autogenerated": false,
"ratio": 2.7400881057268722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3843919831326872,
"avg_score": null,
"num_lines": null
} |
""" ATM_FORUM_TC_MIB
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AtmservicecategoryEnum(Enum):
    """
    AtmservicecategoryEnum

    Enumeration of the ATM service categories.

    .. data:: other = 1

    .. data:: cbr = 2

    .. data:: rtVbr = 3

    .. data:: nrtVbr = 4

    .. data:: abr = 5

    .. data:: ubr = 6

    """

    other = 1

    cbr = 2

    rtVbr = 3

    nrtVbr = 4

    abr = 5

    ubr = 6


    @staticmethod
    def _meta_info():
        # Metadata for this enum lives in the generated _meta package.
        from ydk.models.cisco_ios_xe._meta import _ATM_FORUM_TC_MIB as meta_module
        return meta_module._meta_table['AtmservicecategoryEnum']
class TruthvalueEnum(Enum):
    """
    TruthvalueEnum

    Boolean values use this data type from RFC\-1903

    .. data:: true = 1

    .. data:: false = 2

    """

    true = 1

    false = 2


    @staticmethod
    def _meta_info():
        # Metadata for this enum lives in the generated _meta package.
        from ydk.models.cisco_ios_xe._meta import _ATM_FORUM_TC_MIB as meta_module
        return meta_module._meta_table['TruthvalueEnum']
| {
"repo_name": "111pontes/ydk-py",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/ATM_FORUM_TC_MIB.py",
"copies": "1",
"size": "1116",
"license": "apache-2.0",
"hash": -6826419721329018000,
"line_mean": 12.95,
"line_max": 79,
"alpha_frac": 0.5958781362,
"autogenerated": false,
"ratio": 3.3214285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9342105997699088,
"avg_score": 0.015040141985896701,
"num_lines": 80
} |
#AtmGrid.py
import _pickle as pickle
import re, glob, os
import numpy as np
def split_file(file_m, ametal, path):
    """Split a concatenated ATLAS9 grid file into one file per model.

    Each model section starts with a header line containing
    "[T]EFF <teff> GRAVITY <logg> LTE ...".  Every header opens a new output
    file named "<ametal>_<int(teff)>_<logg>.dat" in `path`; the header line
    and all following lines are written to it.  Lines before the first
    header are discarded.

    :param file_m: path of the concatenated grid file.
    :param ametal: metallicity tag (e.g. 'p00') used in output file names.
    :param path: directory where the per-model files are written.
    """
    # One pattern replaces the original pair of searches for 'TEFF ...' and
    # 'EFF ...': the former is a strict substring match of the latter with
    # identical capture groups, so a single search is equivalent.
    header_pattern = re.compile(r'EFF (\d*\.) GRAVITY (\d*\.\d*) LTE.*')
    new_file = None  # explicit sentinel instead of catching UnboundLocalError
    with open(file_m) as f:
        for line in f:
            line = line.strip()
            match = header_pattern.search(line)
            if match:
                if new_file is not None:
                    new_file.close()
                teff = float(match.group(1))
                logg = float(match.group(2))
                new_file = open(os.path.join(path, '%s_%s_%s.dat' %
                                (ametal, str(int(teff)), str(logg))), 'w')
            if new_file is not None:
                new_file.write(line + '\n')
    if new_file is not None:
        new_file.close()
def read_file(f, MT, N):
    """Read one ATLAS9 model file into an MT-by-N float array.

    Only lines whose first characters look like a number "<digit>.<digits>"
    are parsed (one array row per such line); missing trailing columns stay
    NaN.  Rows beyond MT are silently ignored, matching the original
    behavior (the out-of-bounds IndexError is swallowed per line).

    :param f: path of the model file.
    :param MT: number of rows in the output array.
    :param N: number of columns in the output array.
    :return: MT-by-N numpy array; all-NaN if the file does not exist.
    """
    tabp = np.ones((MT, N)) * np.nan
    try:
        mod = open(f)
    except FileNotFoundError:
        return tabp
    # Raw string fixes the invalid '\A' escape of the original pattern
    # (same regex, no DeprecationWarning).
    data_pattern = re.compile(r'\A[0-9]\.[0-9]*')
    with mod:  # ensure the file is closed even if parsing raises
        i = 0
        for line in mod:
            if data_pattern.search(line):
                columns = line.split()
                for j in range(N):
                    try:
                        tabp[i][j] = float(columns[j])
                    except IndexError:
                        # Short line (or row index past MT): leave NaNs.
                        break
                i += 1
    return tabp
def create_grid_file(file_grid):
    """Build the ATLAS9 model-atmosphere grid and pickle it to `file_grid`.

    For every (Teff, log g, [Fe/H]) combination, the per-model file is
    located under ./atm_models/atlas9/grids (splitting the raw concatenated
    grid file first if needed), read into columns, and the whole grid is
    stored as a single pickled dictionary so this expensive step runs only
    once.

    :param file_grid: path of the pickle file to create.
    """
    print('\t\tCreating the atmospheric model grid. This will be done just once.')
    # Grid axes: effective temperature (K), log g (dex), metallicity [Fe/H].
    T = np.array([3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500.,
                  5750., 6000., 6250., 6500., 6750., 7000., 7250., 7500., 7750.,
                  8000., 8250., 8500., 8750., 9000., 9250., 9500., 9750.,
                  10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750.,
                  12000., 12250., 12500., 12750., 13000., 14000., 15000.])
    G = np.linspace(0., 5., 11)
    XM = np.array([-5.0, -4.5, -4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, -0.3,
                   -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.0])
    # Flatten the 3-D (T, G, XM) grid into parallel coordinate lists,
    # temperature-major, matching the original triple loop.
    Tgg = []
    Ggg = []
    Mgg = []
    for t in T:
        for g in G:
            for feh in XM:
                Tgg.append(t)
                Ggg.append(g)
                Mgg.append(feh)
    MT = 72  # max depth points per model
    N = 9    # columns read per depth point
    col0 = []
    col1 = []
    col2 = []
    col3 = []
    col4 = []
    col5 = []
    col6 = []
    for t, g, feh in zip(Tgg, Ggg, Mgg):
        # Metallicity tag, e.g. [Fe/H] = -0.5 -> 'm05', +0.2 -> 'p02'.
        # (Bug fix: the original assigned a dead, misspelled 'signp'
        # variable in the non-negative branch.)
        signo = 'm' if feh < 0 else 'p'
        avar = str(int(abs(feh*10)))
        if abs(feh*10) <= 5.:
            ametal = signo + '0' + avar
        else:
            ametal = signo + avar
        path = './atm_models/atlas9/grids/grid' + ametal
        path_odfnew = path + 'odfnew'
        # Prefer the ODFNEW version of the grid when present; split the raw
        # concatenated file once, on first use.
        if os.path.isdir(path_odfnew):
            path_file = path_odfnew
            if len(glob.glob(path_file + '/' + ametal + '*.dat')) == 0:
                files_m = glob.glob(path_file + '/a' + ametal + '*odfnew.dat')[0]
                split_file(files_m, ametal, path_file)
        else:
            path_file = path
            if len(glob.glob(path_file + '/' + ametal + '*.dat')) == 0:
                files_m = glob.glob(path_file + '/a' + ametal + '*.dat')[0]
                split_file(files_m, ametal, path_file)
        tab = read_file(os.path.join(path_file, '%s_%s_%.1f.dat' %
                        (ametal, str(int(t)), g)), MT, N)
        col0.append(tab.T[0])
        col1.append(tab.T[1])
        col2.append(tab.T[2])
        col3.append(tab.T[3])
        col4.append(tab.T[4])
        col5.append(tab.T[5])
        col6.append(tab.T[6])
        del tab
    dic = {'tgrid': np.array(Tgg), 'ggrid': np.array(Ggg), 'mgrid': np.array(Mgg),
           'col0': np.array(col0), 'col1': np.array(col1), 'col2': np.array(col2),
           'col3': np.array(col3), 'col4': np.array(col4), 'col5': np.array(col5),
           'col6': np.array(col6)}
    with open(file_grid, 'wb') as grid:
        pickle.dump(dic, grid)
    # (The original ended with no-op locals `del ...; grid = None`; locals
    # are released on return anyway.)
# Check if grid file exists
# Import-time side effect: the first import builds the full grid (slow);
# subsequent imports just unpickle it into the module-level `grid`.
file_grid = './atm_models/atlas9/ATLAS9_grid.pickle'
if not os.path.isfile(file_grid):
    create_grid_file(file_grid)
with open(file_grid, 'rb') as f:
    grid = pickle.load(f)
| {
"repo_name": "msotov/SPECIES",
"path": "AtmosGrid.py",
"copies": "1",
"size": "4781",
"license": "mit",
"hash": 2748870351980825600,
"line_mean": 27.9757575758,
"line_max": 87,
"alpha_frac": 0.4540891027,
"autogenerated": false,
"ratio": 2.9403444034440342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3894433506144034,
"avg_score": null,
"num_lines": null
} |
## Atmospheric Refraction Model
import numpy as np
import scipy.interpolate
import astropy.time
import astropy.coordinates
import astropy.units as u
import specsim
import tpcorr.distortion
# helper function for shifting angles to desired range
def normalize_angle(angle):
    """Map an angle in degrees onto the half-open interval (-180, 180]."""
    remainder = angle % 360
    return remainder - 360 if remainder > 180 else remainder
class Pointing(object):
    """Represents an observer pointing at a fixed sky location.
    """
    def __init__(self, ra_center, dec_center):
        # Boresight of the plate and a fiducial point offset +1.5 deg in Dec,
        # used later to fix the focal-plane rotation.
        self.plate_center = astropy.coordinates.ICRS(ra=ra_center, dec=dec_center)
        self.plate_fid = astropy.coordinates.ICRS(ra=ra_center, dec=dec_center + 1.5 * u.deg)
        self.platescale = 217.7358 * u.mm / u.deg
        # Plate design wavelength and assumed relative humidity.
        self.wlen0 = 5500 * u.Angstrom
        self.relhum = 0.2
        # Fall back to hard-coded APO coordinates when astropy's site
        # registry does not know 'apo' (e.g. no network / old cache).
        try:
            self.where = astropy.coordinates.EarthLocation.of_site('apo')
        except astropy.coordinates.errors.UnknownSiteException:
            self.where = astropy.coordinates.EarthLocation(lat=32.7797556*u.deg, lon=-(105+49./60.+13/3600.)*u.deg, height=2797*u.m)
        # Radial optical and chromatic distortion models for this plate scale.
        self.distortion_model = tpcorr.distortion.get_optical_distortion_model(330.0 * u.mm, self.platescale)
        self.chromatic_model = tpcorr.distortion.get_chromatic_distortion_model(self.platescale)
    def transform(self, targets, tai, wlen, temperature, pressure, do_chromatic_distortion=True, extrap_wlen=False):
        """Transform from sky coordinates to focal plane coordinates.
        Args:
            targets: astropy.coordinates.SkyCoord
            tai: float or numpy.ndarray of TAI times in MJD seconds
            wlen: astropy.units.quantity.Quantity
            temperature: astropy.units.quantity.Quantity
            pressure: astropy.units.quantity.Quantity or None to calculate based
                on temperature and elevation
            do_chromatic_distortion: apply the chromatic distortion model
            extrap_wlen: passed through to the chromatic model
        Returns:
            tuple (x, y, alt, az): x, y are astropy Quantity focal plane
            positions in length units, broadcast over the input args; alt, az
            are the targets' refracted altitude and azimuth.
        """
        # Initialize time objects from the input TAI values in MJD seconds.
        when = astropy.time.Time(tai/86400., format='mjd', scale='tai', location=self.where)
        # Calculate the Alt-Az path of the telescope boresight at the plate design wavelength (5500A).
        obs_model0 = specsim.transform.create_observing_model(
            where=self.where, when=when, wavelength=self.wlen0, pressure=pressure,
            temperature=temperature, relative_humidity=self.relhum)
        altaz0 = specsim.transform.sky_to_altaz(self.plate_center, obs_model0)
        alt0, az0 = altaz0.alt, altaz0.az
        # Calculate the Alt-Az paths of each target over the input wavelength grid.
        obs_model = specsim.transform.create_observing_model(
            where=self.where, when=when, wavelength=wlen, pressure=pressure,
            temperature=temperature, relative_humidity=self.relhum)
        altaz = specsim.transform.sky_to_altaz(targets, obs_model)
        # Convert each target's Alt-Az into local X, Y focal plane coordinates.
        x, y = specsim.transform.altaz_to_focalplane(
            altaz.alt, altaz.az, altaz0.alt, altaz0.az, self.platescale)
        # Flip y to match the handedness of the XFOCAL, YFOCAL coordinate system.
        y = -y
        # Rotate the focal plane so that +y points towards a point that is offset from
        # the plate center along DEC by +1.5 degrees.
        altaz_fid = specsim.transform.sky_to_altaz(self.plate_fid, obs_model0)
        x_fid, y_fid = specsim.transform.altaz_to_focalplane(
            altaz_fid.alt, altaz_fid.az, altaz0.alt, altaz0.az, self.platescale)
        angle = np.arctan2(x_fid.si, -y_fid.si)
        cos_angle = np.cos(angle)
        sin_angle = np.sin(angle)
        x_rot = x * cos_angle - y * sin_angle
        y_rot = x * sin_angle + y * cos_angle
        # Apply radial optical distortions.
        r = np.sqrt(x_rot**2 + y_rot**2)
        distortion = ((r + self.distortion_model(r)) / r).si
        x_dist = distortion * x_rot
        y_dist = distortion * y_rot
        if do_chromatic_distortion:
            # Chromatic correction is the wavelength-dependent shift relative
            # to the 5000A reference.
            r_dist = np.sqrt(x_dist**2 + y_dist**2)
            dr5000 = self.chromatic_model(r_dist, 5000, extrap_wlen)
            dr = np.empty_like(r_dist)
            # ugh broadcasting...
            # NOTE(review): the scalar branch passes `wlen` (a Quantity)
            # directly, while the 2-D branch converts to a plain Angstrom
            # value first — confirm chromatic_model accepts both forms.
            if wlen.isscalar:
                dr = self.chromatic_model(r_dist, wlen, extrap_wlen) - dr5000
            elif len(wlen.shape) == 2:
                for iw,w in enumerate(wlen.flatten()):
                    dr[:,iw] = self.chromatic_model(r_dist[:,iw], w.to(u.Angstrom).value, extrap_wlen) - dr5000[:,iw]
            chromatic_distortion = ((r_dist + dr) / r_dist).si
            x_dist = chromatic_distortion * x_dist
            y_dist = chromatic_distortion * y_dist
        return x_dist, y_dist, altaz.alt, altaz.az
    def hour_angle(self, tai):
        """Convert TAI to the hour angle of this plate's RA"""
        when = astropy.time.Time(tai/86400., format='mjd', scale='tai', location=self.where)
        return when.sidereal_time('apparent') - self.plate_center.ra
if __name__ == '__main__':
    # Smoke test: construct a pointing at RA = 180 deg, Dec = 25 deg.
    p = Pointing(ra_center=180*u.deg, dec_center=25*u.deg)
| {
"repo_name": "dmargala/tpcorr",
"path": "tpcorr/pointing.py",
"copies": "1",
"size": "5269",
"license": "mit",
"hash": -1439445773586322700,
"line_mean": 43.6525423729,
"line_max": 132,
"alpha_frac": 0.6314291137,
"autogenerated": false,
"ratio": 3.3818998716302953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9465527173859968,
"avg_score": 0.009560362294065422,
"num_lines": 118
} |
"""Atmospheric soundings.
--- NOTATION ---
The following letters will be used throughout this module.
T = number of lead times
n = number of storm objects
p = number of pressure levels, not including surface
P = number of pressure levels, including surface
F = number of sounding fields
N = number of soundings = T*n
"""
import os.path
import numpy
import pandas
import netCDF4
from scipy.interpolate import interp1d as scipy_interp1d
from gewittergefahr.gg_io import grib_io
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import interp
from gewittergefahr.gg_utils import moisture_conversions
from gewittergefahr.gg_utils import temperature_conversions
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
TIME_FORMAT_IN_FILE_NAMES = '%Y-%m-%d-%H%M%S'
# Unit-conversion factors.
MB_TO_PASCALS = 100
PASCALS_TO_MB = 0.01
PERCENT_TO_UNITLESS = 0.01
# Column/key names used in target-point tables.
PRESSURE_LEVEL_KEY = 'pressure_level_mb'
LEAD_TIME_KEY = 'lead_time_seconds'
LAG_TIME_KEY = 'lag_time_for_convective_contamination_sec'
INITIAL_TIME_COLUMN = 'init_time_unix_sec'
FORECAST_TIME_COLUMN = 'forecast_time_unix_sec'
# Keys used in sounding dictionaries (see docstrings below).
FULL_IDS_KEY = 'full_storm_id_strings'
INITIAL_TIMES_KEY = 'init_times_unix_sec'
LEAD_TIMES_KEY = 'lead_times_seconds'
STORM_ELEVATIONS_KEY = 'storm_elevations_m_asl'
SOUNDING_MATRIX_KEY = 'sounding_matrix'
SURFACE_PRESSURES_KEY = 'surface_pressures_mb'
PRESSURE_LEVELS_WITH_SFC_KEY = 'pressure_levels_with_surface_mb'
HEIGHT_LEVELS_KEY = 'height_levels_m_agl'
FIELD_NAMES_KEY = 'field_names'
# Sounding-field names (some reused from nwp_model_utils).
GEOPOTENTIAL_HEIGHT_NAME = nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS
RELATIVE_HUMIDITY_NAME = 'relative_humidity_unitless'
TEMPERATURE_NAME = nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS
U_WIND_NAME = nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS
V_WIND_NAME = nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS
SPECIFIC_HUMIDITY_NAME = nwp_model_utils.SPFH_COLUMN_FOR_SOUNDINGS
VIRTUAL_POTENTIAL_TEMPERATURE_NAME = 'virtual_potential_temperature_kelvins'
PRESSURE_NAME = 'pressure_pascals'
VALID_FIELD_NAMES = [
    GEOPOTENTIAL_HEIGHT_NAME, RELATIVE_HUMIDITY_NAME, TEMPERATURE_NAME,
    U_WIND_NAME, V_WIND_NAME, SPECIFIC_HUMIDITY_NAME,
    VIRTUAL_POTENTIAL_TEMPERATURE_NAME, PRESSURE_NAME
]
# Verbose (plot-ready) names, with and without units.
FIELD_NAME_TO_VERBOSE_DICT = {
    GEOPOTENTIAL_HEIGHT_NAME: 'Geopotential height (m)',
    RELATIVE_HUMIDITY_NAME: 'Relative humidity',
    TEMPERATURE_NAME: 'Temperature (K)',
    U_WIND_NAME: r'$u$-wind (m s$^{-1}$)',
    V_WIND_NAME: r'$v$-wind (m s$^{-1}$)',
    SPECIFIC_HUMIDITY_NAME: r'Specific humidity (kg kg$^{-1}$)',
    VIRTUAL_POTENTIAL_TEMPERATURE_NAME: 'Virtual potential temperature (K)',
    PRESSURE_NAME: 'Pressure (Pa)'
}
FIELD_NAME_TO_VERBOSE_UNITLESS_DICT = {
    GEOPOTENTIAL_HEIGHT_NAME: 'Geopotential height',
    RELATIVE_HUMIDITY_NAME: 'Relative humidity',
    TEMPERATURE_NAME: 'Temperature',
    U_WIND_NAME: r'$u$-wind',
    V_WIND_NAME: r'$v$-wind',
    SPECIFIC_HUMIDITY_NAME: 'Specific humidity',
    VIRTUAL_POTENTIAL_TEMPERATURE_NAME: 'Virtual potential temperature',
    PRESSURE_NAME: 'Pressure'
}
# netCDF dimension names for sounding files.
STORM_OBJECT_DIMENSION_KEY = 'storm_object'
FIELD_DIMENSION_KEY = 'field'
HEIGHT_DIMENSION_KEY = 'height_level'
STORM_ID_CHAR_DIMENSION_KEY = 'storm_id_character'
FIELD_NAME_CHAR_DIMENSION_KEY = 'field_name_character'
# Field names for MetPy.
PRESSURE_COLUMN_METPY = 'pressures_mb'
TEMPERATURE_COLUMN_METPY = 'temperatures_deg_c'
DEWPOINT_COLUMN_METPY = 'dewpoints_deg_c'
U_WIND_COLUMN_METPY = 'u_winds_kt'
V_WIND_COLUMN_METPY = 'v_winds_kt'
# Defaults for sounding extraction.
DEFAULT_LEAD_TIMES_SEC = numpy.array([0], dtype=int)
DEFAULT_LAG_TIME_FOR_CONVECTIVE_CONTAMINATION_SEC = 1800
DEFAULT_HEIGHT_LEVELS_M_AGL = numpy.linspace(0, 12000, num=49, dtype=int)
def _get_nwp_fields_for_sounding(
        model_name, return_table, include_surface=False,
        minimum_pressure_mb=0.):
    """Returns list of NWP fields needed to create sounding.

    :param model_name: Model name (must be accepted by
        `nwp_model_utils.check_model_name`).
    :param return_table: Boolean flag.  See below for how this affects output.
    :param include_surface: Boolean flag.  If True, this method will return the
        "surface" (2-metre or 10-metre) level for each field.
    :param minimum_pressure_mb: Minimum pressure level (millibars) to include;
        levels below this pressure are dropped.
    :return: sounding_field_names: [empty list if return_table = True]
        length-F list with names of sounding fields (GewitterGefahr format).
    :return: sounding_field_names_grib1: [empty list if return_table = True]
        length-F list with names of sounding fields (grib1 format).
    :return: sounding_field_name_table: [None if return_table = False]
        pandas DataFrame with the following columns.  Each row is one pressure
        level.  Only one of "relative_humidity_percent" and "specific_humidity"
        (depending on the NWP model) will be present.
    sounding_field_name_table.geopotential_height_metres: Name of geopotential-
        height field.
    sounding_field_name_table.temperature_kelvins: Name of temperature field.
    sounding_field_name_table.relative_humidity_percent: Name of humidity field.
    sounding_field_name_table.specific_humidity: Name of humidity field.
    sounding_field_name_table.u_wind_m_s01: Name of u-wind field.
    sounding_field_name_table.v_wind_m_s01: Name of v-wind field.
    sounding_field_name_table.pressure_level_mb: Pressure level (millibars).
        The surface is denoted by NaN.
    """
    nwp_model_utils.check_model_name(model_name)
    error_checking.assert_is_boolean(return_table)
    error_checking.assert_is_geq(minimum_pressure_mb, 0.)
    error_checking.assert_is_boolean(include_surface)
    pressure_levels_no_surface_mb = nwp_model_utils.get_pressure_levels(
        model_name=model_name, grid_name=nwp_model_utils.NAME_OF_130GRID
    ).astype(float)
    pressure_levels_no_surface_mb = pressure_levels_no_surface_mb[
        pressure_levels_no_surface_mb >= minimum_pressure_mb
    ]
    # NaN marks the surface "level" when it is included.
    if include_surface:
        pressure_levels_with_surface_mb = numpy.concatenate((
            pressure_levels_no_surface_mb, numpy.array([numpy.nan])
        ))
    else:
        pressure_levels_with_surface_mb = pressure_levels_no_surface_mb + 0.
    num_pressure_levels_no_surface = len(pressure_levels_no_surface_mb)
    num_pressure_levels_with_surface = len(pressure_levels_with_surface_mb)
    field_names, field_names_grib1 = (
        nwp_model_utils.get_columns_in_sounding_table(model_name)
    )
    num_fields = len(field_names)
    sounding_field_name_table = None
    sounding_field_names = []
    sounding_field_names_grib1 = []
    if return_table:
        sounding_field_name_dict = {
            PRESSURE_LEVEL_KEY: pressure_levels_with_surface_mb
        }
        # NOTE(review): every column shares the same placeholder list object;
        # pandas copies it on DataFrame construction, so this is safe here.
        list_of_empty_strings = [''] * num_pressure_levels_with_surface
        for j in range(num_fields):
            sounding_field_name_dict.update({
                field_names[j]: list_of_empty_strings
            })
        sounding_field_name_table = pandas.DataFrame.from_dict(
            sounding_field_name_dict)
    for j in range(num_fields):
        # Pressure-level entries, e.g. "temperature_kelvins_500mb" /
        # "TMP:500 mb".
        for k in range(num_pressure_levels_no_surface):
            this_field_name = '{0:s}_{1:d}mb'.format(
                field_names[j],
                int(numpy.round(pressure_levels_no_surface_mb[k]))
            )
            if return_table:
                sounding_field_name_table[field_names[j]].values[k] = (
                    this_field_name
                )
            else:
                this_field_name_grib1 = '{0:s}:{1:d} mb'.format(
                    field_names_grib1[j],
                    int(numpy.round(pressure_levels_no_surface_mb[k]))
                )
                sounding_field_names.append(this_field_name)
                sounding_field_names_grib1.append(this_field_name_grib1)
        if not include_surface:
            continue
        # Surface (near-ground) entry for this field, looked up per model.
        if field_names[j] == GEOPOTENTIAL_HEIGHT_NAME:
            this_field_name, this_field_name_grib1 = (
                nwp_model_utils.get_lowest_height_name(model_name)
            )
        if field_names[j] == TEMPERATURE_NAME:
            this_field_name, this_field_name_grib1 = (
                nwp_model_utils.get_lowest_temperature_name(model_name)
            )
        if field_names[j] in [nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS,
                              SPECIFIC_HUMIDITY_NAME]:
            this_field_name, this_field_name_grib1 = (
                nwp_model_utils.get_lowest_humidity_name(model_name)
            )
        if field_names[j] == U_WIND_NAME:
            this_field_name, this_field_name_grib1 = (
                nwp_model_utils.get_lowest_u_wind_name(model_name)
            )
        if field_names[j] == V_WIND_NAME:
            this_field_name, this_field_name_grib1 = (
                nwp_model_utils.get_lowest_v_wind_name(model_name)
            )
        if return_table:
            sounding_field_name_table[field_names[j]].values[
                num_pressure_levels_with_surface - 1
            ] = this_field_name
        else:
            sounding_field_names.append(this_field_name)
            sounding_field_names_grib1.append(this_field_name_grib1)
    if return_table or not include_surface:
        return (sounding_field_names, sounding_field_names_grib1,
                sounding_field_name_table)
    # Surface pressure is appended last (needed to locate the surface level).
    this_field_name, this_field_name_grib1 = (
        nwp_model_utils.get_lowest_pressure_name(model_name)
    )
    sounding_field_names.append(this_field_name)
    sounding_field_names_grib1.append(this_field_name_grib1)
    return (sounding_field_names, sounding_field_names_grib1,
            sounding_field_name_table)
def _create_target_points_for_interp(storm_object_table, lead_times_seconds):
    """Creates target points for interpolation.

    Each target point consists of (latitude, longitude, time).

    :param storm_object_table: pandas DataFrame with columns documented in
        `storm_tracking_io.write_file`.
    :param lead_times_seconds: 1-D numpy array of lead times (non-negative
        integers).  For each lead time t, each storm object will be extrapolated
        t seconds into the future, along its estimated motion vector.
    :return: target_point_table: pandas DataFrame with the following columns.
    target_point_table.full_id_string: Full storm ID.
    target_point_table.init_time_unix_sec: Initial time (storm time).  Valid
        time = initial time + lead time.
    target_point_table.centroid_lat_deg: Latitude (deg N) of extrapolated storm
        object's centroid.
    target_point_table.centroid_lng_deg: Longitude (deg E) of extrapolated storm
        object's centroid.
    target_point_table.valid_time_unix_sec: Time of extrapolated storm object.
    target_point_table.lead_time_seconds: Lead time used for extrapolation.
    target_point_table.east_velocity_m_s01: Eastward component (metres per
        second) of estimated storm-motion vector.
    target_point_table.north_velocity_m_s01: Northward component.
    """
    # Speeds/bearings are only needed (and only defined) when some lead time
    # is non-zero; the zero-lead branch below never touches them.
    if numpy.any(lead_times_seconds > 0):
        storm_speeds_m_s01, storm_bearings_deg = (
            geodetic_utils.xy_to_scalar_displacements_and_bearings(
                x_displacements_metres=
                storm_object_table[tracking_utils.EAST_VELOCITY_COLUMN].values,
                y_displacements_metres=
                storm_object_table[tracking_utils.NORTH_VELOCITY_COLUMN].values)
        )
    num_storm_objects = len(storm_object_table.index)
    num_lead_times = len(lead_times_seconds)
    list_of_target_point_tables = [None] * num_lead_times
    for i in range(num_lead_times):
        if lead_times_seconds[i] == 0:
            # Zero lead: target points are the storm centroids themselves.
            list_of_target_point_tables[i] = storm_object_table[[
                tracking_utils.FULL_ID_COLUMN, tracking_utils.VALID_TIME_COLUMN,
                tracking_utils.CENTROID_LATITUDE_COLUMN,
                tracking_utils.CENTROID_LONGITUDE_COLUMN,
                tracking_utils.EAST_VELOCITY_COLUMN,
                tracking_utils.NORTH_VELOCITY_COLUMN
            ]]
            argument_dict = {
                LEAD_TIME_KEY: numpy.full(num_storm_objects, 0, dtype=int),
                FORECAST_TIME_COLUMN: list_of_target_point_tables[i][
                    tracking_utils.VALID_TIME_COLUMN].values
            }
            list_of_target_point_tables[i] = (
                list_of_target_point_tables[i].assign(**argument_dict)
            )
            if i == 0:
                continue
            # Align columns with the first table so concat is well-defined.
            list_of_target_point_tables[i] = (
                list_of_target_point_tables[i].align(
                    list_of_target_point_tables[0], axis=1
                )[0]
            )
            continue
        # Non-zero lead: extrapolate each centroid along its motion vector.
        these_extrap_latitudes_deg, these_extrap_longitudes_deg = (
            geodetic_utils.start_points_and_displacements_to_endpoints(
                start_latitudes_deg=storm_object_table[
                    tracking_utils.CENTROID_LATITUDE_COLUMN].values,
                start_longitudes_deg=storm_object_table[
                    tracking_utils.CENTROID_LONGITUDE_COLUMN].values,
                scalar_displacements_metres=
                storm_speeds_m_s01 * lead_times_seconds[i],
                geodetic_bearings_deg=storm_bearings_deg)
        )
        these_times_unix_sec = (
            storm_object_table[tracking_utils.VALID_TIME_COLUMN].values +
            lead_times_seconds[i]
        )
        this_dict = {
            tracking_utils.FULL_ID_COLUMN:
                storm_object_table[tracking_utils.FULL_ID_COLUMN].values,
            tracking_utils.VALID_TIME_COLUMN:
                storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,
            tracking_utils.CENTROID_LATITUDE_COLUMN: these_extrap_latitudes_deg,
            tracking_utils.CENTROID_LONGITUDE_COLUMN:
                these_extrap_longitudes_deg,
            FORECAST_TIME_COLUMN: these_times_unix_sec,
            tracking_utils.EAST_VELOCITY_COLUMN:
                storm_object_table[tracking_utils.EAST_VELOCITY_COLUMN].values,
            tracking_utils.NORTH_VELOCITY_COLUMN:
                storm_object_table[tracking_utils.NORTH_VELOCITY_COLUMN].values,
            LEAD_TIME_KEY: numpy.full(
                num_storm_objects, lead_times_seconds[i], dtype=int)
        }
        list_of_target_point_tables[i] = pandas.DataFrame.from_dict(this_dict)
        if i == 0:
            continue
        list_of_target_point_tables[i] = list_of_target_point_tables[i].align(
            list_of_target_point_tables[0], axis=1
        )[0]
    # Stack all lead times into one table; valid time becomes the init time.
    target_point_table = pandas.concat(
        list_of_target_point_tables, axis=0, ignore_index=True)
    column_dict_old_to_new = {
        tracking_utils.VALID_TIME_COLUMN: INITIAL_TIME_COLUMN
    }
    return target_point_table.rename(
        columns=column_dict_old_to_new, inplace=False)
def _interp_soundings_from_nwp(
        target_point_table, top_grib_directory_name, include_surface,
        model_name, use_all_grids, grid_id, wgrib_exe_name, wgrib2_exe_name,
        raise_error_if_missing):
    """Interpolates soundings from NWP model to target points.

    Each target point consists of (latitude, longitude, time).

    :param target_point_table: pandas DataFrame created by
        `_create_target_points_for_interp`.
    :param top_grib_directory_name: See doc for
        `interp.interp_nwp_from_xy_grid`.
    :param include_surface: See doc for `_get_nwp_fields_for_sounding`.
    :param model_name: See doc for `interp.interp_nwp_from_xy_grid`.
    :param use_all_grids: Same.
    :param grid_id: Same.
    :param wgrib_exe_name: Same.
    :param wgrib2_exe_name: Same.
    :param raise_error_if_missing: Same.
    :return: interp_table: pandas DataFrame, where each column is one field and
        each row is one target point.  Column names are the sounding-field
        names returned by `_get_nwp_fields_for_sounding`.
    """
    # Only the flat name lists are needed here, not the lookup table.
    sounding_field_names, sounding_field_names_grib1 = (
        _get_nwp_fields_for_sounding(
            model_name=model_name, return_table=False,
            include_surface=include_surface
        )[:2]
    )
    # Previous-neighbour in time and nearest-neighbour in space — presumably
    # chosen to keep soundings causal and cheap; see interp module for details.
    return interp.interp_nwp_from_xy_grid(
        query_point_table=target_point_table, field_names=sounding_field_names,
        field_names_grib1=sounding_field_names_grib1, model_name=model_name,
        top_grib_directory_name=top_grib_directory_name,
        use_all_grids=use_all_grids, grid_id=grid_id,
        temporal_interp_method_string=interp.PREV_NEIGHBOUR_METHOD_STRING,
        spatial_interp_method_string=interp.NEAREST_NEIGHBOUR_METHOD_STRING,
        wgrib_exe_name=wgrib_exe_name, wgrib2_exe_name=wgrib2_exe_name,
        raise_error_if_missing=raise_error_if_missing)
def _convert_interp_table_to_soundings(
        interp_table, target_point_table, model_name, include_surface=False,
        minimum_pressure_mb=0.):
    """Converts table of interpolated values to list of soundings.

    :param interp_table: N-row pandas DataFrame created by
        `_interp_soundings_from_nwp`.
    :param target_point_table: N-row pandas DataFrame created by
        `_create_target_points_for_interp`.
    :param model_name: Model name (must be accepted by
        `nwp_model_utils.check_model_name`).
    :param include_surface: See doc for `_get_nwp_fields_for_sounding`.
    :param minimum_pressure_mb: Same.
    :return: sounding_dict_pressure_coords: Dictionary with the following keys.
    sounding_dict_pressure_coords['full_storm_id_strings']: length-N list of
        full IDs.
    sounding_dict_pressure_coords['init_times_unix_sec']: length-N numpy array
        of initial times (storm times).  Valid time = initial time + lead time.
    sounding_dict_pressure_coords['lead_times_seconds']: length-N numpy array of
        lead times.
    sounding_dict_pressure_coords['sounding_matrix']: N-by-P-by-F numpy array of
        sounding values.
    sounding_dict_pressure_coords['surface_pressures_mb']: length-N numpy array
        with surface pressure (millibars) for each storm object.  If
        `include_surface = False`, this is `None`.
    sounding_dict_pressure_coords['pressure_levels_mb']: length-P numpy array of
        pressure levels (millibars).  The surface is denoted by NaN.
    sounding_dict_pressure_coords['field_names']: length-F list of field names.
    """

    field_name_table = _get_nwp_fields_for_sounding(
        model_name=model_name, return_table=True,
        include_surface=include_surface,
        minimum_pressure_mb=minimum_pressure_mb
    )[-1]

    if include_surface:
        this_column_name = nwp_model_utils.get_lowest_pressure_name(
            model_name
        )[0]

        surface_pressures_mb = (
            PASCALS_TO_MB * interp_table[this_column_name].values
        )
    else:
        surface_pressures_mb = None

    # All columns except the pressure-level column are sounding fields.
    field_names = [
        f for f in list(field_name_table) if f != PRESSURE_LEVEL_KEY
    ]
    pressure_levels_with_surface_mb = (
        field_name_table[PRESSURE_LEVEL_KEY].values
    )

    num_storm_objects = len(interp_table.index)
    num_pressure_levels = len(field_name_table.index)
    num_fields = len(field_names)

    sounding_matrix = numpy.full(
        (num_storm_objects, num_pressure_levels, num_fields), numpy.nan
    )

    # `field_name_table` maps (field, pressure level) to the corresponding
    # column of `interp_table`.
    for k, this_field_name in enumerate(field_names):
        these_column_names = field_name_table[this_field_name].values

        for j in range(num_pressure_levels):
            sounding_matrix[:, j, k] = (
                interp_table[these_column_names[j]].values
            )

    return {
        FULL_IDS_KEY:
            target_point_table[tracking_utils.FULL_ID_COLUMN].values.tolist(),
        INITIAL_TIMES_KEY: target_point_table[INITIAL_TIME_COLUMN].values,
        LEAD_TIMES_KEY: target_point_table[LEAD_TIME_KEY].values,
        SOUNDING_MATRIX_KEY: sounding_matrix,
        SURFACE_PRESSURES_KEY: surface_pressures_mb,
        PRESSURE_LEVELS_WITH_SFC_KEY: pressure_levels_with_surface_mb,
        FIELD_NAMES_KEY: field_names
    }
def _get_pressures(sounding_dict):
    """Returns pressure levels in soundings.

    :param sounding_dict: Dictionary created by
        `_convert_interp_table_to_soundings` or `_pressure_to_height_coords`.
    :return: pressure_matrix_pascals: N-by-P numpy array of pressures.
    """

    # Height-coord dictionaries carry pressure as an explicit field (already
    # in Pascals); pressure-coord dictionaries carry it as metadata in mb.
    if PRESSURE_LEVELS_WITH_SFC_KEY not in sounding_dict:
        this_index = sounding_dict[FIELD_NAMES_KEY].index(PRESSURE_NAME)
        return sounding_dict[SOUNDING_MATRIX_KEY][..., this_index]

    num_storm_objects = sounding_dict[SOUNDING_MATRIX_KEY].shape[0]

    # Every sounding shares the same pressure levels, so just repeat them.
    pressure_matrix_mb = numpy.tile(
        sounding_dict[PRESSURE_LEVELS_WITH_SFC_KEY],
        (num_storm_objects, 1)
    )

    # The surface level is flagged by NaN; replace it with each storm object's
    # actual surface pressure.
    if sounding_dict[SURFACE_PRESSURES_KEY] is not None:
        surface_index = numpy.where(numpy.isnan(
            sounding_dict[PRESSURE_LEVELS_WITH_SFC_KEY]
        ))[0][0]

        pressure_matrix_mb[:, surface_index] = (
            sounding_dict[SURFACE_PRESSURES_KEY]
        )

    return MB_TO_PASCALS * pressure_matrix_mb
def _relative_to_specific_humidity(sounding_dict, pressure_matrix_pascals):
    """Converts relative to specific humidity in each sounding.

    :param sounding_dict: Dictionary created by
        `_convert_interp_table_to_soundings` or `_pressure_to_height_coords`.
    :param pressure_matrix_pascals: N-by-P numpy array of pressures.
    :return: sounding_dict: Same as input, with the following exceptions.
    [1] contains specific humidity
    [2] relative humidity is in 0...1, rather than a percentage
    :return: dewpoint_matrix_kelvins: N-by-P numpy array of dewpoints.
    """

    field_names = sounding_dict[FIELD_NAMES_KEY]
    sounding_matrix = sounding_dict[SOUNDING_MATRIX_KEY]
    temperature_matrix_kelvins = sounding_matrix[
        ..., field_names.index(TEMPERATURE_NAME)]

    if nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS in field_names:
        # Raw model humidity is a percentage: rescale to 0...1 (in place) and
        # rename the field.
        rh_index = field_names.index(nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS)
        sounding_matrix[..., rh_index] *= PERCENT_TO_UNITLESS
        field_names[rh_index] = RELATIVE_HUMIDITY_NAME
    else:
        rh_index = field_names.index(RELATIVE_HUMIDITY_NAME)

    dewpoint_matrix_kelvins = (
        moisture_conversions.relative_humidity_to_dewpoint(
            relative_humidities=sounding_matrix[..., rh_index],
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals
        )
    )

    spec_humidity_matrix_kg_kg01 = (
        moisture_conversions.dewpoint_to_specific_humidity(
            dewpoints_kelvins=dewpoint_matrix_kelvins,
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals
        )
    )

    # Overwrite the existing specific-humidity field if there is one;
    # otherwise append it as a new field.
    if SPECIFIC_HUMIDITY_NAME in field_names:
        this_index = field_names.index(SPECIFIC_HUMIDITY_NAME)
        sounding_matrix[..., this_index] = spec_humidity_matrix_kg_kg01
    else:
        field_names.append(SPECIFIC_HUMIDITY_NAME)
        sounding_matrix = numpy.concatenate((
            sounding_matrix,
            spec_humidity_matrix_kg_kg01[..., numpy.newaxis]
        ), axis=-1)

    sounding_dict[FIELD_NAMES_KEY] = field_names
    sounding_dict[SOUNDING_MATRIX_KEY] = sounding_matrix
    return sounding_dict, dewpoint_matrix_kelvins
def _specific_to_relative_humidity(sounding_dict, pressure_matrix_pascals):
    """Converts specific to relative humidity in each sounding.

    :param sounding_dict: Dictionary created by
        `_convert_interp_table_to_soundings` or `_pressure_to_height_coords`.
    :param pressure_matrix_pascals: N-by-P numpy array of pressures.
    :return: sounding_dict: Same as input, but including relative humidity.
    :return: dewpoint_matrix_kelvins: N-by-P numpy array of dewpoints.
    """

    field_names = sounding_dict[FIELD_NAMES_KEY]
    sounding_matrix = sounding_dict[SOUNDING_MATRIX_KEY]
    temperature_matrix_kelvins = sounding_matrix[
        ..., field_names.index(TEMPERATURE_NAME)]
    spec_humidity_matrix_kg_kg01 = sounding_matrix[
        ..., field_names.index(SPECIFIC_HUMIDITY_NAME)]

    # Specific humidity -> dewpoint -> relative humidity.
    dewpoint_matrix_kelvins = (
        moisture_conversions.specific_humidity_to_dewpoint(
            specific_humidities_kg_kg01=spec_humidity_matrix_kg_kg01,
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals
        )
    )

    relative_humidity_matrix = (
        moisture_conversions.dewpoint_to_relative_humidity(
            dewpoints_kelvins=dewpoint_matrix_kelvins,
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals
        )
    )

    # Overwrite the existing relative-humidity field if there is one;
    # otherwise append it as a new field.
    if RELATIVE_HUMIDITY_NAME in field_names:
        this_index = field_names.index(RELATIVE_HUMIDITY_NAME)
        sounding_matrix[..., this_index] = relative_humidity_matrix
    else:
        field_names.append(RELATIVE_HUMIDITY_NAME)
        sounding_matrix = numpy.concatenate((
            sounding_matrix,
            relative_humidity_matrix[..., numpy.newaxis]
        ), axis=-1)

    sounding_dict[FIELD_NAMES_KEY] = field_names
    sounding_dict[SOUNDING_MATRIX_KEY] = sounding_matrix
    return sounding_dict, dewpoint_matrix_kelvins
def _get_virtual_potential_temperatures(
        sounding_dict, pressure_matrix_pascals, dewpoint_matrix_kelvins):
    """Adds virtual potential temperature to each sounding.

    :param sounding_dict: Dictionary created by
        `_convert_interp_table_to_soundings` or `_pressure_to_height_coords`.
    :param pressure_matrix_pascals: N-by-P numpy array of pressures.
    :param dewpoint_matrix_kelvins: N-by-P numpy array of dewpoints.
    :return: sounding_dict: Same as input, but including virtual potential
        temperature.
    """

    field_names = sounding_dict[FIELD_NAMES_KEY]
    sounding_matrix = sounding_dict[SOUNDING_MATRIX_KEY]
    temperature_matrix_kelvins = sounding_matrix[
        ..., field_names.index(TEMPERATURE_NAME)]

    # Dewpoint -> vapour pressure -> virtual temperature -> theta-v.
    vapour_pressure_matrix_pascals = (
        moisture_conversions.dewpoint_to_vapour_pressure(
            dewpoints_kelvins=dewpoint_matrix_kelvins,
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals
        )
    )

    virtual_temp_matrix_kelvins = (
        moisture_conversions.temperature_to_virtual_temperature(
            temperatures_kelvins=temperature_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals,
            vapour_pressures_pascals=vapour_pressure_matrix_pascals
        )
    )

    theta_v_matrix_kelvins = (
        temperature_conversions.temperatures_to_potential_temperatures(
            temperatures_kelvins=virtual_temp_matrix_kelvins,
            total_pressures_pascals=pressure_matrix_pascals)
    )

    # Overwrite the existing theta-v field if there is one; otherwise append
    # it as a new field.
    if VIRTUAL_POTENTIAL_TEMPERATURE_NAME in field_names:
        this_index = field_names.index(VIRTUAL_POTENTIAL_TEMPERATURE_NAME)
        sounding_matrix[..., this_index] = theta_v_matrix_kelvins
    else:
        field_names.append(VIRTUAL_POTENTIAL_TEMPERATURE_NAME)
        sounding_matrix = numpy.concatenate((
            sounding_matrix,
            theta_v_matrix_kelvins[..., numpy.newaxis]
        ), axis=-1)

    sounding_dict[FIELD_NAMES_KEY] = field_names
    sounding_dict[SOUNDING_MATRIX_KEY] = sounding_matrix
    return sounding_dict
def _fill_nans_in_soundings(
        sounding_dict_pressure_coords, pressure_matrix_pascals,
        min_num_pressure_levels_without_nan=15):
    """Interpolates to fill NaN's in each sounding.

    Fills NaN's in place, one sounding and one field at a time, by linear
    interpolation (and extrapolation) over the sounding's valid levels.
    Geopotential height is interpolated over log(pressure); every other field
    is interpolated over geopotential height, so height must be filled first
    (guaranteed by its position in `field_names_to_interp` below).

    :param sounding_dict_pressure_coords: See doc for
        `_convert_interp_table_to_soundings`.
    :param pressure_matrix_pascals: N-by-P numpy array of pressures.
    :param min_num_pressure_levels_without_nan: Minimum number of pressure
        levels without NaN.  For a given sounding S, if any field has fewer
        pressure levels without NaN, S will be thrown out.
    :return: sounding_dict_pressure_coords: Same as input, with the following
        exceptions.
    [1] maybe fewer soundings
    [2] NaN's have been replaced
    """

    # TODO(thunderhoser): Remove surface pressure of NaN.

    field_names = sounding_dict_pressure_coords[FIELD_NAMES_KEY]
    sounding_matrix = sounding_dict_pressure_coords[SOUNDING_MATRIX_KEY]
    height_index = field_names.index(GEOPOTENTIAL_HEIGHT_NAME)

    num_soundings = sounding_matrix.shape[0]
    keep_sounding_flags = numpy.full(num_soundings, True, dtype=bool)

    # NOTE(review): height must stay first in this list, because later fields
    # are interpolated over the (already filled) height column.
    field_names_to_interp = [
        GEOPOTENTIAL_HEIGHT_NAME, U_WIND_NAME, V_WIND_NAME, TEMPERATURE_NAME,
        SPECIFIC_HUMIDITY_NAME
    ]

    for i in range(num_soundings):
        for this_field_name in field_names_to_interp:
            this_field_index = field_names.index(this_field_name)
            these_real_flags = numpy.invert(numpy.isnan(
                sounding_matrix[i, :, this_field_index]
            ))

            # Nothing to fill for this field in this sounding.
            if numpy.all(these_real_flags):
                continue

            # Too few valid levels: discard the whole sounding.
            if (numpy.sum(these_real_flags) <
                    min_num_pressure_levels_without_nan):
                keep_sounding_flags[i] = False
                break

            these_nan_indices = numpy.where(numpy.invert(these_real_flags))[0]
            these_real_indices = numpy.where(these_real_flags)[0]

            if this_field_name == GEOPOTENTIAL_HEIGHT_NAME:
                # Height is interpolated over log(pressure).
                interp_object = scipy_interp1d(
                    x=numpy.log(pressure_matrix_pascals[i, these_real_indices]),
                    y=sounding_matrix[i, these_real_indices, this_field_index],
                    kind='linear', bounds_error=False, fill_value='extrapolate',
                    assume_sorted=False)

                sounding_matrix[i, these_nan_indices, this_field_index] = (
                    interp_object(
                        numpy.log(pressure_matrix_pascals[i, these_nan_indices])
                    )
                )
            else:
                # All other fields are interpolated over height.
                interp_object = scipy_interp1d(
                    x=sounding_matrix[i, these_real_indices, height_index],
                    y=sounding_matrix[i, these_real_indices, this_field_index],
                    kind='linear', bounds_error=False, fill_value='extrapolate',
                    assume_sorted=False)

                sounding_matrix[i, these_nan_indices, this_field_index] = (
                    interp_object(
                        sounding_matrix[i, these_nan_indices, height_index]
                    )
                )

    # Subset all per-sounding arrays to the soundings with enough valid data.
    keep_sounding_indices = numpy.where(keep_sounding_flags)[0]

    sounding_dict_pressure_coords[SOUNDING_MATRIX_KEY] = (
        sounding_matrix[keep_sounding_indices, ...]
    )
    sounding_dict_pressure_coords[FULL_IDS_KEY] = [
        sounding_dict_pressure_coords[FULL_IDS_KEY][i]
        for i in keep_sounding_indices
    ]
    sounding_dict_pressure_coords[INITIAL_TIMES_KEY] = (
        sounding_dict_pressure_coords[INITIAL_TIMES_KEY][keep_sounding_indices]
    )
    sounding_dict_pressure_coords[LEAD_TIMES_KEY] = (
        sounding_dict_pressure_coords[LEAD_TIMES_KEY][keep_sounding_indices]
    )

    if sounding_dict_pressure_coords[SURFACE_PRESSURES_KEY] is not None:
        sounding_dict_pressure_coords[SURFACE_PRESSURES_KEY] = (
            sounding_dict_pressure_coords[SURFACE_PRESSURES_KEY][
                keep_sounding_indices]
        )

    return sounding_dict_pressure_coords
def _convert_fields_and_units(sounding_dict_pressure_coords):
    """Converts fields and units in each sounding.

    :param sounding_dict_pressure_coords: See doc for
        `_convert_interp_table_to_soundings`.
    :return: sounding_dict_pressure_coords: Same as input, but with different
        fields and units.
    """

    pressure_matrix_pascals = _get_pressures(sounding_dict_pressure_coords)

    have_relative_humidity = (
        nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS in
        sounding_dict_pressure_coords[FIELD_NAMES_KEY]
    )

    # Make sure specific humidity exists before NaN-filling (the filling step
    # interpolates specific humidity, not relative humidity).
    if have_relative_humidity:
        sounding_dict_pressure_coords = _relative_to_specific_humidity(
            sounding_dict=sounding_dict_pressure_coords,
            pressure_matrix_pascals=pressure_matrix_pascals
        )[0]

    sounding_dict_pressure_coords = _fill_nans_in_soundings(
        sounding_dict_pressure_coords=sounding_dict_pressure_coords,
        pressure_matrix_pascals=pressure_matrix_pascals)

    # NaN-filling may have removed soundings, so recompute pressures.
    pressure_matrix_pascals = _get_pressures(sounding_dict_pressure_coords)

    if have_relative_humidity:
        # Recompute dewpoints from the NaN-filled specific humidities.
        field_names = sounding_dict_pressure_coords[FIELD_NAMES_KEY]
        sounding_matrix = sounding_dict_pressure_coords[SOUNDING_MATRIX_KEY]

        dewpoint_matrix_kelvins = (
            moisture_conversions.specific_humidity_to_dewpoint(
                specific_humidities_kg_kg01=sounding_matrix[
                    ..., field_names.index(SPECIFIC_HUMIDITY_NAME)],
                temperatures_kelvins=sounding_matrix[
                    ..., field_names.index(TEMPERATURE_NAME)],
                total_pressures_pascals=pressure_matrix_pascals
            )
        )
    else:
        sounding_dict_pressure_coords, dewpoint_matrix_kelvins = (
            _specific_to_relative_humidity(
                sounding_dict=sounding_dict_pressure_coords,
                pressure_matrix_pascals=pressure_matrix_pascals
            )
        )

    return _get_virtual_potential_temperatures(
        sounding_dict=sounding_dict_pressure_coords,
        pressure_matrix_pascals=pressure_matrix_pascals,
        dewpoint_matrix_kelvins=dewpoint_matrix_kelvins
    )
def _pressure_to_height_coords(
        sounding_dict_pressure_coords, height_levels_m_agl):
    """Converts soundings from pressure coords to ground-relative height coords.

    NOTE(review): this method mutates `sounding_dict_pressure_coords` in place
    (its field list and sounding matrix are modified below), so the input
    dictionary should not be reused after calling this.

    :param sounding_dict_pressure_coords: Dictionary created by
        `_convert_fields_and_units`, but with additional keys listed below.
    sounding_dict_pressure_coords['storm_elevations_m_asl']: length-N numpy
        array of storm elevations (metres above sea level).

    :param height_levels_m_agl: length-H numpy array of height levels (integer
        metres above ground level).  This method will interpolate each sounding
        to said heights, and the output soundings will be in ground-relative
        height coords rather than pressure coords.

    :return: sounding_dict_height_coords: Dictionary with the following keys.
    sounding_dict_height_coords['full_storm_id_strings']: length-N list of full
        IDs.
    sounding_dict_height_coords['init_times_unix_sec']: length-N numpy array of
        initial times (storm times).  Valid time = initial time + lead time.
    sounding_dict_height_coords['lead_times_seconds']: length-N numpy array of
        lead times.
    sounding_dict_height_coords['storm_elevations_m_asl']: length-N numpy array
        of storm elevations (metres above sea level).
    sounding_dict_height_coords['sounding_matrix']: N-by-P-by-F numpy array of
        sounding values.
    sounding_dict_height_coords['height_levels_m_agl']: length-H numpy array of
        height levels (metres above ground level).
    sounding_dict_height_coords['field_names']: length-F list of field names.
    """

    error_checking.assert_is_numpy_array(height_levels_m_agl, num_dimensions=1)
    error_checking.assert_is_geq_numpy_array(height_levels_m_agl, 0)
    height_levels_m_agl = numpy.round(height_levels_m_agl).astype(int)

    # The ID/time/elevation arrays and field list are shared (not copied) with
    # the input dictionary.
    sounding_dict_height_coords = {
        FULL_IDS_KEY: sounding_dict_pressure_coords[FULL_IDS_KEY],
        INITIAL_TIMES_KEY: sounding_dict_pressure_coords[INITIAL_TIMES_KEY],
        LEAD_TIMES_KEY: sounding_dict_pressure_coords[LEAD_TIMES_KEY],
        STORM_ELEVATIONS_KEY:
            sounding_dict_pressure_coords[STORM_ELEVATIONS_KEY],
        SOUNDING_MATRIX_KEY: sounding_dict_pressure_coords[SOUNDING_MATRIX_KEY],
        HEIGHT_LEVELS_KEY: height_levels_m_agl,
        FIELD_NAMES_KEY: sounding_dict_pressure_coords[FIELD_NAMES_KEY]
    }

    field_names = sounding_dict_pressure_coords[FIELD_NAMES_KEY]
    orig_sounding_matrix = sounding_dict_pressure_coords[SOUNDING_MATRIX_KEY]
    storm_elevations_m_asl = sounding_dict_pressure_coords[STORM_ELEVATIONS_KEY]

    # Keep a copy of the height column, then overwrite it with pressure
    # (replacing the geopotential-height field with a pressure field).
    height_index = field_names.index(GEOPOTENTIAL_HEIGHT_NAME)
    orig_height_matrix_m_asl = orig_sounding_matrix[..., height_index] + 0.
    pressure_matrix_pascals = _get_pressures(sounding_dict_pressure_coords)
    field_names[height_index] = PRESSURE_NAME
    orig_sounding_matrix[..., height_index] = pressure_matrix_pascals + 0.

    # NOTE(review): pressure must stay first in this list, because later
    # fields are interpolated over the new (already filled) pressure column.
    field_names_to_interp = [
        PRESSURE_NAME, TEMPERATURE_NAME, U_WIND_NAME, V_WIND_NAME
    ]

    if RELATIVE_HUMIDITY_NAME in field_names:
        field_names_to_interp.append(RELATIVE_HUMIDITY_NAME)
    else:
        field_names_to_interp.append(SPECIFIC_HUMIDITY_NAME)

    num_soundings = orig_sounding_matrix.shape[0]
    num_fields = orig_sounding_matrix.shape[-1]
    num_height_levels = len(height_levels_m_agl)
    new_sounding_matrix = numpy.full(
        (num_soundings, num_height_levels, num_fields), numpy.nan
    )

    pressure_index = field_names.index(PRESSURE_NAME)

    for j in range(len(field_names_to_interp)):
        this_field_index = field_names.index(field_names_to_interp[j])

        for i in range(num_soundings):
            if field_names_to_interp[j] == PRESSURE_NAME:
                # Pressure is interpolated in log space over sea-level height,
                # evaluated at storm elevation + each AGL height level.
                this_interp_object = scipy_interp1d(
                    x=orig_height_matrix_m_asl[i, ...],
                    y=numpy.log(orig_sounding_matrix[i, ..., this_field_index]),
                    kind='linear', bounds_error=False, fill_value='extrapolate',
                    assume_sorted=False)

                new_sounding_matrix[i, ..., this_field_index] = numpy.exp(
                    this_interp_object(
                        storm_elevations_m_asl[i] + height_levels_m_agl)
                )
            else:
                # All other fields are interpolated over pressure.
                this_interp_object = scipy_interp1d(
                    x=orig_sounding_matrix[i, ..., pressure_index],
                    y=orig_sounding_matrix[i, ..., this_field_index],
                    kind='linear', bounds_error=False, fill_value='extrapolate',
                    assume_sorted=False)

                new_sounding_matrix[i, ..., this_field_index] = (
                    this_interp_object(
                        new_sounding_matrix[i, ..., pressure_index]
                    )
                )

    sounding_dict_height_coords[FIELD_NAMES_KEY] = field_names
    sounding_dict_height_coords[SOUNDING_MATRIX_KEY] = new_sounding_matrix
    pressure_matrix_pascals = _get_pressures(sounding_dict_height_coords)

    # Recompute the humidity pair (and dewpoints) on the new height grid.
    if RELATIVE_HUMIDITY_NAME in field_names:
        sounding_dict_height_coords, dewpoint_matrix_kelvins = (
            _relative_to_specific_humidity(
                sounding_dict=sounding_dict_height_coords,
                pressure_matrix_pascals=pressure_matrix_pascals)
        )
    else:
        sounding_dict_height_coords, dewpoint_matrix_kelvins = (
            _specific_to_relative_humidity(
                sounding_dict=sounding_dict_height_coords,
                pressure_matrix_pascals=pressure_matrix_pascals)
        )

    # Clamp relative humidity to [0, 1] (interp/extrapolation can overshoot).
    rh_index = field_names.index(RELATIVE_HUMIDITY_NAME)
    sounding_matrix = sounding_dict_height_coords[SOUNDING_MATRIX_KEY]
    sounding_matrix[..., rh_index] = numpy.maximum(
        sounding_matrix[..., rh_index], 0.
    )
    sounding_matrix[..., rh_index] = numpy.minimum(
        sounding_matrix[..., rh_index], 1.
    )
    sounding_dict_height_coords[SOUNDING_MATRIX_KEY] = sounding_matrix

    return _get_virtual_potential_temperatures(
        sounding_dict=sounding_dict_height_coords,
        pressure_matrix_pascals=pressure_matrix_pascals,
        dewpoint_matrix_kelvins=dewpoint_matrix_kelvins)
def check_field_name(field_name):
    """Error-checks name of sounding field.

    :param field_name: Name of sounding field.
    :raises: ValueError: if `field_name not in VALID_FIELD_NAMES`.
    """

    error_checking.assert_is_string(field_name)
    if field_name in VALID_FIELD_NAMES:
        return

    error_string = (
        '\n{0:s}\nValid field names (listed above) do not include "{1:s}".'
    ).format(str(VALID_FIELD_NAMES), field_name)

    raise ValueError(error_string)
def field_name_to_verbose(field_name, include_units=True):
    """Converts field name from underscore-separated format to verbose.

    :param field_name: Field name in default (underscore-separated) format.
    :param include_units: Boolean flag.  If True, verbose name will include
        units.
    :return: field_name_verbose: Verbose field name.
    """

    error_checking.assert_is_boolean(include_units)

    # Pick the lookup table with or without units, then do one dict lookup.
    this_dict = (
        FIELD_NAME_TO_VERBOSE_DICT if include_units
        else FIELD_NAME_TO_VERBOSE_UNITLESS_DICT
    )
    return this_dict[field_name]
def interp_soundings_to_storm_objects(
        storm_object_table, top_grib_directory_name, model_name,
        elevation_dir_name, use_all_grids=True, grid_id=None,
        height_levels_m_agl=DEFAULT_HEIGHT_LEVELS_M_AGL,
        lead_times_seconds=DEFAULT_LEAD_TIMES_SEC,
        lag_time_for_convective_contamination_sec=
        DEFAULT_LAG_TIME_FOR_CONVECTIVE_CONTAMINATION_SEC,
        wgrib_exe_name=grib_io.WGRIB_EXE_NAME_DEFAULT,
        wgrib2_exe_name=grib_io.WGRIB2_EXE_NAME_DEFAULT,
        raise_error_if_missing=False):
    """Interpolates NWP sounding to each storm object at each lead time.

    :param storm_object_table: pandas DataFrame with columns listed in
        `storm_tracking_io.write_file`.
    :param top_grib_directory_name: Name of top-level directory with grib files
        for the given NWP model.
    :param model_name: Model name (must be accepted by
        `nwp_model_utils.check_model_name`).
    :param elevation_dir_name: Name of directory with elevation data (used by
        the Python package "srtm").
    :param use_all_grids: Boolean flag.  If True, this method will interp from
        the highest-resolution grid available at each model-initialization time.
        If False, will use only `grid_id`.
    :param grid_id: [used only if `use_all_grids = False`]
        Grid ID (must be accepted by `nwp_model_utils.check_grid_name`).
    :param height_levels_m_agl: 1-D numpy array of height levels (metres above
        ground level).  These will be the height levels in each sounding.
    :param lead_times_seconds: length-T numpy array of lead times.
    :param lag_time_for_convective_contamination_sec: Lag time (used to avoid
        convective contamination of soundings, where the sounding for storm S is
        heavily influenced by storm S).  This will be subtracted from each lead
        time.
    :param wgrib_exe_name: Path to wgrib executable.
    :param wgrib2_exe_name: Path to wgrib2 executable.
    :param raise_error_if_missing: Boolean flag.  If any grib file is missing
        and `raise_error_if_missing = True`, this method will error out.  If any
        grib file is missing and `raise_error_if_missing = False`, this method
        will carry on, leaving the affected values as NaN.
    :return: sounding_dict_by_lead_time: length-T list of dictionaries, each
        containing the keys listed in `_pressure_to_height_coords`.
    """

    error_checking.assert_is_integer_numpy_array(lead_times_seconds)
    error_checking.assert_is_numpy_array(lead_times_seconds, num_dimensions=1)
    error_checking.assert_is_geq_numpy_array(lead_times_seconds, 0)
    error_checking.assert_is_integer(lag_time_for_convective_contamination_sec)
    error_checking.assert_is_geq(lag_time_for_convective_contamination_sec, 0)

    # Create one target point per (storm object, lead time) pair.
    print((
        'Creating target point for each storm object and lead time ({0:s} '
        'seconds)...'
    ).format(
        str(lead_times_seconds)
    ))

    target_point_table = _create_target_points_for_interp(
        storm_object_table=storm_object_table,
        lead_times_seconds=lead_times_seconds)

    print((
        'Subtracting lag time ({0:d} seconds) from each target point, to '
        'account for convective contamination...'
    ).format(lag_time_for_convective_contamination_sec))

    target_point_table[
        FORECAST_TIME_COLUMN
    ] -= lag_time_for_convective_contamination_sec

    # Rename columns to what `interp.interp_nwp_from_xy_grid` expects.
    column_dict_old_to_new = {
        tracking_utils.CENTROID_LATITUDE_COLUMN: interp.QUERY_LAT_COLUMN,
        tracking_utils.CENTROID_LONGITUDE_COLUMN: interp.QUERY_LNG_COLUMN,
        FORECAST_TIME_COLUMN: interp.QUERY_TIME_COLUMN
    }
    target_point_table.rename(columns=column_dict_old_to_new, inplace=True)
    print(SEPARATOR_STRING)

    interp_table = _interp_soundings_from_nwp(
        target_point_table=target_point_table,
        top_grib_directory_name=top_grib_directory_name, include_surface=False,
        model_name=model_name, use_all_grids=use_all_grids, grid_id=grid_id,
        wgrib_exe_name=wgrib_exe_name, wgrib2_exe_name=wgrib2_exe_name,
        raise_error_if_missing=raise_error_if_missing)
    print(SEPARATOR_STRING)

    print('Converting interpolated values to soundings...')
    sounding_dict_pressure_coords = _convert_interp_table_to_soundings(
        interp_table=interp_table, target_point_table=target_point_table,
        model_name=model_name, include_surface=False)

    print('Converting fields and units in each sounding...')
    orig_num_soundings = len(sounding_dict_pressure_coords[FULL_IDS_KEY])
    sounding_dict_pressure_coords = _convert_fields_and_units(
        sounding_dict_pressure_coords)

    num_soundings = len(sounding_dict_pressure_coords[FULL_IDS_KEY])

    # BUG FIX (review): the original literal 'NaN''s' was adjacent-string
    # concatenation, which printed "NaNs" -- the apostrophe was silently
    # dropped.
    print("Removed {0:d} of {1:d} soundings (too many NaN's).".format(
        orig_num_soundings - num_soundings, orig_num_soundings))

    # Look up terrain elevation at each storm centroid, then subset to the
    # storm objects that survived NaN removal.
    print('Finding elevation of each storm object...')
    storm_elevations_m_asl = geodetic_utils.get_elevations(
        latitudes_deg=storm_object_table[
            tracking_utils.CENTROID_LATITUDE_COLUMN].values,
        longitudes_deg=storm_object_table[
            tracking_utils.CENTROID_LONGITUDE_COLUMN].values,
        working_dir_name=elevation_dir_name
    )

    these_indices = tracking_utils.find_storm_objects(
        all_id_strings=storm_object_table[
            tracking_utils.FULL_ID_COLUMN].values.tolist(),
        all_times_unix_sec=storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values,
        id_strings_to_keep=sounding_dict_pressure_coords[FULL_IDS_KEY],
        times_to_keep_unix_sec=sounding_dict_pressure_coords[INITIAL_TIMES_KEY]
    )

    storm_elevations_m_asl = storm_elevations_m_asl[these_indices]
    sounding_dict_pressure_coords.update({
        STORM_ELEVATIONS_KEY: storm_elevations_m_asl
    })

    print('Converting soundings from pressure coords to metres AGL...\n')
    sounding_dict_height_coords = _pressure_to_height_coords(
        sounding_dict_pressure_coords=sounding_dict_pressure_coords,
        height_levels_m_agl=height_levels_m_agl)

    # Split the single dictionary into one dictionary per lead time.
    num_lead_times = len(lead_times_seconds)
    sounding_dict_by_lead_time = [None] * num_lead_times

    for k in range(num_lead_times):
        print((
            'Creating separate sounding dictionary for {0:d}-second lead '
            'time...'
        ).format(lead_times_seconds[k]))

        these_indices = numpy.where(
            sounding_dict_height_coords[LEAD_TIMES_KEY] ==
            lead_times_seconds[k]
        )[0]

        sounding_dict_by_lead_time[k] = {
            FULL_IDS_KEY: [
                sounding_dict_height_coords[FULL_IDS_KEY][i]
                for i in these_indices
            ],
            INITIAL_TIMES_KEY:
                sounding_dict_height_coords[INITIAL_TIMES_KEY][these_indices],
            LEAD_TIMES_KEY:
                sounding_dict_height_coords[LEAD_TIMES_KEY][these_indices],
            STORM_ELEVATIONS_KEY:
                sounding_dict_height_coords[STORM_ELEVATIONS_KEY][
                    these_indices],
            SOUNDING_MATRIX_KEY:
                sounding_dict_height_coords[SOUNDING_MATRIX_KEY][
                    these_indices, ...],
            HEIGHT_LEVELS_KEY: sounding_dict_height_coords[HEIGHT_LEVELS_KEY],
            FIELD_NAMES_KEY: sounding_dict_height_coords[FIELD_NAMES_KEY]
        }

        print((
            'Dictionary for {0:d}-second lead time contains {1:d} of {2:d} '
            'soundings.'
        ).format(lead_times_seconds[k], len(these_indices), num_soundings))

    return sounding_dict_by_lead_time
def write_soundings(
        netcdf_file_name, sounding_dict_height_coords, lead_time_seconds,
        lag_time_for_convective_contamination_sec):
    """Writes soundings to NetCDF file.

    This file may contain soundings with one lead time only.

    :param netcdf_file_name: Path to output file.
    :param sounding_dict_height_coords: Dictionary created by
        `interp_soundings_to_storm_objects`.
    :param lead_time_seconds: Lead time for all soundings.
    :param lag_time_for_convective_contamination_sec: Lag time for all soundings
        (see doc for `interp_soundings_to_storm_objects`).
    :raises: ValueError: if `sounding_dict_height_coords` contains more than one
        unique lead time.
    :raises: ValueError: if lead time in `sounding_dict_height_coords` does not
        match the input arg `lead_time_seconds`.
    """

    error_checking.assert_is_integer(lead_time_seconds)
    error_checking.assert_is_geq(lead_time_seconds, 0)
    error_checking.assert_is_integer(lag_time_for_convective_contamination_sec)
    error_checking.assert_is_geq(lag_time_for_convective_contamination_sec, 0)

    # Enforce the one-lead-time-per-file contract.
    unique_lead_times_seconds = numpy.unique(
        sounding_dict_height_coords[LEAD_TIMES_KEY]
    )

    if not numpy.all(unique_lead_times_seconds == lead_time_seconds):
        error_string = (
            'All lead times in sounding dictionary should be {0:d} seconds.  '
            'Instead, got lead times listed below.\n{1:s}'
        ).format(lead_time_seconds, str(unique_lead_times_seconds))

        raise ValueError(error_string)

    # Create file and set global attributes.  Lead time and lag time are
    # stored as global attributes, not variables.
    file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
    netcdf_dataset = netCDF4.Dataset(
        netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')

    netcdf_dataset.setncattr(LEAD_TIME_KEY, lead_time_seconds)
    netcdf_dataset.setncattr(
        LAG_TIME_KEY, lag_time_for_convective_contamination_sec)

    num_storm_objects = len(sounding_dict_height_coords[FULL_IDS_KEY])
    num_height_levels = len(sounding_dict_height_coords[HEIGHT_LEVELS_KEY])
    num_fields = len(sounding_dict_height_coords[FIELD_NAMES_KEY])

    netcdf_dataset.createDimension(
        STORM_OBJECT_DIMENSION_KEY, num_storm_objects)
    netcdf_dataset.createDimension(HEIGHT_DIMENSION_KEY, num_height_levels)
    netcdf_dataset.createDimension(FIELD_DIMENSION_KEY, num_fields)

    # Strings are stored as fixed-width char arrays; dimension length is the
    # longest ID (at least 1, so the dimension is never zero-length).
    id_lengths = [len(f) for f in sounding_dict_height_coords[FULL_IDS_KEY]]
    num_id_characters = max(id_lengths + [1])
    netcdf_dataset.createDimension(
        STORM_ID_CHAR_DIMENSION_KEY, num_id_characters)

    num_field_name_chars = max([
        len(f) for f in sounding_dict_height_coords[FIELD_NAMES_KEY]
    ])
    netcdf_dataset.createDimension(
        FIELD_NAME_CHAR_DIMENSION_KEY, num_field_name_chars)

    # Add storm IDs to file.
    netcdf_dataset.createVariable(
        FULL_IDS_KEY, datatype='S1',
        dimensions=(STORM_OBJECT_DIMENSION_KEY, STORM_ID_CHAR_DIMENSION_KEY)
    )

    string_type = 'S{0:d}'.format(num_id_characters)
    full_ids_char_array = netCDF4.stringtochar(numpy.array(
        sounding_dict_height_coords[FULL_IDS_KEY], dtype=string_type
    ))
    netcdf_dataset.variables[FULL_IDS_KEY][:] = numpy.array(full_ids_char_array)

    # Add initial times (storm times) to file.
    netcdf_dataset.createVariable(
        INITIAL_TIMES_KEY, datatype=numpy.int32,
        dimensions=STORM_OBJECT_DIMENSION_KEY)
    netcdf_dataset.variables[INITIAL_TIMES_KEY][:] = (
        sounding_dict_height_coords[INITIAL_TIMES_KEY]
    )

    # Add storm elevations to file.
    netcdf_dataset.createVariable(
        STORM_ELEVATIONS_KEY, datatype=numpy.float32,
        dimensions=STORM_OBJECT_DIMENSION_KEY)
    netcdf_dataset.variables[STORM_ELEVATIONS_KEY][:] = (
        sounding_dict_height_coords[STORM_ELEVATIONS_KEY]
    )

    # Add height levels to file.
    netcdf_dataset.createVariable(
        HEIGHT_LEVELS_KEY, datatype=numpy.int32,
        dimensions=HEIGHT_DIMENSION_KEY)
    netcdf_dataset.variables[HEIGHT_LEVELS_KEY][:] = (
        sounding_dict_height_coords[HEIGHT_LEVELS_KEY]
    )

    # Add field names to file.
    netcdf_dataset.createVariable(
        FIELD_NAMES_KEY, datatype='S1',
        dimensions=(FIELD_DIMENSION_KEY, FIELD_NAME_CHAR_DIMENSION_KEY)
    )

    string_type = 'S{0:d}'.format(num_field_name_chars)
    field_names_as_char_array = netCDF4.stringtochar(numpy.array(
        sounding_dict_height_coords[FIELD_NAMES_KEY], dtype=string_type
    ))
    netcdf_dataset.variables[FIELD_NAMES_KEY][:] = numpy.array(
        field_names_as_char_array)

    # Add soundings to file.
    netcdf_dataset.createVariable(
        SOUNDING_MATRIX_KEY, datatype=numpy.float32,
        dimensions=(STORM_OBJECT_DIMENSION_KEY, HEIGHT_DIMENSION_KEY,
                    FIELD_DIMENSION_KEY)
    )
    netcdf_dataset.variables[SOUNDING_MATRIX_KEY][:] = (
        sounding_dict_height_coords[SOUNDING_MATRIX_KEY]
    )

    netcdf_dataset.close()
def read_soundings(
        netcdf_file_name, field_names_to_keep=None,
        full_id_strings_to_keep=None, init_times_to_keep_unix_sec=None):
    """Reads soundings from NetCDF file.

    K = number of storm objects to keep

    If `full_id_strings_to_keep is None or init_times_to_keep_unix_sec is None`,
    this method will return soundings for all storm objects.  Otherwise, will
    return only a subset of storm objects.

    If `field_names_to_keep is None`, this method will return all sounding
    fields.  Otherwise, will return only a subset of fields.

    :param netcdf_file_name: Path to input file.
    :param field_names_to_keep: 1-D list with names of sounding fields.
    :param full_id_strings_to_keep: length-K list of full IDs.
    :param init_times_to_keep_unix_sec: length-K numpy array of initial times
        (storm times).
    :return: sounding_dict_height_coords: Dictionary with keys listed in
        `_pressure_to_height_coords`.
    :return: lag_time_for_convective_contamination_sec: See doc for
        `interp_soundings_to_storm_objects`.
    """

    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name=netcdf_file_name, raise_error_if_fails=True)

    # Scalar metadata are stored as global attributes on the file.
    lead_time_seconds = getattr(netcdf_dataset, LEAD_TIME_KEY)
    lag_time_for_convective_contamination_sec = int(getattr(
        netcdf_dataset, LAG_TIME_KEY
    ))

    height_levels_m_agl = numpy.array(
        netcdf_dataset.variables[HEIGHT_LEVELS_KEY][:], dtype=int
    )

    # Field names are stored as fixed-width character arrays; convert back
    # to ordinary Python strings.
    field_names = netCDF4.chartostring(
        netcdf_dataset.variables[FIELD_NAMES_KEY][:]
    )
    field_names = [str(f) for f in field_names]

    if field_names_to_keep is None:
        # Keep every field: indices 0 .. num_fields - 1.
        field_indices_to_keep = numpy.linspace(
            0, len(field_names) - 1, num=len(field_names), dtype=int
        )
    else:
        error_checking.assert_is_numpy_array(
            numpy.array(field_names_to_keep), num_dimensions=1
        )
        for this_field_name in field_names_to_keep:
            check_field_name(this_field_name)

        field_indices_to_keep = numpy.array(
            [field_names.index(f) for f in field_names_to_keep], dtype=int
        )
        # `+ []` makes a shallow copy, so the caller's list is not aliased.
        field_names = field_names_to_keep + []

    num_storm_objects = netcdf_dataset.variables[FULL_IDS_KEY].shape[0]
    num_height_levels = len(height_levels_m_agl)
    num_fields = len(field_names)

    if num_storm_objects == 0:
        # Empty file: return empty arrays whose shapes are still consistent.
        full_id_strings = []
        init_times_unix_sec = numpy.array([], dtype=int)
        storm_elevations_m_asl = numpy.array([], dtype=float)
        sounding_matrix = numpy.full(
            (num_storm_objects, num_height_levels, num_fields), numpy.nan
        )
    else:
        full_id_strings = netCDF4.chartostring(
            netcdf_dataset.variables[FULL_IDS_KEY][:]
        )
        full_id_strings = [str(this_id) for this_id in full_id_strings]

        init_times_unix_sec = numpy.array(
            netcdf_dataset.variables[INITIAL_TIMES_KEY][:], dtype=int
        )
        storm_elevations_m_asl = numpy.array(
            netcdf_dataset.variables[STORM_ELEVATIONS_KEY][:]
        )
        # Subset the field axis while reading, so only wanted fields are kept.
        sounding_matrix = numpy.array(
            netcdf_dataset.variables[SOUNDING_MATRIX_KEY][
                ..., field_indices_to_keep]
        )

    netcdf_dataset.close()

    # Storm objects are filtered only if BOTH an ID filter and a time filter
    # were supplied (and the file is non-empty).
    filter_storm_objects = (
        full_id_strings_to_keep is not None and
        init_times_to_keep_unix_sec is not None and
        num_storm_objects != 0
    )

    if filter_storm_objects:
        these_indices = tracking_utils.find_storm_objects(
            all_id_strings=full_id_strings,
            all_times_unix_sec=init_times_unix_sec,
            id_strings_to_keep=full_id_strings_to_keep,
            times_to_keep_unix_sec=init_times_to_keep_unix_sec,
            allow_missing=True)

        # -1 marks requested storm objects absent from this file; drop them.
        these_indices = these_indices[these_indices != -1]

        full_id_strings = [full_id_strings[i] for i in these_indices]
        init_times_unix_sec = init_times_unix_sec[these_indices]
        storm_elevations_m_asl = storm_elevations_m_asl[these_indices]
        sounding_matrix = sounding_matrix[these_indices, ...]

    num_storm_objects = len(full_id_strings)
    # Lead time is constant for the whole file; replicate it per storm object.
    lead_times_seconds = numpy.full(
        num_storm_objects, lead_time_seconds, dtype=int)

    sounding_dict_height_coords = {
        FULL_IDS_KEY: full_id_strings,
        INITIAL_TIMES_KEY: init_times_unix_sec,
        LEAD_TIMES_KEY: lead_times_seconds,
        STORM_ELEVATIONS_KEY: storm_elevations_m_asl,
        SOUNDING_MATRIX_KEY: sounding_matrix,
        HEIGHT_LEVELS_KEY: height_levels_m_agl,
        FIELD_NAMES_KEY: field_names
    }

    return (sounding_dict_height_coords,
            lag_time_for_convective_contamination_sec)
def find_sounding_file(
        top_directory_name, spc_date_string, lead_time_seconds,
        lag_time_for_convective_contamination_sec, init_time_unix_sec=None,
        raise_error_if_missing=True):
    """Finds NetCDF file created by `write_soundings`.

    When `init_time_unix_sec` is None, the file sought contains all soundings
    for one SPC date; otherwise it contains soundings for a single time step.

    :param top_directory_name: Name of top-level directory with sounding files.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param lead_time_seconds: Lead time.
    :param lag_time_for_convective_contamination_sec: See doc for
        `interp_soundings_to_storm_objects`.
    :param init_time_unix_sec: Initial time (storm time).
    :param raise_error_if_missing: Boolean flag.  If True and the file does
        not exist, a ValueError is raised.
    :return: sounding_file_name: Path to sounding file (the *expected* path
        when the file is missing and `raise_error_if_missing` is False).
    :raises: ValueError: if file is missing and `raise_error_if_missing = True`.
    """

    error_checking.assert_is_string(top_directory_name)
    time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_boolean(raise_error_if_missing)

    # Files are grouped under a year subdirectory (first 4 chars of the date).
    year_string = spc_date_string[:4]

    if init_time_unix_sec is None:
        sounding_file_name = (
            '{0:s}/{1:s}/storm_soundings_{2:s}_lead-time-{3:05d}sec'
            '_lag-time-{4:04d}sec.nc'
        ).format(
            top_directory_name, year_string, spc_date_string,
            lead_time_seconds, lag_time_for_convective_contamination_sec
        )
    else:
        init_time_string = time_conversion.unix_sec_to_string(
            init_time_unix_sec, TIME_FORMAT_IN_FILE_NAMES)

        sounding_file_name = (
            '{0:s}/{1:s}/{2:s}/storm_soundings_{3:s}_lead-time-{4:05d}sec'
            '_lag-time-{5:04d}sec.nc'
        ).format(
            top_directory_name, year_string, spc_date_string,
            init_time_string, lead_time_seconds,
            lag_time_for_convective_contamination_sec
        )

    if raise_error_if_missing and not os.path.isfile(sounding_file_name):
        error_string = (
            'Cannot find file with soundings interpolated to storm objects. '
            'Expected at: {0:s}'
        ).format(sounding_file_name)

        raise ValueError(error_string)

    return sounding_file_name
| {
"repo_name": "thunderhoser/GewitterGefahr",
"path": "gewittergefahr/gg_utils/soundings.py",
"copies": "1",
"size": "61534",
"license": "mit",
"hash": -3512333549687126500,
"line_mean": 39.6701916722,
"line_max": 80,
"alpha_frac": 0.6597815842,
"autogenerated": false,
"ratio": 3.3568272325568707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.451660881675687,
"avg_score": null,
"num_lines": null
} |
""" ATM_TC_MIB
This MIB Module provides Textual Conventions
and OBJECT\-IDENTITY Objects to be used by
ATM systems.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.ietf.ietf_yang_smiv2 import ObjectIdentityIdentity
class AtmconncasttypeEnum(Enum):
    """Topology of an ATM connection (point-to-point or point-to-multipoint).

    For point-to-multipoint connections the value also gives the orientation
    of this VPL/VCL.  On a host, p2mpRoot/p2mpLeaf mark the host as the
    root/leaf of the connection.  On a switch interface, p2mpRoot means cells
    received from the interface come from the root of the connection, and
    p2mpLeaf means cells transmitted to the interface go to a leaf.
    """

    p2p = 1

    p2mpRoot = 2

    p2mpLeaf = 3

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmconncasttypeEnum']
class AtmconnkindEnum(Enum):
    """Type of call control used for an ATM connection at an interface.

    pvc(1): virtual link of a PVC (not used for a PVC/SVC, i.e. Soft-PVC,
    cross-connect).
    svcIncoming(2): link established after a received SVC setup request.
    svcOutgoing(3): link established after a transmitted or forwarded SVC
    setup request.
    spvcInitiator(4): PVC side of an SVC/PVC cross-connect where the switch
    initiates the Soft-PVC setup.
    spvcTarget(5): PVC side of an SVC/PVC cross-connect where the switch is
    the target of the Soft-PVC setup.

    Cross-connect pairing: pvc<->pvc; svcIncoming<->svcOutgoing;
    spvcInitiator<->svcOutgoing or spvcTarget; spvcTarget<->svcIncoming or
    spvcInitiator.
    """

    pvc = 1

    svcIncoming = 2

    svcOutgoing = 3

    spvcInitiator = 4

    spvcTarget = 5

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmconnkindEnum']
class AtminterfacetypeEnum(Enum):
    """Connection-setup procedures used on the identified ATM interface.

    other(1): procedures not listed below.
    autoConfig(2): procedures determined dynamically (e.g. ATM Forum ILMI
    auto-configuration), or determination not yet complete.
    ituDss2(3): ITU-T DSS2 signalling per Q.2931, Q.2961 and Q.2971.
    atmfUni3Dot0(4) / atmfUni3Dot1(5) / atmfUni4Dot0(6): ATM Forum UNI 3.0,
    UNI 3.1 and UNI Signalling 4.0.
    atmfIispUni3Dot0(7) / atmfIispUni3Dot1(8) / atmfIispUni4Dot0(9): ATM
    Forum IISP based on the corresponding UNI version.
    atmfPnni1Dot0(10): ATM Forum PNNI 1.0 (af-pnni-0055.000).
    atmfBici2Dot0(11): ATM Forum B-ICI 2.0 (af-bici-0013.002).
    atmfUniPvcOnly(12) / atmfNniPvcOnly(13): ATM Forum compliant UNI / NNI
    with signalling disabled.
    """

    other = 1

    autoConfig = 2

    ituDss2 = 3

    atmfUni3Dot0 = 4

    atmfUni3Dot1 = 5

    atmfUni4Dot0 = 6

    atmfIispUni3Dot0 = 7

    atmfIispUni3Dot1 = 8

    atmfIispUni4Dot0 = 9

    atmfPnni1Dot0 = 10

    atmfBici2Dot0 = 11

    atmfUniPvcOnly = 12

    atmfNniPvcOnly = 13

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtminterfacetypeEnum']
class AtmservicecategoryEnum(Enum):
    """ATM service category of a connection: other(1), CBR(2), real-time
    VBR(3), non-real-time VBR(4), ABR(5) or UBR(6)."""

    other = 1

    cbr = 2

    rtVbr = 3

    nrtVbr = 4

    abr = 5

    ubr = 6

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmservicecategoryEnum']
class AtmvorxadminstatusEnum(Enum):
    """Desired administrative status of a virtual link or cross-connect:
    up(1) enables traffic flow, down(2) disables it."""

    up = 1

    down = 2

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmvorxadminstatusEnum']
class AtmvorxoperstatusEnum(Enum):
    """Operational status of a virtual link or cross-connect.

    up(1)/down(2) mean traffic flow is enabled/disabled; unknown(3) means
    the state cannot be determined.  The state is down or unknown when the
    supporting ATM interface(s) is down or unknown, respectively.
    """

    up = 1

    down = 2

    unknown = 3

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import of the generated meta tables.
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmvorxoperstatusEnum']
class AtmclptransparentscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP-transparent model with Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s,
    CLP=0+1); 3 = maximum burst size (cells); 4 = CDVT (tenths of
    microseconds); 5 = unused.  Applies to VBR.1 connections; rejected at
    UNI 3.0/3.1 interfaces (see atmNoClpScr for those interfaces).
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclptransparentscrIdentity']['meta_info']
class AtmclpnotaggingmcrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with Minimum Cell Rate and no tagging.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = CDVT (tenths of
    microseconds); 3 = minimum cell rate (cells/s); 4-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclpnotaggingmcrIdentity']['meta_info']
class AtmnoclpnoscrcdvtIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: no CLP, no Sustained Cell Rate, with CDVT.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = CDVT (tenths of
    microseconds); 3-5 = unused.  Applies to CBR connections following the
    UNI 3.0/3.1 conformance definition for PCR CLP=0+1 (which differ from
    CBR.1 in that the CLR objective applies only to the CLP=0 flow), and to
    UBR.1 connections.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnoclpnoscrcdvtIdentity']['meta_info']
class AtmclptaggingscrcdvtIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with tagging, Sustained Cell Rate and CDVT.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s, CLP=0,
    excess tagged as CLP=1); 3 = maximum burst size (cells); 4 = CDVT
    (tenths of microseconds); 5 = unused.  Applies to VBR.3 connections.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclptaggingscrcdvtIdentity']['meta_info']
class AtmclpnotaggingscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with Sustained Cell Rate and no tagging.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s, CLP=0);
    3 = maximum burst size (cells); 4-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclpnotaggingscrIdentity']['meta_info']
class AtmnoclpscrcdvtIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: no CLP, with Sustained Cell Rate and CDVT.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s,
    CLP=0+1); 3 = maximum burst size (cells); 4 = CDVT (tenths of
    microseconds); 5 = unused.  Applies to VBR connections following the
    UNI 3.0/3.1 conformance definition for PCR CLP=0+1 and SCR CLP=0+1,
    which differ from VBR.1 in that the CLR objective applies only to the
    CLP=0 cell flow.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnoclpscrcdvtIdentity']['meta_info']
class AtmnotrafficdescriptorIdentity(ObjectIdentityIdentity):
    """The "no traffic descriptor" type: parameters 1-5 are unused.  Usable
    for best-effort traffic."""

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnotrafficdescriptorIdentity']['meta_info']
class AtmclptransparentnoscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP-transparent model, no Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = CDVT (tenths of
    microseconds); 3-5 = unused.  Applies to CBR.1 connections; rejected at
    UNI 3.0/3.1 interfaces (see atmNoClpNoScr for those interfaces).
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclptransparentnoscrIdentity']['meta_info']
class AtmclptaggingscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with tagging and Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s, CLP=0,
    excess tagged as CLP=1); 3 = maximum burst size (cells); 4-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclptaggingscrIdentity']['meta_info']
class AtmnoclpnoscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: no CLP and no Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnoclpnoscrIdentity']['meta_info']
class AtmnoclpscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: no CLP, with Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s,
    CLP=0+1); 3 = maximum burst size (cells); 4-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnoclpscrIdentity']['meta_info']
class AtmclpnotaggingnoscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP without tagging, no Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = PCR (cells/s, CLP=0);
    3-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclpnotaggingnoscrIdentity']['meta_info']
class AtmclptaggingnoscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with tagging, no Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = PCR (cells/s, CLP=0,
    excess tagged as CLP=1); 3-5 = unused.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclptaggingnoscrIdentity']['meta_info']
class AtmclpnotaggingscrcdvtIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: CLP with Sustained Cell Rate, CDVT, no tagging.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = SCR (cells/s, CLP=0);
    3 = maximum burst size (cells); 4 = CDVT (tenths of microseconds);
    5 = unused.  Applies to VBR.2 connections.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmclpnotaggingscrcdvtIdentity']['meta_info']
class AtmnoclptaggingnoscrIdentity(ObjectIdentityIdentity):
    """Traffic descriptor: no CLP with tagging, no Sustained Cell Rate.

    Parameter vector: 1 = PCR (cells/s, CLP=0+1); 2 = CDVT (tenths of
    microseconds); 3-5 = unused.  Applies to UBR.2 connections.
    """

    _prefix = 'ATM-TC-MIB'
    _revision = '1998-10-19'

    def __init__(self):
        ObjectIdentityIdentity.__init__(self)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _ATM_TC_MIB as meta
        return meta._meta_table['AtmnoclptaggingnoscrIdentity']['meta_info']
| {
"repo_name": "111pontes/ydk-py",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/ATM_TC_MIB.py",
"copies": "1",
"size": "21903",
"license": "apache-2.0",
"hash": -6047229158690702000,
"line_mean": 22.4006410256,
"line_max": 79,
"alpha_frac": 0.6399579966,
"autogenerated": false,
"ratio": 3.685512367491166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825470364091166,
"avg_score": null,
"num_lines": null
} |
'''ATM-> Will have to make a c_graph_from_string for each statement, and
combine them all into a single c_graph'''
from network_base import *
from network_classes import *
def clean_number(string):
    """Strip every character that is not a digit or a decimal point.

    The original built a kill-list and then called list.remove() once per
    bad character (O(n^2)); a single filtering pass is equivalent and also
    drops the py3-incompatible xrange.

    :param string: raw text possibly containing a number.
    :return: string containing only the characters '0'-'9' and '.'.
    """
    allowed = set('1234567890.')
    return ''.join(c for c in string if c in allowed)
def c_graph_from_string(string):
    """Parse a dotted connection string into an adjacency matrix (c_graph).

    NOTE(review): get_node_names() appears to flatten nc_path in place
    ([name] -> name), after which the nc_path[i][0] lookup downstream
    indexes the first CHARACTER of each name -- verify that multi-character
    node names behave as intended.
    """
    c_string = '.' # String marking connections
    s_string = '/' # String marking separations
    m_string = ['(', ')'] # String marking magnitudes
    l_string = ['*', '*'] # String marking labels
    g_string = ['[', ']'] # String marking groups

    #elem = full_partition(string, c_string)
    # Description of all elements, in order of connection. May repeat nodes.

    nc_path = c_path_from_string(string)
    # The path to connect. Named Connection path.
    print 'nc_path ' + str(nc_path)

    node_map = map_node_list(get_node_names(nc_path))
    # Makes node names a key that return a unique id number, using the list's order.
    print 'node map ' + str(node_map)

    c_graph = c_graph_from_connection_path_and_map(nc_path, node_map)
    print_matrix(c_graph)
    return c_graph
def c_graph_from_connection_path_and_map(nc_path, node_map):
    """Build a directed adjacency matrix from a connection path.

    Replaces the py3-incompatible xrange with range (identical semantics in
    Python 2 for loop iteration) and builds rows without a nested loop.

    :param nc_path: ordered list of path elements; each element's first item
        (``elem[0]``) is a node name used as a key into node_map.
    :param node_map: dict mapping node name -> Node-like object with a .uid
        index attribute.
    :return: NxN matrix (list of lists, N = len(node_map)) with 1 at
        [src][dst] for each consecutive pair in the path, 0 elsewhere.
    """
    n = len(node_map)
    c_graph = [[0] * n for _ in range(n)]

    # Mark each consecutive pair in the path as a directed connection.
    for i in range(len(nc_path) - 1):
        src_uid = node_map[nc_path[i][0]].uid
        dst_uid = node_map[nc_path[i + 1][0]].uid
        c_graph[src_uid][dst_uid] = 1

    return c_graph
def pop_contents_inside_chars(string, a_char, b_char):
    """Return the text between the first a_char and the first b_char.

    BUGFIX: the original (a) tested ``a_pos > 0`` and therefore failed when
    the opening delimiter was at index 0, and (b) sliced
    ``result[a_pos:b_pos-1]``, which included the opening delimiter and
    dropped the last character of the contents.

    :param string: text to search.
    :param a_char: opening delimiter.
    :param b_char: closing delimiter.
    :return: contents strictly between the delimiters, or the original
        string unchanged when a valid delimiter pair is not found.
    """
    a_pos = string.find(a_char)
    b_pos = string.find(b_char)
    if a_pos != -1 and b_pos != -1 and a_pos < b_pos:
        return string[a_pos + 1:b_pos]
    return string
def map_node_list(node_names):
    """Map node names to Node instances.

    Returns a dictionary where each key is a node name and the value is a
    Node whose uid is the order in which that name first appeared.
    Duplicate names are ignored (first occurrence wins).  The index-based
    xrange loop was replaced with direct iteration (py2/py3 compatible,
    identical behaviour).

    Precondition: node_names is a list of strings (node names).
    """
    result = {}
    for node_name in node_names:
        if node_name not in result:
            # uid = number of nodes mapped so far -> consecutive ids 0..N-1.
            result[node_name] = Node(len(result), node_name)
    return result
def get_node_names(node_list):
    """Extract node names from a list of element paths.

    Each element is either a name (string) or a list wrapping a name; list
    elements contribute their first item.

    BUGFIX: the original flattened node_list IN PLACE and returned the same
    list, corrupting the caller's connection path (c_graph_from_string later
    indexed ``nc_path[i][0]`` on the flattened strings, i.e. the first
    character of each name).  A new list is returned and the input is left
    untouched; the return value is unchanged for all callers.

    Precondition: node_list is a list of strings or shallow lists.
    """
    names = []
    for elem in node_list:
        names.append(elem[0] if isinstance(elem, list) else elem)
    return names
def full_partition(string, d):
    """Recursively partition *string*.

    Splits on delimiter d; each resulting piece is then partitioned again on
    '*' (the label marker).  A string that does not contain d is returned
    unchanged, so the leaves of the nested structure are plain strings.
    The xrange index loop was replaced with a comprehension (py2/py3
    compatible, identical behaviour).
    """
    if d not in string:
        return string
    return [full_partition(piece, '*') for piece in string.split(d)]
def c_path_from_string(string):
    """Split a connection string on '.' and wrap each element in a list.

    BUGFIX: when the string contained no '.', full_partition returned a
    plain string and the original per-index loop then attempted item
    assignment on it (TypeError).  A separator-free string now yields a
    single-element path.  Multi-element behaviour is unchanged.
    """
    parts = full_partition(string, '.')
    if not isinstance(parts, list):
        parts = [parts]
    return [[part] for part in parts]
#
# Tests
#

if __name__ == '__main__':
    #from KCtests import *
    #s = 'A.B C.D [E.F G].C '
    # Ad-hoc smoke test: parse a dotted path, build the node map from a
    # bracket-free copy of the same path, and print the resulting graph.
    cpath = c_path_from_string('[string.lol].two.hello')
    nmap = map_node_list(get_node_names(c_path_from_string('string.lol.two.hello')))
    print cpath
    print nmap
    print_matrix(c_graph_from_connection_path_and_map(cpath, nmap))
| {
"repo_name": "mrkev/Netword",
"path": "network_speak.py",
"copies": "1",
"size": "3698",
"license": "mit",
"hash": 3331139863811786000,
"line_mean": 23.8187919463,
"line_max": 88,
"alpha_frac": 0.6563007031,
"autogenerated": false,
"ratio": 2.7271386430678466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8570022269847615,
"avg_score": 0.06268341526404625,
"num_lines": 149
} |
# Names of the attributes every AtomType carries (each defaults to None).
atom_attrs = ['name',
              'atomic_num',
              'bond_degree_no_Hs',
              'bond_degree_with_Hs',
              'total_bond_degree',
              'explicit_valence',
              'implicit_valence',
              'total_valence',
              'formal_charge',
              'hybridization',
              'is_aromatic',
              'in_ring',
              'isotope',
              'mass',
              'num_radical_electrons',
              'element',
              'num_Hs',
              'monomer_type',
              'pdb_name',
              'pdb_insertion_code',
              # BUGFIX: a missing comma after 'pdb_occupancy' previously fused
              # it with 'pdb_residue_name' into the single (wrong) attribute
              # 'pdb_occupancypdb_residue_name'.
              'pdb_occupancy',
              'pdb_residue_name',
              'pdb_residue_number',
              'pdb_serial_number',
              'pdb_temp_factor',]


def _atom_type_factory(atom_attrs, atom_type_name):
    """Create a new AtomType subclass from a dict of attribute values.

    :param atom_attrs: dict mapping attribute name -> value.  Keys should be
        drawn from AtomType.attributes; unknown keys are reported and
        ignored, and missing keys are reported and left as None.
    :param atom_type_name: name of the generated class.
    :return: new subclass of AtomType carrying the given attribute values.
    """
    attributes = {attr: None for attr in AtomType.attributes}

    # Report attributes the input did not provide (they stay None).
    # The original used `assert` inside try/except, which is stripped when
    # Python runs with -O; an explicit membership test is equivalent and safe.
    for attr in AtomType.attributes:
        if attr not in atom_attrs:
            # logging
            print("Attribute {0} not found in atom input.".format(attr))

    # Copy over recognized attributes, warning about unknown ones.
    for attr, value in atom_attrs.items():
        if attr in AtomType.attributes:
            attributes[attr] = value
        else:
            # logging
            print("Input attribute {0} not in AtomType attributes, ignoring.".format(attr))

    return type(atom_type_name, (AtomType,), attributes)


# Base class built dynamically: one None-valued slot per attribute name.
AtomType = type('AtomType', (object,), {attr : None for attr in atom_attrs})
AtomType.attributes = atom_attrs
AtomType.factory = _atom_type_factory
# Example element atom types.
# BUGFIX(review): the original used mass numbers for 'atomic_num'
# (O: 16, C: 12); corrected to the actual atomic numbers (O: 8, C: 6).
O_attrs = {'atomic_num' : 8, 'element' : 'O'}
O_ATOM_TYPE = AtomType.factory(O_attrs, 'OAtomType')

H_attrs = {'atomic_num' : 1, 'element' : 'H'}
H_ATOM_TYPE = AtomType.factory(H_attrs, 'HAtomType')

C_attrs = {'atomic_num' : 6, 'element' : 'C'}
C_ATOM_TYPE = AtomType.factory(C_attrs, 'CAtomType')
class MoleculeType(object):
    """Template for a molecule: an ordered sequence of AtomTypes.

    Holds the per-atom type sequence, the set of distinct atom types, and
    placeholders (None) for features, feature families/types and bonds.
    """

    def __init__(self, name=None, atom_types=None):
        # Distinct atom types used by this molecule type.  Guard against the
        # default None so a bare MoleculeType() can still be constructed.
        self._atom_type_library = set(atom_types) if atom_types is not None else set()
        self._features = None
        self._feature_families = None
        self._feature_types = None
        self._bonds = None
        self.name = name
        # Ordered sequence of atom types, one entry per atom.
        self._atom_type_sequence = atom_types

    @property
    def atom_type_library(self):
        """List of the distinct atom types in this molecule type."""
        return list(self._atom_type_library)

    @property
    def features(self):
        return self._features

    @property
    def feature_families(self):
        return self._feature_families

    @property
    def feature_types(self):
        return self._feature_types

    @property
    def atom_types(self):
        """Ordered sequence of atom types, one per atom."""
        return self._atom_type_sequence

    @property
    def bonds(self):
        return self._bonds

    def to_molecule(self, coords=None):
        """Construct a Molecule using input coordinates with mapped indices.

        BUGFIX: this method was decorated with @property, which made it
        impossible to pass `coords` (property access supplies only self);
        the decorator was removed so it is an ordinary method.

        NOTE(review): relies on self.molecule, make_atom_type_library,
        CoordArray, Atom, Bond and Molecule, none of which are defined in
        this file -- confirm against the full project before calling.
        """
        coord_array = CoordArray(coords)

        # Make atoms out of the coord array
        self.make_atom_type_library()
        atom_idxs = range(self.molecule.GetNumAtoms())
        atoms = []
        for atom_idx in atom_idxs:
            atom_type = self.atom_type_library[atom_idx]
            atom = Atom(atom_array=coord_array, array_idx=atom_idx,
                        atom_type=atom_type)
            atoms.append(atom)

        # handle bonds
        bonds = []
        for bond in self.molecule.GetBonds():
            bonds.append(Bond(atoms, (bond.GetBeginAtomIdx(),
                                      bond.GetEndAtomIdx())))

        # TODO handle and create angles
        angles = None

        return Molecule(atoms, bonds, angles, mol_type=self)

    @classmethod
    def factory(cls, mol_type_name, name=None, atom_types=None):
        """Create an instance of a freshly generated MoleculeType subclass."""
        mol_class = type(mol_type_name, (cls,), {})
        return mol_class(name=name, atom_types=atom_types)
# Concrete molecule-type prototypes built from the atom types defined above.
water_attrs = dict(name='water',
                   atom_types=[H_ATOM_TYPE, O_ATOM_TYPE, H_ATOM_TYPE])
methanol_attrs = dict(name='methanol',
                      atom_types=[H_ATOM_TYPE, O_ATOM_TYPE, C_ATOM_TYPE,
                                  H_ATOM_TYPE, H_ATOM_TYPE, H_ATOM_TYPE])
WATER_TYPE = MoleculeType.factory('WaterType', **water_attrs)
METHANOL_TYPE = MoleculeType.factory('MethanolType', **methanol_attrs)
| {
"repo_name": "salotz/mast",
"path": "tmp/atom_type_test.py",
"copies": "1",
"size": "4474",
"license": "mit",
"hash": -3091541759368563000,
"line_mean": 29.6438356164,
"line_max": 91,
"alpha_frac": 0.5746535539,
"autogenerated": false,
"ratio": 3.7221297836938434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4796783337593843,
"avg_score": null,
"num_lines": null
} |
# atom class
from scipy.interpolate import interp1d
import numpy as np
import pandas as pd
import sys, os
here = os.path.dirname(os.path.abspath(__file__))
class atom:
    '''
    The atom class contains relevant parameters for determining the ionization
    rate in a strong field. Parameters come from 'Fundamentals of Attosecond Optics' and
    use the ADK formalism. It also contains information for calculation of XUV dispersion and XUV
    photoabsorption. It contains the index of refraction for the driving laser wavelength.
    Some parameters are stored in other classes but passed in during init. See comments of __init__.
    '''
    def __init__(self, Atom , Lam , Pressure , Temperature ):
        # Element symbol ('Xe', 'Kr', 'Ar', 'Ne' or 'He'); selects the
        # scattering-factor data file and all per-gas constants below.
        self.Atom = Atom
        #get from laser class, defaults given.
        self.Lam = Lam
        #get from gas class, defaults given.
        self.Pressure = Pressure
        self.Temperature = Temperature
        # intensity dependent phase
        self.alpha1 = 2
        self.alpha2 = 22
        # loaded using numpy
        # Columns of sf/<Atom>.txt: photon energy (eV) and the atomic
        # scattering factors f1 (dispersive) and f2 (absorptive).
        nrg, f1, f2 = np.genfromtxt(here + '/sf/' + self.Atom + '.txt', dtype = float, skip_header = 1,
                           usecols = (0,1,2), delimiter = '\t', unpack = True)
        # load using pandas
        # d = pd.read_csv('sf/' + self.Atom + '.txt', skiprows = 1, delimiter = '\t')
        # nrg = d.values[:,0]
        # f1 = d.values[:,1]
        # f2 = d.values[:,2]
        self.nrg = nrg
        self.f1 = f1
        self.f2 = f2
    def adk_params(self):
        '''
        ADK parameters. See 'Fundamentals of Attosecond Optics'
        Returns a dict keyed by parameter name:
        F0, n_star, l_star, ang_l, ang_m, abs_Cnl_sq, G_lm, Ip, alpha
        '''
        # NOTE(review): for Xe, l_star = n_star - 1, but for the other gases
        # l_star is identical to n_star -- verify against the ADK tables.
        F0 = {'Xe': 0.84187, 'Kr': 1.04375, 'Ar': 1.24665, 'Ne': 1.99547, 'He': 2.42946}
        n_star = {'Xe': 1.05906, 'Kr': 0.98583, 'Ar': 0.92915, 'Ne': 0.7943, 'He': 0.74387}
        l_star = {'Xe': 0.05906, 'Kr': 0.98583, 'Ar': 0.92915, 'Ne': 0.7943, 'He': 0.74387}
        ang_l = {'Xe': 1, 'Kr': 1, 'Ar': 1, 'Ne': 1, 'He': 0}
        ang_m = {'Xe': 0, 'Kr': 0, 'Ar': 0, 'Ne': 0, 'He': 0}
        abs_Cnl_sq = {'Xe': 3.88241, 'Kr': 4.02548,'Ar': 4.11564, 'Ne': 4.24355, 'He': 4.25575}
        G_lm = {'Xe': 3, 'Kr': 3,'Ar': 3, 'Ne': 3, 'He': 3}
        # Ionization potentials in eV.
        Ip = {'Xe': 12.129, 'Kr': 13.99,'Ar': 15.759, 'Ne': 21.564, 'He': 24.587}
        alpha = {'Xe': 9, 'Kr': 9,'Ar': 9, 'Ne': 9, 'He': 7}
        return {'F0':F0[self.Atom], 'n_star': n_star[self.Atom], 'l_star': l_star[self.Atom],
        'ang_l': ang_l[self.Atom], 'ang_m': ang_m[self.Atom], 'abs_Cnl_sq':abs_Cnl_sq[self.Atom],
        'G_lm': G_lm[self.Atom], 'Ip':Ip[self.Atom], 'alpha': alpha[self.Atom]}
    def xuv_index(self, eV):
        '''
        Based on atomic scattering factors from LBNL.
        Returns the (complex) index of refraction at photon energy eV for
        the gas density implied by Pressure and Temperature.
        '''
        re = 2.8179 * 10 ** -15 #classical electron radius
        kb = 1.3806488 * 10 ** -23 #Boltzmann constant
        f1_interp = interp1d(self.nrg, self.f1)
        f2_interp = interp1d(self.nrg, self.f2)
        # Photon wavelength in meters from energy in eV (1240 eV.nm rule).
        wl = 1240 / eV * 10 ** -9
        dens = self.Pressure/kb/self.Temperature #density
        return 1 - re * wl ** 2 / 2 / np.pi * dens * (f1_interp(eV) + 1j * f2_interp(eV))
    def xuv_absorption(self, eV):
        '''
        Based on atomic scattering factors from LBNL.
        Returns the absorption cross-section for a given photon energy.
        '''
        re = 2.8179 * 10 ** -15
        f2_interp = interp1d(self.nrg, self.f2)
        wl = 1240 / eV * 10 **-9
        return 2 * wl * re * f2_interp(eV)
    def drive_index(self):
        '''
        Based on Börzsönyi APPLIED OPTICS / Vol. 47, No. 27 / 20 September 2008
        returns the index of refraction of the driving laser for a given wavelength,
        pressure and temperature.
        '''
        # Two-term Sellmeier coefficients per gas (wavelength in micrometers).
        B1 = {'Xe': 103701.61 * 10 ** -8, 'Kr': 26102.88 * 10 ** -8, 'Ar': 20332.29 * 10 ** -8, 'Ne': 9154.48 * 10 ** -8, 'He': 4977.77 * 10 ** -8}
        C1 = {'Xe': 12.75 * 10 ** -6, 'Kr': 2.01 * 10 ** -6, 'Ar': 206.12 * 10 ** -6, 'Ne': 656.97 * 10 ** -6, 'He': 28.54 * 10 ** -6}
        B2 = {'Xe': 31228.61 * 10 ** -8, 'Kr': 56946.82 * 10 ** -8, 'Ar': 8.066 * 10 ** -8, 'Ne': 4018.63 * 10 ** -8, 'He': 1856.94 * 10 ** -8}
        C2 = {'Xe': 0.561 * 10 ** -3, 'Kr': 10.043 * 10 ** -3, 'Ar': 1.24665 * 10 ** -3, 'Ne': 5.728 * 10 ** -3, 'He': 7.760 * 10 ** -3}
        # Convert wavelength from meters to micrometers; scale density via
        # the 273/T ideal-gas factor.
        wl = self.Lam * 10 ** 6
        return np.sqrt( 1 + ( self.Pressure * 273 / self.Temperature ) * (
        B1[self.Atom] * wl ** 2 / (wl ** 2 - C1[self.Atom] ) +
        B2[self.Atom] * wl ** 2 / (wl ** 2 - C2[self.Atom] ) ) )
    def drive_index2(self):
        '''
        based on The Refractive Indices and Verdet Constants of the Inert Gases,
        Proc. R. Soc. Lond. A 1960 259, doi: 10.1098/rspa.1960.0237
        '''
        # Dispersion-series coefficients; wavelength below is in Angstroms.
        A = {'Xe': 1.366e-3, 'Kr': 8.377e-4 , 'Ar': 5.547e-4 , 'Ne': 1.335e-4 , 'He': 6.927e-5 }
        B1 = {'Xe': 9.02e5, 'Kr': 6.7e5, 'Ar': 5.15e5, 'Ne': 2.24e5, 'He': 2.24e5}
        B2 = {'Xe': 1.81e12, 'Kr': 8.84e11, 'Ar': 4.19e11, 'Ne': 8.09e10, 'He': 5.94e10}
        B3 = {'Xe': 4.89e18, 'Kr': 1.49e18, 'Ar': 4.09e17, 'Ne': 3.56e16, 'He': 1.72e16}
        B4 = {'Xe': 1.45e25, 'Kr': 2.74e24, 'Ar': 4.32e23, 'Ne': 0, 'He': 0}
        B5 = {'Xe': 4.34e31, 'Kr': 5.10e30, 'Ar': 0, 'Ne': 0, 'He': 0}
        wl = self.Lam * 10 ** 10
        return np.sqrt( 1 + A[self.Atom] * (1 + B1[self.Atom] / wl ** 2 + B2[self.Atom] / wl ** 4 + B3[self.Atom] / wl ** 6 + B4[self.Atom] / wl ** 8 + B5[self.Atom] / wl ** 10))
    def eta_crit(self, eV):
        '''
        Critical ionization fraction.
        Returns [dn, eta_crit]: the IR/XUV index difference and the
        ionization fraction at which it is cancelled by free electrons.
        '''
        re = 2.8179 * 10 ** -15 #classical electron radius
        kb = 1.3806488 * 10 ** -23 #Boltzmann constant
        # Number density at 1 atm and the configured temperature.
        Natm = 1.013 * 10 ** 5 / kb / self.Temperature
        dn = np.real(self.drive_index() - self.xuv_index(eV) )
        eta_crit = 1 / (1 + Natm * re * self.Lam ** 2 / 2 / np.pi / dn)
        return dn, eta_crit
    def kp(self):
        '''
        Decay rate, see Allison et al. PRL (2011)
        '''
        kp = {'Xe': .08, 'Kr': .2 , 'Ar': .3 , 'Ne': .4 , 'He': .5 }
        return kp[self.Atom]
| {
"repo_name": "c-benko/HHG_phasematching_fsEC",
"path": "src/atom.py",
"copies": "1",
"size": "6243",
"license": "mit",
"hash": 1651633357280067000,
"line_mean": 41.1689189189,
"line_max": 178,
"alpha_frac": 0.5093734978,
"autogenerated": false,
"ratio": 2.6924072476272647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37017807454272644,
"avg_score": null,
"num_lines": null
} |
"""ATOM feed feature
Generates ATOM feed of recent items entered into the application
"""
import flask
from database_setup import Item
from flask import request
from flask import url_for
from sqlalchemy import desc
from sqlalchemy.orm import sessionmaker
from urlparse import urljoin
from werkzeug.contrib.atom import AtomFeed
# Module-level SQLAlchemy session shared by all feed requests.
# NOTE(review): sessionmaker() is created without a bind here; the engine is
# presumably attached elsewhere before first use -- confirm.
_DBSession = sessionmaker()
_DBH = _DBSession()
# Blueprint registered by the application to expose /recent.atom.
feed = flask.Blueprint('feed', __name__)
def make_external(url):
    """Resolve *url* against the root URL of the current request,
    producing an absolute URL suitable for feed entries."""
    base = request.url_root
    return urljoin(base, url)
@feed.route('/recent.atom')
def recent_feed():
    """Generate the ATOM feed of the 15 most recent items.

    Each entry carries the item label (title), description (content),
    date, and a URL link to the item's detail page.
    """
    # FIX: use a distinct local name -- the previous local ``feed`` shadowed
    # the module-level Blueprint of the same name.
    atom_feed = AtomFeed('Item Catalog', feed_url=request.url, url=request.url_root)
    items = _DBH.query(Item).order_by(desc(Item.date)).limit(15).all()
    for item in items:
        atom_feed.add(
            title=item.label,
            content=item.description,
            content_type='text',
            url=make_external(url_for('category.showCategoryItem',
                                      category_label=item.category.label,
                                      item_label=item.label)),
            updated=item.date
        )
    return atom_feed.get_response()
| {
"repo_name": "novastorm/udacity-item-catalog",
"path": "vagrant/catalog/routes/feed.py",
"copies": "1",
"size": "1204",
"license": "bsd-2-clause",
"hash": 6449308096781487000,
"line_mean": 24.6170212766,
"line_max": 79,
"alpha_frac": 0.6677740864,
"autogenerated": false,
"ratio": 3.8838709677419354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051645054141936,
"avg_score": null,
"num_lines": null
} |
"""ATOM Handler.
Returns CHP incident data in the ATOM format.
"""
import re
import time
import urllib
import webapp2
from xml.etree import ElementTree
from utils import incident_request
class AtomHandler(incident_request.RequestHandler):
    """Serve CHP traffic incidents as an ATOM feed (with GeoRSS points)."""
    def get(self):
        # Load incidents via the shared RequestHandler helper.
        self.get_incidents()
        # 304 check
        if self.is_not_modified():
            return
        # Build the ATOM XML
        feed = ElementTree.Element('feed', {
            'xmlns': 'http://www.w3.org/2005/Atom',
            'xmlns:georss': 'http://www.georss.org/georss'
        })
        title = "CHP Traffic Incidents"
        if self.request.get("dispatch") == "SACC":
            title = "SacTraffic: Sacramento Area Traffic Incidents"
        # Comma-separated road filter, e.g. "i-5,i-80".
        roads = self.request.get("roads")
        # Change this for the West Sac News-Leger since no one appears home there
        # NOTE(review): direct headers['User-Agent'] access raises KeyError if
        # the header is absent -- confirm webapp2 always provides it.
        if "http://www.westsac.com/news-ledger" in self.request.headers['User-Agent']:
            if roads == "i-5,i-80,i-50,i-99":
                roads = "I5,I80,US50,Hwy 99"
        if roads != "":
            title = "%s (%s)" % (title, roads)
        ElementTree.SubElement(feed, 'title').text = title
        ElementTree.SubElement(feed, 'subtitle').text = 'Traffic incidents from the CHP'
        ElementTree.SubElement(ElementTree.SubElement(feed, 'author'), 'name').text = 'The California Highway Patrol'
        ElementTree.SubElement(feed, 'id').text = 'tag:traffic.lectroid.net,2010-06-24:100'
        ElementTree.SubElement(feed, 'updated').text = self.incidents_last_mod.strftime("%Y-%m-%dT%H:%M:%SZ")
        # site link...
        ElementTree.SubElement(feed, 'link', {
            'href': "http://www.sactraffic.org"
        })
        # self link...
        self_href = "http://%s/atom" % self.request.environ['HTTP_HOST']
        query_string = "?" + self.request.environ['QUERY_STRING']
        if query_string != "?":
            self_href += query_string
        ElementTree.SubElement(feed, 'link', {
            'href': self_href,
            'rel': 'self',
            'type': 'application/atom+xml'
        })
        # pubsubhubbub link...
        # don't show for roads search feeds, since we don't ping for those
        if roads == "":
            ElementTree.SubElement(feed, 'link', {
                'href': 'http://pubsubhubbub.appspot.com',
                'rel': 'hub'
            })
        # logo & icon
        ElementTree.SubElement(feed, 'icon').text = "http://%s/favicon.ico" % self.request.environ['HTTP_HOST']
        for incident in self.incidents:
            # Apply the road filter as a case-insensitive alternation match.
            if roads != "":
                road_match = re.search(roads.replace(",", "|"), incident.Location, flags=re.I)
                if road_match is None:
                    continue
            # Fall back to the dispatch area when no city is known.
            city = incident.city
            if city is None:
                city = incident.Area
            title = "%s: %s, %s" % (incident.LogType, incident.Location, city)
            details = incident.LogDetails
            # Entry body: a timestamped detail list, optionally followed by a
            # static map image when the incident is geolocated.
            description = "<ul>"
            for detail in details['details']:
                description = "%s<li>%s: %s</li>" % (description, detail['DetailTime'], detail['IncidentDetail'])
            description = "%s</ul>" % description
            static_map_url = ""
            if incident.geolocation is not None:
                static_map_opts = urllib.urlencode({
                    "size": "200x200",
                    "markers": "color:0x165279|%f,%f" % (incident.geolocation.lat, incident.geolocation.lon),
                    "zoom": "12",
                    "maptype": "roadmap",
                    "sensor": "true"
                })
                static_map_url = "http://maps.google.com/maps/api/staticmap?%s" % static_map_opts
                description = '%s<img src="%s" width="200" height="200" border="1"/>' % (description, static_map_url)
            entry = ElementTree.SubElement (feed, 'entry')
            ElementTree.SubElement(entry, 'title').text = title
            ElementTree.SubElement(entry, 'id').text = 'tag:traffic.lectroid.net,2010-11-30:' + incident.LogID
            ElementTree.SubElement(entry, 'content', {'type': 'html'}).text = description
            ElementTree.SubElement(entry, 'published').text = incident.LogTime.strftime("%Y-%m-%dT%H:%M:%SZ")
            ElementTree.SubElement(entry, 'updated').text = incident.updated.strftime("%Y-%m-%dT%H:%M:%SZ")
            if incident.geolocation is not None:
                ElementTree.SubElement(entry, 'link', {'rel': 'enclosure', 'type': 'image/png', 'href': static_map_url})
                ElementTree.SubElement(entry, 'georss:point').text = str(incident.geolocation.lat) + " " + str(incident.geolocation.lon)
            if incident.LogTypeID is not None:
                ElementTree.SubElement(entry, 'category', {'term': incident.LogTypeID})
        # Output
        self.response.headers["Content-Type"] = "application/atom+xml"
        self.send_conditional_headers()
        self.response.write('<?xml version="1.0"?>') # oh this can't be right!
        self.response.write(ElementTree.tostring(feed))
application = webapp2.WSGIApplication([('/atom', AtomHandler)], debug=True)
| {
"repo_name": "lectroidmarc/SacTraffic",
"path": "appengine/handlers/atom_data.py",
"copies": "1",
"size": "4408",
"license": "isc",
"hash": -9053671855219560000,
"line_mean": 33.7086614173,
"line_max": 124,
"alpha_frac": 0.6671960073,
"autogenerated": false,
"ratio": 3.048409405255878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4215605412555878,
"avg_score": null,
"num_lines": null
} |
'''Atomic-access classes: generic locked objects, atomic dictionaries (ducks!) and atomic lists.
'''
import threading
from collections import UserDict, UserList
class LockedObject(object):
    """Base object guarded by a reentrant lock; usable as a context manager.

    ``with obj:`` acquires ``obj.object_lock`` and yields the object itself.
    """

    def __init__(self):
        self.object_lock = threading.RLock()

    def __enter__(self):
        self.object_lock.acquire()
        return self

    def __exit__(self, otype, value, traceback):
        self.object_lock.release()
class AtomicDuck(UserDict):
    """Thread-safe dictionary with the standard mapping interface.

    Entering the context manager acquires the lock and yields the *raw*
    underlying dict (``self.data``), not the wrapper itself.
    (The very silly name is entirely intentional.)
    """

    def __init__(self, contents=None):
        super().__init__(contents)
        self.dict_lock = threading.RLock()

    def __enter__(self):
        self.dict_lock.acquire()
        return self.data

    def __exit__(self, otype, value, traceback):
        self.dict_lock.release()
class AtomicList(UserList):
    """Thread-safe list with the standard sequence interface.

    Entering the context manager acquires the lock and yields the *raw*
    underlying list (``self.data``), not the wrapper itself.
    """

    def __init__(self, contents=None):
        super().__init__(contents)
        self.list_lock = threading.RLock()

    def __enter__(self):
        self.list_lock.acquire()
        return self.data

    def __exit__(self, otype, value, traceback):
        self.list_lock.release()
| {
"repo_name": "SublimeHaskell/SublimeHaskell",
"path": "internals/atomics.py",
"copies": "2",
"size": "1293",
"license": "mit",
"hash": -2937804385535685000,
"line_mean": 27.1086956522,
"line_max": 113,
"alpha_frac": 0.618716164,
"autogenerated": false,
"ratio": 4.02803738317757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011912063888991865,
"num_lines": 46
} |
# Atomic coordinate translation and rotation functions;
# Functions of calculations of distance of atom pair, angle of three atoms and torsion of four atoms were also included.
# -- by Lifeng Zhao, 02,4th,2009
import sys
from math import *
import numpy as np
from math import sqrt
from copy import deepcopy
class bcolors:
    """ANSI terminal color escape codes.

    Usage: " print bcolors.BLUE + 'text you want to print' + bcolors.ENDC"
    """
    PINK = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'

    def disable(self):
        # Shadow the class-level codes with empty instance attributes,
        # turning coloring off for this instance only.
        for code_name in ('PINK', 'BLUE', 'GREEN', 'YELLOW', 'RED', 'ENDC'):
            setattr(self, code_name, '')
def distance(x12):
    """
    Euclidean distance between an atom pair.
    x12: [[x1,y1,z1],[x2,y2,z2]]
    """
    p, q = x12[0], x12[1]
    deltas = [p[k] - q[k] for k in range(3)]
    return sqrt(sum(d * d for d in deltas))
def middle(x12):
    """
    Return the geometric center (centroid) of a set of points.

    x12: [[x1,y1,z1],[x2,y2,z2], ...] -- despite the name, this works for
         any number of points, not just a pair (the original loop already
         did; the dead commented-out pair-only code has been removed).
    Returns [cx, cy, cz] as floats.
    """
    n = len(x12)
    # Seed each axis sum with 0.0 so the result stays float for int input
    # (matters under Python 2 integer division).
    return [sum(axis, 0.0) / n for axis in zip(*x12)]
def distance2(x12):
    """
    Squared distance between an atom pair (no square root taken).
    x12: [[x1,y1,z1],[x2,y2,z2]]
    """
    a, b = x12[0], x12[1]
    return sum((a[k] - b[k]) * (a[k] - b[k]) for k in range(3))
def arraypointmatrix(array, matrix):
    """
    Dot product of a row vector with a matrix, returned as a plain list.
    array:  [x1,x2,...,xm]
    matrix: [[y11,...,y1n],...,[ym1,...,ymn]]
    """
    product = np.dot(np.array(array), np.array(matrix))
    return list(product)
def SpecialAngle(x0,x1):
    """
    Calculate these two angles:
    alpha: vector (x0--x1) with y axle
    beta : dihedral between face 0 (vector (x0--x1) with y axle) and face 1 (yz face)
    return: radians
    """
    dx = [x1[0]-x0[0],x1[1]-x0[1],x1[2]-x0[2]]
    r = sqrt(dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2])
    # Angle between y axle and r:
    if x0[0]==x1[0] and x0[2]==x1[2]: alpha = 0.0
    else: alpha = acos(dx[1]/r)
    # Angle between face (with vector and y axle) and face YZ
    if x0[0]==x1[0]: beta = 0.0
    else:
        tempr = dx[0]/(r*sin(alpha))
        # Clamp tiny floating-point overshoot so asin() does not raise a
        # domain error when |tempr| is marginally above 1.
        if tempr>=1.0 and tempr<1.00001: tempr = 1.0
        if tempr<=-1.0 and tempr>-1.00001: tempr = -1.0
        beta = asin(tempr)
        #beta = asin(dx[0]/(r*sin(alpha)))
    # Fold beta into the correct quadrant from the signs of the z and x
    # components (asin alone only covers [-pi/2, pi/2]).
    if dx[2]<0 and dx[0]>0: beta = pi-beta
    elif dx[2]<0 and dx[0]<0: beta = -pi-beta
    return [alpha,beta]
def translate(AtomCoord, dxyz):
    """
    Shift a single atom coordinate by a displacement vector.
    AtomCoord: [x,y,z]
    dxyz: [dx,dy,dz]
    Returns a new [x,y,z]; the input is not modified.
    """
    return [AtomCoord[0] + dxyz[0],
            AtomCoord[1] + dxyz[1],
            AtomCoord[2] + dxyz[2]]


def transmole(MoleCoord, dxyz):
    """
    Shift every atom of a molecule by the same displacement vector.
    MoleCoord: [[x0,y0,z0],[x1,y1,z1],...]
    dxyz: [dx,dy,dz]
    Returns a new coordinate list; the input is not modified.
    """
    return [translate(atom_xyz, dxyz) for atom_xyz in MoleCoord]
###############################
# Rotate a single atom around an "x", "y" or "z" by "angle" using *RIGHT* hand rule:
def rotate(AtomCoord, axle, angle):
    """
    Rotate a single atom around the 'x', 'y' or 'z' axis by *angle*
    using the *RIGHT* hand rule.
    AtomCoord: [x,y,z]
    axle: 'x', 'y' or 'z'
    angle: degrees
    Returns the rotated [x,y,z]; the input list is not modified.
    Exits the process (status 2) on an unknown axle, as before.
    """
    # Homogeneous row vector [x, y, z, 1] multiplied by a 4x4 rotation matrix.
    arrayorign = [AtomCoord[0], AtomCoord[1], AtomCoord[2], 1.0]
    angle = radians(angle)
    if axle == 'x':
        matrix = [[1.0, 0.0, 0.0, 0.0],
                  [0.0, cos(angle), sin(angle), 0.0],
                  [0.0, -sin(angle), cos(angle), 0.0],
                  [0.0, 0.0, 0.0, 1.0]]
    elif axle == 'y':
        matrix = [[cos(angle), 0.0, -sin(angle), 0.0],
                  [0.0, 1.0, 0.0, 0.0],
                  [sin(angle), 0.0, cos(angle), 0.0],
                  [0.0, 0.0, 0.0, 1.0]]
    elif axle == 'z':
        matrix = [[cos(angle), sin(angle), 0.0, 0.0],
                  [-sin(angle), cos(angle), 0.0, 0.0],
                  [0.0, 0.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0, 1.0]]
    else:
        err = 2
        # FIX: function-call print syntax (identical output on Python 2 for a
        # single parenthesized argument) so the module also parses on Python 3.
        print('Error(%d)' % err)
        sys.exit(err)
    arraynew = arraypointmatrix(arrayorign, matrix)
    return arraynew[:3]
##############################
def rotmole(MoleCoord, point, axle, angle):
    """
    Rotate a whole molecule about an axis ('x', 'y' or 'z') passing
    through *point*, by *angle* degrees.
    MoleCoord: [[x0,y0,z0],[x1,y1,z1],...]
    point: [x,y,z]
    Returns a new coordinate list; the input is not modified.
    """
    back_shift = [-point[0], -point[1], -point[2]]
    rotated = []
    for atom_xyz in MoleCoord:
        # Shift the rotation point to the origin, rotate, shift back.
        shifted = translate(atom_xyz, back_shift)
        turned = rotate(shifted, axle, angle)
        rotated.append(translate(turned, point))
    return rotated
##############################
def rotmole2impose(MoleCoord, Vector):
    """
    Rotate a molecule to align it with a vector defined by two points
    (r1 -> r2).

    MoleCoord: [[x0,y0,z0],[x1,y1,z1],...]
    Vector: pair of points [[x,y,z], [x,y,z]] giving the r1->r2 direction.

    BUG FIX: the body referenced undefined names ``x0``/``x1`` (documented
    in the old docstring but absent from the signature), so every call
    raised NameError. The endpoints are now unpacked from ``Vector``.
    """
    x0, x1 = Vector[0], Vector[1]
    angles = SpecialAngle(x0, x1)
    # NOTE(review): SpecialAngle returns radians while rotmole expects
    # degrees (cf. rotmole1, which converts explicitly) -- confirm the
    # intended units before relying on this function.
    temp_mole = rotmole(MoleCoord, [0, 0, 0], 'y', angles[0])
    temp_mole = rotmole(temp_mole, [0, 0, 0], 'x', angles[1])
    return temp_mole
##############################
def rotmole1(MoleCoord, x0, x1, angle):
    """
    Rotate molecule according to a vector (x0->x1) by "angle"
    InputUnit: angle: degree; x: angstrom
    """
    ## the backward direction of the vector:
    dxyz = [-x0[0], -x0[1], -x0[2]]
    tempx1 = [x1[0]-x0[0], x1[1]-x0[1], x1[2]-x0[2]]
    temp_mole = deepcopy(MoleCoord)
    ## translate all atoms backward with -x0:
    temp_mole = transmole(temp_mole, dxyz)
    angles = SpecialAngle(x0,x1)
    # Azimuth of the shifted vector about y, obtained as a torsion against
    # reference points on the z and y axes (torsion returns degrees).
    tempangle = torsion([[0.0, 0.0, 1.0],
                         [0.0,-1.0, 0.0],
                         [0.0, 1.0, 0.0],
                         tempx1])
    # Mixed units on purpose: angles[0] is converted from radians to
    # degrees, tempangle is already in degrees.
    angles = [degrees(angles[0]), tempangle]
    ## aline the vector (x0->x1) to "y":
    temp_mole = rotmole(temp_mole, [0.0, 0.0, 0.0], 'y', -angles[1])
    temp_mole = rotmole(temp_mole, [0.0, 0.0, 0.0], 'x', -angles[0])
    ## rotate "angle" according to "y":
    temp_mole = rotmole(temp_mole, [0.0, 0.0, 0.0], 'y', angle)
    ## move all atoms back to vector (x0->x1) from "y":
    temp_mole = rotmole(temp_mole, [0.0, 0.0, 0.0], 'x', angles[0])
    temp_mole = rotmole(temp_mole, [0.0, 0.0, 0.0], 'y', angles[1])
    ## translate back to x0 from the origin:
    temp_mole = transmole(temp_mole, x0)
    return temp_mole
def angle(x123):
    """
    Angle (in degrees) at atom 2, formed by atoms 1-2-3.
    x123: [[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]]
    Computed via the law of cosines on the triangle side lengths.
    """
    pa, pb, pc = (np.array(p) for p in x123)
    # Triangle side vectors.
    side_a = pb - pc
    side_b = pc - pa
    side_c = pa - pb
    len_a = sqrt(side_a[0] * side_a[0] + side_a[1] * side_a[1] + side_a[2] * side_a[2])
    len_b_sq = side_b[0] * side_b[0] + side_b[1] * side_b[1] + side_b[2] * side_b[2]
    len_c = sqrt(side_c[0] * side_c[0] + side_c[1] * side_c[1] + side_c[2] * side_c[2])
    cos_val = (len_a * len_a + len_c * len_c - len_b_sq) / (2 * len_a * len_c)
    return acos(cos_val) * 180.0 / pi
#####################################
def torsion(x1234):
    """
    Torsion (dihedral) angle, in degrees, defined by four atoms.
    x1234: [[x1,y1,z1],...,[x4,y4,z4]]
    Exits the process (status 1) for a linear triad, for which the torsion
    is undefined. A large block of dead commented-out code was removed.
    """
    temp = deepcopy(x1234)
    # Align the 2->3 bond with the y axis.
    [alpha01, beta01] = SpecialAngle(temp[1],temp[2])
    for ii in temp[:]:
        ii[:] = rotate(ii,'y',-degrees(beta01))
        ii[:] = rotate(ii,'x',-degrees(alpha01))
    # After alignment, a point on the y axis means three collinear atoms.
    if (temp[3][1]==temp[2][1] and temp[3][2]==temp[2][2]) or \
       (temp[0][1]==temp[1][1] and temp[0][2]==temp[1][2]):
        # FIX: function-call print syntax (same output on Python 2 for a
        # single parenthesized argument) so the module also parses on Python 3.
        print('Error: linear triad.')
        sys.exit(1)
    # Rotate about y so atom 4 lies in the yz plane.
    [alpha01, beta01] = SpecialAngle(temp[2],temp[3])
    for ii in temp[:]:
        ii[:] = rotate(ii,'y',-beta01*180.0/pi)
    # The azimuth of atom 1 about the aligned bond is the torsion angle.
    [alpha01, beta01] = SpecialAngle(temp[1],temp[0])
    return -degrees(beta01)
| {
"repo_name": "sethbrin/QM",
"path": "version1/python2/lib_atomcoords.py",
"copies": "2",
"size": "9170",
"license": "mit",
"hash": 3693669950180911000,
"line_mean": 26.4550898204,
"line_max": 120,
"alpha_frac": 0.5242093784,
"autogenerated": false,
"ratio": 2.578740157480315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4102949535880315,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2012, 2015 Cisco Systems, Inc. and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
import binascii
import struct
import logging
_LOGGER = logging.getLogger('pcepy.message')
class Bits(object):
    """A fixed single field - an atomic value occupying a continuous sequence
    of bits inside a Block"""
    # Default default value
    _value = None
    # Size of field, possibly fixed by subclass
    _size = None

    def __init__(self, **kwargs):
        super(Bits, self).__init__()
        # The name setter below also derives self._ikey, the per-instance
        # storage key used by the descriptor protocol.
        self.name = kwargs.get('name')
        self._offset = kwargs.get('offset', 0)
        if self._size is None:
            self._size = kwargs['size']
        # Default value
        if 'value' in kwargs:
            self._value = kwargs['value']
        # The block class containing this field
        self._block = None

    def __get__(self, instance, block):
        """Current value for instance"""
        # Descriptor protocol: per-instance values live in the instance's
        # __dict__ under self._ikey; fall back to the field default.
        if instance is None:
            return self
        else:
            return instance.__dict__.get(self._ikey, self._value)

    def __set__(self, instance, value):
        """Set value for instance"""
        instance.__dict__[self._ikey] = self._set_value(value)

    def _set_value(self, value):
        """Convert value-to-be-set to stored value"""
        return value

    def __delete__(self, instance):
        """Delete value in instance (effectively resets to default)"""
        if self._ikey in instance.__dict__:
            del instance.__dict__[self._ikey]

    def _str_value(self, value):
        """Convert value to human-readable representation"""
        return str(value)

    def str(self, instance):
        """Get human-readable value in instance"""
        return '%s: %s' % (
            self._name,
            self._str_value(self.__get__(instance, type(instance)))
        )

    def read(self, buf, off, instance):
        """Read value of this field for Block instance from buffer buf.
        where the block starts at byte off and has network byte order.
        """
        raise NotImplementedError()

    def write(self, buf, off, instance):
        """Write value of this field for Block instance into buffer buf.
        where the block starts at byte off and has network byte order.
        The buffer must be pre-filled with NULs to the correct size.
        """
        raise NotImplementedError()

    @property
    def offset(self):
        """The big-endian bit offset within owners block"""
        return self._offset

    @property
    def size(self):
        """The number of bits occupied"""
        return self._size

    @property
    def name(self):
        """The unique name for its block, and possibly higher.
        By default, it is set to its identifier in the block."""
        return self._name

    @name.setter
    def name(self, name):
        # Keep the cached instance-storage key in sync with the name.
        self._name = name
        self._ikey = None if not name else '_bits_%s' % name

    def __lt__(self, other):
        # Fields sort by name (used when the metaclass orders _bits).
        return self._name < other.name
class Flag(Bits):
    """Single-bit boolean field.

    A false value is never written back to the buffer so that union-style
    blocks sharing bytes do not clobber each other's zero bits.
    """

    _value = False
    _size = 1

    def __init__(self, **kwargs):
        kwargs.setdefault('value', False)
        super(Flag, self).__init__(**kwargs)

    def _set_value(self, value):
        return bool(value)

    def str(self, instance):
        # Render just the flag name when set, an empty string when clear.
        return self._name if instance.__dict__.get(self._ikey, self._value) else ''

    def read(self, buf, off, instance):
        byte_index = off + self._offset // 8
        mask = 1 << (7 - self._offset % 8)
        instance.__dict__[self._ikey] = self._set_value(buf[byte_index] & mask)

    def write(self, buf, off, instance):
        byte_index = off + self._offset // 8
        mask = 1 << (7 - self._offset % 8)
        if instance.__dict__.get(self._ikey, self._value):
            buf[byte_index] |= mask
        # else: deliberately leave the zero bit untouched.
class Int(Bits):
    # Unsigned integer field spanning an arbitrary (possibly unaligned)
    # bit range, big-endian within the block.
    _value = 0

    def __init__(self, **kwargs):
        super(Int, self).__init__(**kwargs)
        # Exclusive upper bound for values of this bit width.
        self._sup = 1 << self._size

    def _set_value(self, value):
        # Validate range eagerly on assignment (read/write only log).
        value = int(value)
        if value < 0:
            raise ValueError("Value %d is negative for field %s" % (value, self._name))
        if value >= self._sup:
            raise ValueError("Value %d too large for field %s of bitlength %d" % (value, self._name, self._size))
        return value

    def read(self, buf, off, instance):
        off += self._offset // 8
        startbit = self._offset % 8
        # how many bits do we occupy
        span = startbit + self._size
        # Accumulate whole bytes covering the span, then trim the excess
        # bits after the field (shift) and before it (modulo).
        value = 0
        while span > 0:
            value <<= 8
            value |= buf[off]
            span -= 8
            off += 1
        if span: # kill bits in last byte after us
            value >>= -span
        if startbit: # kill bits before startbit
            value %= 1 << self._size
        instance.__dict__[self._ikey] = value

    def write(self, buf, off, instance):
        off += self._offset // 8
        startbit = self._offset % 8
        # how many bits do we occupy
        span = startbit + self._size
        value = instance.__dict__.get(self._ikey, self._value)
        # Zero is never written: union-style blocks rely on this (the
        # buffer is expected to be NUL-filled already).
        if value == 0:
            return
        if value < 0:
            _LOGGER.error("Value %d is negative for field %s" % (value, self._name))
            value = 0
        if value >= self._sup:
            _LOGGER.error("Value %d too large for field %s of bitlength %d" % (value, self._name, self._size))
            value %= self._sup
        # copy bits from first byte
        # Preserve the neighbouring bits that share our first byte.
        if startbit:
            byte = buf[off] >> (8 - startbit)
            byte <<= self._size
            value |= byte
        # move to last byte
        off += span // 8
        rest = span % 8
        if not rest:
            off -= 1
        # copy bits from last byte
        # Preserve the neighbouring bits that share our last byte.
        if rest:
            rest = 8 - rest
            value <<= rest
            byte = buf[off]
            # kill our bits
            byte %= 1 << rest
            value |= byte
        # rewrite buffer
        # Emit the merged value back, last byte first.
        while span > 0:
            buf[off] = value & 0xFF
            value >>= 8
            span -= 8
            off -= 1
class Float(Bits):
    """32-bit big-endian IEEE-754 float field. Must be byte-aligned."""

    _value = 0.0
    _size = 32

    def __init__(self, **kwargs):
        # FIX: default the offset to 0 (as Bits does) instead of raising
        # TypeError on None % 8 when 'offset' is omitted.
        if kwargs.get('offset', 0) % 8:
            raise ValueError('Floats must be byte-aligned')
        super(Float, self).__init__(**kwargs)

    def _set_value(self, value):
        return float(value)

    def _str_value(self, value):
        return "%0.2f" % value

    def read(self, buf, off, instance):
        off += self._offset // 8
        octets = bytes(buf[off:off+4])
        # BUG FIX: struct.unpack returns a 1-tuple; previously the tuple
        # itself was stored as the field value. Extract the single float.
        value = struct.unpack(">f", octets)[0]
        instance.__dict__[self._ikey] = value

    def write(self, buf, off, instance):
        off += self._offset // 8
        value = instance.__dict__.get(self._ikey, self._value)
        # Zero is never written (buffer is pre-filled with NULs).
        if value == 0.0:
            return
        wbytes = struct.pack(">f", value)
        buf[off:off+4] = wbytes
def _int_to_bytes(value, length):
"""Convert an int to the byte array it really represents."""
buf = bytearray(length)
for off in range(length):
buf[off] = value & 0xFF
value >>= 8
buf.reverse()
return buf
class Ipv4(Int):
    """An Int block representing an IPv4 address."""
    _size = 32
    def _str_value(self, value):
        # Rendered as grouped hex bytes via to_hex, not dotted-quad notation.
        return to_hex(_int_to_bytes(value, self._size // 8))
class Ipv6(Int):
    """An Int block representing an IPv6 address."""
    _size = 128
    def _str_value(self, value):
        # Rendered as grouped hex bytes via to_hex, not colon-hex notation.
        return to_hex(_int_to_bytes(value, self._size // 8))
class Unset(Flag):
    """Bits added to each block to mark reserved/unassigned bits"""
    def read(self, buf, off, instance):
        super(Unset, self).read(buf, off, instance)
        # A reserved bit should always be zero; log (but tolerate) a set bit,
        # which hints at a protocol mismatch or corrupted buffer.
        if instance.__dict__.get(self._ikey):
            _LOGGER.warning("Reading at <%s>[%s]: Bit %s is set" % (id(buf), off, self._offset))
    def write(self, buf, off, instance):
        super(Unset, self).write(buf, off, instance)
        # Mirror of read(): warn when about to serialize a set reserved bit.
        if instance.__dict__.get(self._ikey):
            _LOGGER.warning("Writing at <%s>[%s]: Bit %s is set" % (id(buf), off, self._offset))
def padlen(length, base):
    """Return the lowest multiple of *base* that is not smaller than *length*."""
    remainder = length % base
    return length if remainder == 0 else length + base - remainder


def padded(buf, base=4, fill=b'\0', length=None):
    """Return *buf* (as bytes) padded with *fill* to *length*, or to a
    multiple of *base* when no explicit length is given."""
    original = len(buf)
    target = padlen(original, base) if length is None else length
    missing = target - original
    assert missing >= 0, (missing, original, buf)
    if missing:
        buf = buf + fill * missing
    return bytes(buf)
def to_hex(octets):
    """Return a human-readable hexadecimal representation of octets.

    Bytes are grouped four at a time: groups are delimited by '|' and bytes
    inside a group by spaces; a trailing '|' appears only when the input is
    a whole number of groups. Note the historical quirk: the empty input
    yields a str, all other inputs yield bytes.
    """
    as_hex = binascii.hexlify(bytes(octets))
    if not as_hex:
        return ''
    pieces = []
    for pos in range(0, len(as_hex), 2):
        # Every 4th byte starts a new '|'-delimited group.
        pieces.append(b'|' if pos % 8 == 0 else b' ')
        pieces.append(as_hex[pos:pos + 2])
    if len(as_hex) % 8 == 0:
        pieces.append(b'|')
    return b''.join(pieces)
class SizeError(Exception):
    """Size Error Exception

    Exception thrown when a Block refuses to read a buffer due to unsatisfied
    size constraints. The calling function may then choose to replace this Block
    with an Unknown Block of a shared superclass."""
    pass
class _BlockMeta(type):
    """Metaclass for Block classes
    Adds the _size, _bits attributes, and all Unset bits (named _unset_<offset>).
    _size is the fixed size of the block, padded to _block_pad attribute (default 4).
    _bits is a list of all Bits attributes defined for this block (including Unset)
    """
    def __new__(mcs, name, bases, attrs):
        """Create new Block-based class"""
        # Collect [assigned] Bits
        assigned = set()
        bits = list()
        for attrname, attr in attrs.items():
            if isinstance(attr, Bits):
                # Record every bit position this field occupies.
                assigned.update(
                    range(attr.offset, attr.offset + attr.size)
                )
                bits.append(attr)
                # Fields default their name to the attribute identifier.
                if not attr.name:
                    attr.name = attrname
        # Compute size
        # Byte size just large enough to hold the highest assigned bit.
        end = max(assigned) + 1 if assigned else 0
        size = end // 8
        if end % 8:
            size += 1
        # Padding granularity: _block_pad on this class, else from a base,
        # else 4 bytes.
        pad = attrs.get('_block_pad', None)
        if pad is None:
            for base in bases:
                pad = getattr(base, '_block_pad', None)
                if pad is not None:
                    break
            else:
                pad = 4
        size = padlen(size, pad)
        # Add unset bits
        # Every bit not claimed by a declared field becomes an Unset marker
        # so that stray set bits are noticed during read/write.
        for bit in range(size * 8):
            if bit not in assigned:
                unset = Unset(offset=bit, name='_unset_%d' % bit)
                bits.append(unset)
                attrs[unset.name] = unset
        bits.sort(key=lambda item: item.name)
        attrs['_size'] = size
        attrs['_bits'] = bits
        return super(_BlockMeta, mcs).__new__(mcs, name, bases, attrs)
# Kludge for python 2 and 3 syntax compatibility
# (equivalent to ``class _BlockBase(metaclass=_BlockMeta)`` on Python 3).
_BlockBase = _BlockMeta('_BlockBase', (object, ), dict(
    # updated by metaclass
    _bits=None,
    _size=None))
class Block(_BlockBase):
    """A group of Bits stored in one contiguous run of bytes.

    The current design does not let a subclass declare Bits of its own
    when its parent class already does.

    Blocks may be nested inside parent blocks, but should not be shared
    between parents, to avoid aliasing bugs and mix-ups.

    A Block may act as a union of same-sized related structures: a field
    whose value is zero writes nothing to the buffer, while all alternate
    values are read and set separately. Do not write back a read block
    with a modified field unless all the others were zeroed first.

    The clone() method (and the clone= init parameter) copies all
    sub-blocks, bits and related data into a fresh instance.
    """

    def clone(self):
        """Return a new instance of this Block initialized from self."""
        return type(self)(clone=self)

    def __init__(self, clone=None):
        super(Block, self).__init__()
        if clone is None:
            return
        # Copy every bit-field value over from the source instance.
        source_cls = type(clone)
        for field in self._bits:
            field.__set__(self, field.__get__(clone, source_cls))

    def _get_size(self):
        """Hook backing the size property; subclasses may override freely
        without calling any super implementation."""
        return self._size

    @property
    def size(self):
        """Current size of the block in bytes.

        Always padded to this Block's padding and possibly larger than the
        minimum needed for its Bits; equals the number of bytes that will
        actually be written on serialization. Computed by _get_size(),
        which should not read size-reporting Bits but must refresh them
        when necessary.
        """
        return self._get_size()

    def update(self, updates):
        """Set this block's fields from a dict of values.

        Keys that do not belong to this block are ignored.
        Return the number of fields that were updated.
        """
        changed = 0
        for field in self._bits:
            if field.name not in updates:
                continue
            field.__set__(self, updates[field.name])
            changed += 1
        return changed

    def read(self, buf, off, max_end):
        """Read the block from buf starting at off, bounded by max_end.

        Return the offset just past this block (never exceeds max_end).
        Raise SizeError when the offered byte count is wrong.
        """
        end = off + self._size
        if end > max_end:
            _errmsg = ("Block[%s] cannot fit [%d:%d], it needs [%d:%d]" %
                       (self.__class__.__name__, off, max_end, off, end))
            raise SizeError(_errmsg)
        for field in self._bits:
            field.read(buf, off, self)
        return end

    def write(self, buf, off):
        """Write the block into buf at off; enough NUL-prefilled space must
        be reserved. Return the offset just past this block."""
        for field in self._bits:
            field.write(buf, off, self)
        return off + self._size

    def __str__(self):
        return self.show({})

    def show(self, format, prefix=''):
        """Render the block's field values; 'format' may supply a 'data'
        separator string (defaults to ', ')."""
        unset_offsets = []
        rendered = []
        own_cls = type(self)
        for field in self._bits:
            if isinstance(field, Unset):
                # Only report Unset bits that carry a (spurious) value.
                if field.__get__(self, own_cls):
                    unset_offsets.append(field.offset)
            else:
                rendered.append(field.str(self))
        if unset_offsets:
            unset_offsets.sort()
            rendered.append('UNSET: [%s]' % ' '.join(str(off) for off in unset_offsets))
        separator = format.get('data', ', ')
        prefix = prefix or ''
        return ''.join(prefix + item + separator for item in rendered if item)
class HeadBlock(Block):
    """Base class for Blocks that start with a fixed header
    The header is a Block of its own and its size counts to the respective
    HeadBlock's total size (as computed by the _get_size method).
    The header may contain the total or partial size of the object; subclasses
    should update this value in _get_size to ensure its correctness upon
    sending. To report a false value when writing, set the report_length
    property to a non-None value.
    Though a HeadBlock is read and written from its header's offset,
    all offsets of its own body start at 0.
    Each subclass is responsible for reading its header.
    A HeadBlock may also contain more than a header and its body; it is up
    to a subclass to read and write additional data and track its length.
    Header lengths need not be cloned. Reported lengths are never cloned.
    """
    # the Block class for the header object
    _header_class = None
    @classmethod
    def get_header(cls, clone=None):
        """Instantiate this class's header Block (optionally cloned)."""
        return cls._header_class(clone=clone)
    def __init__(self, clone=None):
        super(HeadBlock, self).__init__(clone=clone)
        # When cloning, clone the header too; otherwise build a fresh one.
        self._header = self.get_header(
            clone=None if clone is None else clone.header)
        # Deliberately never cloned (see class docstring).
        self._report_length = None
    def _get_size(self):
        # Total size is the header's size plus this block's own body size.
        return self._header.size + self._size
    @property
    def header(self):
        """The header Block instance belonging to this HeadBlock."""
        return self._header
    @property
    def report_length(self):
        """The reported length when writing the Block."""
        return self._report_length
    @report_length.setter
    def report_length(self, length):
        # A fake length can only be reported via a header length field.
        if not hasattr(self._header, 'length'):
            raise AttributeError('Header does not have length attribute')
        self._report_length = length
    def update(self, updates):
        """Update body and header fields from a dict; also accepts the
        special 'report_length' key. Return the number of fields set."""
        updated = (super(HeadBlock, self).update(updates) + self._header.update(updates))
        if 'report_length' in updates:
            self.report_length = updates['report_length']
            updated += 1
        return updated
    def write(self, buf, off):
        """Write header then body into buf at off; return the end offset.

        When report_length is set, the header's length field is swapped to
        the reported value only for the duration of the header write, then
        restored — the in-memory block keeps its true length.
        """
        length = None
        if self._report_length is not None:
            length = self._header.length
            _LOGGER.info('Block %s reporting length %s instead of %s' %
                         (self.__class__.__name__, self._report_length, length))
            self._header.length = self._report_length
        off = self._header.write(buf, off)
        if length is not None:
            self._header.length = length
        return super(HeadBlock, self).write(buf, off)
    def __str__(self):
        return self.show({})
    def show(self, format, prefix=''):
        """Render this block prefixed with a '=Data=' marker.

        The separator key is chosen by the *name* of the immediate base
        class ('Object' vs 'Tlv'/'Rsvp') — presumably message categories
        defined elsewhere in this package.
        """
        if self.__class__.__bases__[0].__name__ == 'Object':
            separator = format.get('obj_sep', ' ')
        elif self.__class__.__bases__[0].__name__ == 'Tlv' or self.__class__.__bases__[0].__name__ == 'Rsvp':
            separator = format.get('item_sep', ' ')
        else:
            separator = ' '
        # Child fields get an extra indent level appended to the prefix.
        n_prefix = None if prefix is None else prefix + ' '
        prefix = prefix or ''
        output = prefix + '=Data=' + separator
        output += super(HeadBlock, self).show(format, n_prefix)
        return output
class Blob(object):
    """Arbitrary raw octets standing in for any message Block."""

    def __init__(self, octets=b'', size=None):
        # An explicit size wins over supplied content: allocate NUL bytes.
        if size is not None:
            octets = b'\0' * size
        self.octets = octets

    @property
    def octets(self):
        """Data contained in the blob."""
        return self._octets

    @octets.setter
    def octets(self, octets):
        self._octets = octets

    @property
    def valid(self):
        """A blob is always invalid."""
        return False

    @property
    def size(self):
        """Size of blob in bytes."""
        return len(self.octets)

    def update(self, updates):
        """Replace the payload when 'octets' is present; return count set."""
        if 'octets' not in updates:
            return 0
        self.octets = updates['octets']
        return 1

    def read(self, buf, off, end):
        """Consume buf[off:end] as the blob payload and return end."""
        _LOGGER.error("Reading blob in <%s>[%s:%s]" % (id(buf), off, end))
        self.octets = buf[off:end]
        return end

    def write(self, buf, off):
        """Copy the payload into buf at off and return the end offset."""
        end = off + len(self.octets)
        _LOGGER.error("Writing blob to <%s>[%s:%s]" % (id(buf), off, end))
        buf[off:end] = self.octets
        return end

    def __str__(self):
        return 'Blob octets="%s"' % to_hex(self.octets)
| {
"repo_name": "bigdataops/bgpcep",
"path": "pcep/pcepy/message/data.py",
"copies": "2",
"size": "20032",
"license": "epl-1.0",
"hash": -4086094602018899500,
"line_mean": 30.2024922118,
"line_max": 113,
"alpha_frac": 0.5679412939,
"autogenerated": false,
"ratio": 4.0839959225280325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003259163686688898,
"num_lines": 642
} |
# Atomic Emission
# Determine the relative energy (infrared / visible / ultraviolet) of a
# hydrogen atomic-emission measurement from an E value entered in scientific
# notation, and report the wavelength (meters) and frequency (Hertz) of the
# emission.
#
# Fixes over the original version:
# * input() returns strings; the original multiplied the raw strings
#   (string repetition), so the inputs are now converted to float/int.
# * a bare "print''" statement was a Python 3 syntax error.
# * output messages had missing spaces and typos.
# * 'type' and 'range' shadowed builtins; renamed.

# Planck's constant (J*s) and the speed of light (m/s).
PLANCK_CONSTANT = 6.62606876e-34
SPEED_OF_LIGHT = 299792458.0

# Wavelength boundaries: longer than VIS_LIMIT is infrared, shorter than
# UV_LIMIT is ultraviolet, in between is visible light.
VIS_LIMIT = 7e-7
UV_LIMIT = 38e-8


def classify_emission(energy):
    """Classify an emission of the given energy (in joules).

    Returns a tuple ``(frequency_hz, wavelength_m, level, spectral_range)``
    where ``level`` is 'low', 'medium' or 'high' and ``spectral_range`` is
    the matching region of the spectrum.
    """
    # E = h*v  =>  v = E/h ; wavelength = c/v.
    frequency = energy / PLANCK_CONSTANT
    wavelength = SPEED_OF_LIGHT / frequency
    if wavelength > VIS_LIMIT:
        level, spectral_range = 'low', 'infrared'
    elif wavelength > UV_LIMIT:
        level, spectral_range = 'medium', 'visible light'
    else:
        level, spectral_range = 'high', 'ultraviolet'
    return frequency, wavelength, level, spectral_range


def main():
    """Prompt for an E value in scientific notation and report the result."""
    print('')
    print('Welcome to the Amazing Atomic Emission application!')
    print('This program will use the E value you measured and recorded in')
    print('scientific notation in order to determine the relative energy of')
    print('the energy level transition along with also reporting the wavelength')
    print('in meters and frequency in Hertz of the energy emission')
    print('')
    # Prompt separately for the significand and the exponent, then assemble
    # them into a single E value.
    significand = float(input('Please input the significand value (the number at the beginning) of your E value and press return.'))
    exponent = int(input('Please input the exponential power of your E value ( the number 10 is raised to ) and press return.'))
    energy = significand * 10 ** exponent
    frequency, wavelength, level, spectral_range = classify_emission(energy)
    # Format frequency and wavelength in 3-digit scientific notation.
    vstring = '%.2E' % frequency
    lstring = '%.2E' % wavelength
    print('')
    print('The frequency of the energy wave emitted was ' + vstring + ' Hertz (cycles/sec)')
    print('')
    print('The length of each wave was ' + lstring + ' meters')
    print('')
    print('So based on the information above, it is determined')
    print('that the energy emitted is ' + level + ' level in the ' + spectral_range + ' range of the spectrum')
    print('')
    print('Thanks for using the Amazing Atomic Emission application!')


if __name__ == '__main__':
    main()
| {
"repo_name": "Jpowell10/firstrepo",
"path": "AtomicEmission.py",
"copies": "1",
"size": "3668",
"license": "cc0-1.0",
"hash": 2049667833132276700,
"line_mean": 47.9066666667,
"line_max": 121,
"alpha_frac": 0.7333696838,
"autogenerated": false,
"ratio": 3.856992639327024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5090362323127025,
"avg_score": null,
"num_lines": null
} |
# atomic model
# Standard library
import os
import logging
import warnings
import cPickle as pickle
from collections import OrderedDict

# Third-party
import h5py
import numpy as np
import pandas as pd
from scipy import interpolate
from astropy import table, units, constants
from pandas import DataFrame
class AtomDataNotPreparedError(Exception):
    """Signals an invalid use of AtomData's preparation step
    (e.g. calling prepare_atom_data more than once)."""
logger = logging.getLogger(__name__)
def data_path(fname):
    """Return the absolute path of *fname* inside this package's data directory."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, 'data', fname)
# Path of the HDF5 atomic database shipped with the package.
default_atom_h5_path = data_path('atom_data.h5')
# Plain-text table mapping atomic numbers to element symbols.
atomic_symbols_data = np.recfromtxt(data_path('atomic_symbols.dat'),
                                    names=['atomic_number', 'symbol'])
# Lookup tables in both directions: symbol -> Z and Z -> symbol.
symbol2atomic_number = OrderedDict(zip(atomic_symbols_data['symbol'],
                                       atomic_symbols_data['atomic_number']))
atomic_number2symbol = OrderedDict(atomic_symbols_data)
def read_atomic_data(fname=None):
    """Deprecated alias for read_basic_atom_data.

    The original used ``@PendingDeprecationWarning`` as a decorator, which
    replaced the function with a warning *instance* and made it uncallable
    (calling it raised TypeError). Emit a proper warning instead and
    delegate to read_basic_atom_data.
    """
    warnings.warn('read_atomic_data is deprecated, use read_basic_atom_data',
                  PendingDeprecationWarning, stacklevel=2)
    return read_basic_atom_data(fname)
def read_hdf5_data(fname, dset_name):
    """Read the dataset *dset_name* from the HDF5 file *fname*.

    Parameters
    ----------
    fname : `str`
        path to the HDF5 data file
    dset_name : `str`
        name of the dataset to load

    Returns
    -------
    data : `~astropy.table.Table`
        the dataset converted to an astropy Table
    """
    h5_file = h5py.File(fname, 'r')
    raw = np.asarray(h5_file[dset_name])
    result = table.Table(raw)
    h5_file.close()
    return result
def read_basic_atom_data(fname=None):
    """Read the basic atom data (atomic number, symbol, mass) from an HDF5 file.

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], symbol, mass[u]
    """
    return read_hdf5_data(fname, 'basic_atom_data')
def read_ionization_data(fname=None):
    """Read atomic number, ion number and ionization energy from an HDF5 file.

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], ion[1], ionization_energy[eV]

    .. note:: the ion number counts the *resulting* ion: neutral-to-singly
        ionized is ion = 1, singly-to-doubly ionized is ion = 2, and so on.
    """
    return read_hdf5_data(fname, 'ionization_data')
def read_levels_data(fname=None):
    """Read atomic number, ion number, level number, energy, g and metastable
    flags from an HDF5 file.

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], ion[1], level_number, energy, g, metastable
    """
    return read_hdf5_data(fname, 'levels_data')
def read_synpp_refs(fname):
    """Read the 'synpp_refs' dataset from the HDF5 file *fname*.

    Returns the dataset materialized as a numpy array. The file handle is
    closed before returning (the original implementation leaked it).
    """
    with h5py.File(fname, 'r') as h5_file:
        # __array__() copies the data, so closing the file afterwards is safe.
        return h5_file['synpp_refs'].__array__()
def read_lines_data(fname=None):
    """Read line data (wavelength, species, oscillator strengths and level
    ids) from an HDF5 file.

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields wavelength, atomic_number, ion_number, f_ul, f_lu,
        level_id_lower, level_id_upper.
    """
    return read_hdf5_data(fname, 'lines_data')
def read_zeta_data(fname):
    """Read the recombination coefficient (zeta) data from the HDF5 file.

    Returns a DataFrame indexed by (atomic_number, ion_number) with one
    column per radiation temperature. The file handle is now closed before
    returning (the original leaked it); the data is materialized first.

    Raises ValueError when fname is None or the dataset is missing, and
    IOError when the file does not exist.
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')
    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')
    with h5py.File(fname, 'r') as h5_file:
        if 'zeta_data' not in h5_file.keys():
            raise ValueError('zeta_data not available in this HDF5-data file. It can not be used with NebularAtomData')
        # Copy into memory while the file is still open.
        zeta_data = h5_file['zeta_data'][:]
        t_rads = h5_file['zeta_data'].attrs['t_rad']
    # First two columns are (atomic_number, ion_number); the rest are the
    # zeta values on the t_rad temperature grid.
    return pd.DataFrame(zeta_data[:, 2:],
                        index=pd.MultiIndex.from_arrays(zeta_data[:, :2].transpose().astype(int)),
                        columns=t_rads)
def read_collision_data(fname):
    """Read electron collision data and its temperature grid from the HDF5 file.

    Returns a tuple ``(collision_data, collision_temperatures)``. The file
    handle is now closed before returning (the original leaked it); both
    values are materialized first.

    Raises ValueError when fname is None or the dataset is missing, and
    IOError when the file does not exist.
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')
    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')
    with h5py.File(fname, 'r') as h5_file:
        if 'collision_data' not in h5_file.keys():
            raise ValueError('collision_data not available in this HDF5-data file. It can not be used with NLTE')
        # np.array() copies; attrs are read into memory as well.
        collision_data = np.array(h5_file['collision_data'])
        collision_temperatures = h5_file['collision_data'].attrs['temperatures']
    return collision_data, collision_temperatures
def read_ion_cx_data(fname):
    """Read ionization charge-exchange threshold and support datasets.

    Returns a tuple ``(ion_cx_th_data, ion_cx_sp_data)`` of live h5py
    datasets (the file is deliberately left open so they stay readable),
    or None when the file cannot be opened — preserving the original
    best-effort behaviour.
    """
    try:
        h5_file = h5py.File(fname, 'r')
        ion_cx_th_data = h5_file['ionization_cx_threshold']
        ion_cx_sp_data = h5_file['ionization_cx_support']
        return ion_cx_th_data, ion_cx_sp_data
    except IOError as err:
        # 'except IOError, err' was Python-2-only syntax; 'as' works on
        # both 2.6+ and 3. Report through the module logger instead of
        # bare print() calls.
        logger.critical('Cannot import. Error opening the file to read ionization_cx '
                        '[Errno %s]: %s', err.errno, err)
        return None
def read_macro_atom_data(fname):
    """Read macro atom transition data and its reference table from the HDF5 file.

    Returns a tuple ``(macro_atom_data, macro_atom_references)`` of live
    h5py datasets; the file handle is deliberately left open so the
    returned datasets remain readable.

    Raises ValueError when fname is None or the dataset is missing, and
    IOError when the file does not exist.
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')
    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')
    h5_file = h5py.File(fname, 'r')
    if 'macro_atom_data' not in h5_file.keys():
        raise ValueError('Macro Atom Data (macro_atom_data) is not in this HDF5-data file. '
                         'It is needed for complex line interaction')
    return h5_file['macro_atom_data'], h5_file['macro_atom_references']
class AtomData(object):
    """
    Class for storing atomic data

    Parameters
    ----------
    basic_atom_data : `~astropy.table.Table`
        containing the basic atom data: z, symbol, and mass
    ionization_data : ~astropy.table.Table
        containing the ionization data: z, ion, and ionization energy
        ::important to note here is that ion describes the final ion state
            e.g. H I - H II is described with ion=2
    levels : ~astropy.table.Table
        containing the levels data: z, ion, level_number, energy, g
    lines : ~astropy.table.Table
        containing the lines data: wavelength, z, ion, levels_number_lower,
        levels_number_upper, f_lu, f_ul
    macro_atom_data : tuple of ~astropy.table.Table
        default ~None, a tuple of the macro-atom data and macro-atom references
    zeta_data : ~dict of interpolation objects
        default ~None
    """
    @classmethod
    def from_hdf5(cls, fname=None):
        """
        Function to read all the atom data from a special TARDIS HDF5 File.

        Parameters
        ----------
        fname: str, optional
            the default for this is `None` and then it will use the very limited atomic_data shipped with TARDIS
            For more complex atomic data please contact the authors.
        """
        # Fall back to the small database bundled with the package.
        if fname is None:
            fname = default_atom_h5_path
        if not os.path.exists(fname):
            raise ValueError("Supplied Atomic Model Database %s does not exists" % fname)
        # Mandatory datasets.
        atom_data = read_basic_atom_data(fname)
        ionization_data = read_ionization_data(fname)
        levels_data = read_levels_data(fname)
        lines_data = read_lines_data(fname)
        # Peek at the available dataset names so optional datasets can be
        # loaded only when present.
        with h5py.File(fname, 'r') as h5_file:
            h5_datasets = h5_file.keys()
        if 'macro_atom_data' in h5_datasets:
            macro_atom_data = read_macro_atom_data(fname)
        else:
            macro_atom_data = None
        if 'zeta_data' in h5_datasets:
            zeta_data = read_zeta_data(fname)
        else:
            zeta_data = None
        if 'collision_data' in h5_datasets:
            collision_data, collision_data_temperatures = read_collision_data(fname)
        else:
            collision_data, collision_data_temperatures = (None, None)
        if 'synpp_refs' in h5_datasets:
            synpp_refs = read_synpp_refs(fname)
        else:
            synpp_refs = None
        # NOTE(review): both operands of this 'and' test the same key —
        # one of them was probably meant to be a different dataset name.
        if 'ion_cx_data' in h5_datasets and 'ion_cx_data' in h5_datasets:
            ion_cx_data = read_ion_cx_data(fname)
        else:
            ion_cx_data = None
        atom_data = cls(atom_data=atom_data, ionization_data=ionization_data, levels_data=levels_data,
                        lines_data=lines_data, macro_atom_data=macro_atom_data, zeta_data=zeta_data,
                        collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs,
                        ion_cx_data=ion_cx_data)
        # Record provenance metadata stored as attributes on the HDF5 file.
        with h5py.File(fname, 'r') as h5_file:
            atom_data.uuid1 = h5_file.attrs['uuid1']
            atom_data.md5 = h5_file.attrs['md5']
            atom_data.version = h5_file.attrs.get('database_version', None)
            if atom_data.version is not None:
                atom_data.data_sources = pickle.loads(h5_file.attrs['data_sources'])
            logger.info('Read Atom Data with UUID=%s and MD5=%s', atom_data.uuid1, atom_data.md5)
        return atom_data
    def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_atom_data=None, zeta_data=None,
                 collision_data=None, synpp_refs=None, ion_cx_data=None):
        # Becomes True after prepare_atom_data(); guards against double
        # preparation.
        self.prepared = False
        # Presence flags for the various (optional) datasets.
        if levels_data is not None:
            self.has_levels = True
        else:
            self.has_levels = False
        if lines_data is not None:
            self.has_lines = True
        else:
            self.has_lines = False
        if macro_atom_data is not None:
            self.has_macro_atom = True
            self.macro_atom_data_all = DataFrame(macro_atom_data[0].__array__())
            self.macro_atom_references_all = DataFrame(macro_atom_data[1].__array__())
        else:
            self.has_macro_atom = False
        if ion_cx_data is not None:
            self.has_ion_cx_data = True
            #TODO:Farm a panda here
            self.ion_cx_th_data = DataFrame(np.array(ion_cx_data[0]))
            self.ion_cx_th_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
            self.ion_cx_sp_data = DataFrame(np.array(ion_cx_data[1]))
            # NOTE(review): unlike the call above, this set_index is not
            # inplace and its return value is discarded — likely a bug.
            self.ion_cx_sp_data.set_index(['atomic_number', 'ion_number', 'level_id'])
        else:
            self.has_ion_cx_data = False
        if zeta_data is not None:
            self.zeta_data = zeta_data
            self.has_zeta_data = True
        else:
            self.has_zeta_data = False
        # NOTE(review): collision_data defaults to None, in which case this
        # subscript would raise TypeError; callers always pass a
        # (data, temperatures) tuple — confirm before relying on the default.
        if collision_data[0] is not None:
            self.collision_data = DataFrame(collision_data[0])
            self.collision_data_temperatures = collision_data[1]
            self.collision_data.set_index(['atomic_number', 'ion_number', 'level_number_lower', 'level_number_upper'],
                                          inplace=True)
            self.has_collision_data = True
        else:
            self.has_collision_data = False
        if synpp_refs is not None:
            self.has_synpp_refs = True
            self.synpp_refs = pd.DataFrame(synpp_refs)
            self.synpp_refs.set_index(['atomic_number', 'ion_number'], inplace=True)
        else:
            self.has_synpp_refs = False
        self.atom_data = DataFrame(atom_data.__array__())
        self.atom_data.set_index('atomic_number', inplace=True)
        # We have to use constants.u because astropy uses different values for the unit u and the constant.
        # This is changed in later versions of astropy (the value of constants.u is used in all cases)
        if units.u.cgs == constants.u.cgs:
            self.atom_data.mass = units.Quantity(self.atom_data.mass.values, 'u').cgs
        else:
            self.atom_data.mass = constants.u.cgs * self.atom_data.mass.values
        self.ionization_data = DataFrame(ionization_data.__array__())
        self.ionization_data.set_index(['atomic_number', 'ion_number'], inplace=True)
        # Energies are stored in eV; convert to CGS (erg) for internal use.
        self.ionization_data.ionization_energy = units.Quantity(self.ionization_data.ionization_energy.values, 'eV').cgs
        self.levels = DataFrame(levels_data.__array__())
        self.levels.energy = units.Quantity(self.levels.energy.values, 'eV').cgs
        self.lines = DataFrame(lines_data.__array__())
        # Derived line quantities: frequency (Hz) and wavelength in cm.
        self.lines['nu'] = units.Quantity(self.lines['wavelength'], 'angstrom').to('Hz', units.spectral())
        self.lines['wavelength_cm'] = units.Quantity(self.lines['wavelength'], 'angstrom').cgs
        #tmp_lines_index = pd.MultiIndex.from_arrays(self.lines)
        #self.lines_inde
        # Bidirectional symbol <-> atomic number lookups for this dataset.
        self.symbol2atomic_number = OrderedDict(zip(self.atom_data['symbol'].values, self.atom_data.index))
        self.atomic_number2symbol = OrderedDict(zip(self.atom_data.index, self.atom_data['symbol']))
        self.ion_cx_data = ion_cx_data
    def prepare_atom_data(self, selected_atomic_numbers, line_interaction_type='scatter', max_ion_number=None,
                          nlte_species=[]):
        """
        Prepares the atom data to set the lines, levels and if requested macro atom data.
        This function mainly cuts the `levels` and `lines` by discarding any data that is not needed (any data
        for atoms that are not needed

        Parameters
        ----------
        selected_atomic_numbers : `~set`
            set of selected atom numbers, e.g. set([14, 26])
        line_interaction_type : `~str`
            can be 'scatter', 'downbranch' or 'macroatom'
        max_ion_number : `~int`
            maximum ion number to be included in the calculation
        nlte_species : list
            species (atomic_number, ion_number) pairs treated in NLTE.
            NOTE(review): mutable default argument ([]) — harmless only if
            never mutated; should be None-guarded.
        """
        # Preparation is one-shot: a second call raises.
        if not self.prepared:
            self.prepared = True
        else:
            raise AtomDataNotPreparedError("AtomData was already prepared")
        self.selected_atomic_numbers = selected_atomic_numbers
        self.nlte_species = nlte_species
        # Cut levels down to the selected atoms (and optional ion cap), then
        # re-index by (atomic_number, ion_number, level_number).
        self.levels = self.levels.reset_index(drop=True)
        self.levels = self.levels[self.levels['atomic_number'].isin(self.selected_atomic_numbers)]
        if max_ion_number is not None:
            self.levels = self.levels[self.levels['ion_number'] <= max_ion_number]
        self.levels = self.levels.set_index(['atomic_number', 'ion_number', 'level_number'])
        # Positional index (0..n-1) for each surviving level.
        self.levels_index = pd.Series(np.arange(len(self.levels), dtype=int), index=self.levels.index)
        #cutting levels_lines
        self.lines = self.lines[self.lines['atomic_number'].isin(self.selected_atomic_numbers)]
        if max_ion_number is not None:
            self.lines = self.lines[self.lines['ion_number'] <= max_ion_number]
        # self.lines.sort(['wavelength', 'line_id'], inplace=True)
        # NOTE(review): DataFrame.sort(...) and the .ix indexer below are
        # pre-0.17 pandas APIs (now sort_values / .loc) — this module is
        # pinned to an old pandas; confirm before upgrading.
        self.lines.sort(['wavelength'], inplace=True)
        self.lines.set_index('line_id', inplace=True)
        self.lines_index = pd.Series(np.arange(len(self.lines), dtype=int), index=self.lines.index)
        # Map each line's lower/upper level to its positional level index.
        tmp_lines_lower2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                               self.lines['level_number_lower']])
        self.lines_lower2level_idx = self.levels_index.ix[tmp_lines_lower2level_idx].values.astype(np.int64)
        tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                               self.lines['level_number_upper']])
        self.lines_upper2level_idx = self.levels_index.ix[tmp_lines_upper2level_idx].values.astype(np.int64)
        self.atom_ion_index = None
        self.levels_index2atom_ion_index = None
        # Macro-atom tables are only needed for non-scatter line interaction.
        if self.has_macro_atom and not (line_interaction_type == 'scatter'):
            self.macro_atom_data = self.macro_atom_data_all[
                self.macro_atom_data_all['atomic_number'].isin(self.selected_atomic_numbers)]
            if max_ion_number is not None:
                self.macro_atom_data = self.macro_atom_data[self.macro_atom_data['ion_number'] <= max_ion_number]
            self.macro_atom_references = self.macro_atom_references_all[
                self.macro_atom_references_all['atomic_number'].isin(
                    self.selected_atomic_numbers)]
            if max_ion_number is not None:
                self.macro_atom_references = self.macro_atom_references[
                    self.macro_atom_references['ion_number'] <= max_ion_number]
            if line_interaction_type == 'downbranch':
                # Downbranch keeps only downward (-1) transitions; block
                # references are cumulative offsets into the transition table.
                self.macro_atom_data = self.macro_atom_data[(self.macro_atom_data['transition_type'] == -1).values]
                self.macro_atom_references = self.macro_atom_references[self.macro_atom_references['count_down'] > 0]
                self.macro_atom_references['count_total'] = self.macro_atom_references['count_down']
                self.macro_atom_references['block_references'] = np.hstack((0,
                                                                            np.cumsum(self.macro_atom_references[
                                                                                'count_down'].values[:-1])))
            elif line_interaction_type == 'macroatom':
                block_references = np.hstack((0, np.cumsum(
                    self.macro_atom_references['count_total'].values[:-1])))
                self.macro_atom_references.insert(len(
                    self.macro_atom_references.columns), 'block_references',
                    pd.Series(block_references,
                              index=self.macro_atom_references.index))
            self.macro_atom_references.set_index(['atomic_number', 'ion_number', 'source_level_number'], inplace=True)
            # Positional index for each reference row.
            self.macro_atom_references.insert(len(
                self.macro_atom_references.columns), 'references_idx',
                pd.Series(np.arange(len(self.macro_atom_references)),
                          index=self.macro_atom_references.index))
            # Resolve each macro-atom transition to its line's positional index.
            self.macro_atom_data.insert(len(
                self.macro_atom_data.columns), 'lines_idx',
                pd.Series(self.lines_index.ix[self.macro_atom_data[
                    'transition_line_id']].values,
                          index=self.macro_atom_data.index))
            tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays(
                [self.lines['atomic_number'], self.lines['ion_number'],
                 self.lines['level_number_upper']])
            self.lines_upper2macro_reference_idx = self.macro_atom_references['references_idx'].ix[
                tmp_lines_upper2level_idx].values.astype(np.int64)
            tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays([self.macro_atom_data['atomic_number'],
                                                                         self.macro_atom_data['ion_number'],
                                                                         self.macro_atom_data[
                                                                             'destination_level_number']])
            if line_interaction_type == 'macroatom':
                #Sets all
                self.macro_atom_data.insert(len(
                    self.macro_atom_data.columns), 'destination_level_idx',
                    pd.Series(self.macro_atom_references['references_idx'].ix[
                        tmp_macro_destination_level_idx].values.astype(
                        np.int64), index=self.macro_atom_data.index))
            elif line_interaction_type == 'downbranch':
                # Sets all the destination levels to -1 to indicate that they
                # are not used in downbranch calculations
                self.macro_atom_data.loc[:, 'destination_level_idx'] = (
                    np.ones(len(self.macro_atom_data)) * -1).astype(np.int64)
        self.nlte_data = NLTEData(self, nlte_species)
    def __repr__(self):
        return "<Atomic Data UUID=%s MD5=%s Lines=%d Levels=%d>" % \
            (self.uuid1, self.md5, self.lines.atomic_number.count(), self.levels.energy.count())
class NLTEData(object):
    """Per-species index arrays, level/line masks and collision-rate
    interpolators supporting NLTE level-population calculations.

    Built by AtomData.prepare_atom_data from the already-prepared tables.
    """
    def __init__(self, atom_data, nlte_species):
        self.atom_data = atom_data
        self.lines = atom_data.lines.reset_index(drop=True)
        self.nlte_species = nlte_species
        if nlte_species:
            logger.info('Preparing the NLTE data')
            self._init_indices()
            self._create_nlte_mask()
            if atom_data.has_collision_data:
                self._create_collision_coefficient_matrix()
        else:
            # Still build the (all-False) masks so consumers can rely on
            # their presence.
            self._create_nlte_mask()
    def _init_indices(self):
        # For each NLTE species cache the line indices and per-line data
        # (level numbers and Einstein coefficients) as plain numpy arrays,
        # keyed by the (atomic_number, ion_number) tuple.
        self.lines_idx = {}
        self.lines_level_number_lower = {}
        self.lines_level_number_upper = {}
        self.A_uls = {}
        self.B_uls = {}
        self.B_lus = {}
        for species in self.nlte_species:
            lines_idx = np.where((self.lines.atomic_number == species[0]) &
                                 (self.lines.ion_number == species[1]))
            self.lines_idx[species] = lines_idx
            self.lines_level_number_lower[species] = self.lines.level_number_lower.values[lines_idx].astype(int)
            self.lines_level_number_upper[species] = self.lines.level_number_upper.values[lines_idx].astype(int)
            self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]
            self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]
            self.B_lus[species] = self.atom_data.lines.B_lu.values[lines_idx]
    def _create_nlte_mask(self):
        # Boolean masks flagging which levels/lines belong to NLTE species.
        self.nlte_levels_mask = np.zeros(self.atom_data.levels.energy.count()).astype(bool)
        self.nlte_lines_mask = np.zeros(self.atom_data.lines.wavelength.count()).astype(bool)
        for species in self.nlte_species:
            # Levels are indexed by (atomic_number, ion_number, ...); match
            # on the first two index levels.
            current_levels_mask = (self.atom_data.levels.index.get_level_values(0) == species[0]) & \
                                  (self.atom_data.levels.index.get_level_values(1) == species[1])
            current_lines_mask = (self.atom_data.lines.atomic_number.values == species[0]) & \
                                 (self.atom_data.lines.ion_number.values == species[1])
            self.nlte_levels_mask |= current_levels_mask
            self.nlte_lines_mask |= current_lines_mask
    def _create_collision_coefficient_matrix(self):
        # Build, per species, (n_levels x n_levels x n_temperatures) C_ul
        # matrices plus delta-E and g-ratio matrices, and wrap C_ul in a
        # temperature interpolator.
        self.C_ul_interpolator = {}
        self.delta_E_matrices = {}
        self.g_ratio_matrices = {}
        collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
        for species in self.nlte_species:
            no_of_levels = self.atom_data.levels.ix[species].energy.count()
            C_ul_matrix = np.zeros((no_of_levels, no_of_levels, len(self.atom_data.collision_data_temperatures)))
            delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
            g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))
            for (atomic_number, ion_number, level_number_lower, level_number_upper), line in \
                    collision_group.get_group(species).iterrows():
                # Remaining columns hold C_ul on the temperature grid.
                C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
                delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
                #TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.
                g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
            self.C_ul_interpolator[species] = interpolate.interp1d(self.atom_data.collision_data_temperatures,
                                                                   C_ul_matrix)
            self.delta_E_matrices[species] = delta_E_matrix
            self.g_ratio_matrices[species] = g_ratio_matrix
    def get_collision_matrix(self, species, t_electrons):
        """Interpolate C_ul at t_electrons and return the full (symmetrized)
        collision matrix C_ul + C_lu^T for the given species."""
        c_ul_matrix = self.C_ul_interpolator[species](t_electrons)
        no_of_levels = c_ul_matrix.shape[0]
        # Missing transitions interpolate to NaN; treat them as zero rate.
        c_ul_matrix[np.isnan(c_ul_matrix)] = 0.0
        #TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix
        # Detailed balance: C_lu = C_ul * exp(-dE / T_e) * (g_u/g_l ratio).
        c_lu_matrix = c_ul_matrix * np.exp(-self.delta_E_matrices[species].reshape((no_of_levels, no_of_levels, 1)) /
                                           t_electrons.reshape((1, 1, t_electrons.shape[0]))) * \
                      self.g_ratio_matrices[species].reshape((no_of_levels, no_of_levels, 1))
        return c_ul_matrix + c_lu_matrix.transpose(1, 0, 2)
| {
"repo_name": "orbitfold/tardis",
"path": "tardis/atomic.py",
"copies": "2",
"size": "25864",
"license": "bsd-3-clause",
"hash": -1458541272111465500,
"line_mean": 37.317037037,
"line_max": 120,
"alpha_frac": 0.599713888,
"autogenerated": false,
"ratio": 3.678566349025743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5278280237025743,
"avg_score": null,
"num_lines": null
} |
# atomic model
import logging
import numpy as np
import pandas as pd
from scipy import interpolate
from collections import OrderedDict
from astropy import units as u
from tardis import constants as const
from astropy.units import Quantity
from tardis.io.atom_data.util import resolve_atom_data_fname
class AtomDataNotPreparedError(Exception):
    """Raised when ``AtomData.prepare_atom_data`` is invoked on already-prepared data."""
    pass
class AtomDataMissingError(Exception):
    """Raised when required atomic datasets are absent or supplied inconsistently."""
    pass
logger = logging.getLogger(__name__)
class AtomData(object):
    """
    Class for storing atomic data

    Parameters
    ----------
    atom_data : pandas.DataFrame
        A DataFrame containing the *basic atomic data* with:
            index : atomic_number
            columns : symbol, name, mass[u].
    ionization_data : pandas.DataFrame
        A DataFrame containing the *ionization data* with:
            index : atomic_number, ion_number
            columns : ionization_energy[eV].
       It is important to note here is that `ion_number` describes the *final ion state*
       e.g. H I - H II is described with ion=1
    levels : pandas.DataFrame
        A DataFrame containing the *levels data* with:
            index : numerical index
            columns : atomic_number, ion_number, level_number, energy[eV], g[1], metastable.
    lines : pandas.DataFrame
        A DataFrame containing the *lines data* with:
            index : numerical index
            columns : line_id, atomic_number, ion_number, level_number_lower, level_number_upper,
                wavelength[angstrom], nu[Hz], f_lu[1], f_ul[1], B_ul[?], B_ul[?], A_ul[1/s].
    macro_atom_data :
        A DataFrame containing the *macro atom data* with:
            index : numerical index
            columns : atomic_number, ion_number, source_level_number, destination_level_number,
                transition_line_id, transition_type, transition_probability;
    macro_atom_references :
        A DataFrame containing the *macro atom references* with:
            index : numerical index
            columns : atomic_number, ion_number, source_level_number, count_down, count_up, count_total.
        Refer to the docs: http://tardis.readthedocs.io/en/latest/physics/plasma/macroatom.html
    collision_data : (pandas.DataFrame, np.array)
        A DataFrame containing the *electron collisions data* with:
            index : atomic_number, ion_number, level_number_lower, level_number_upper
            columns : e_col_id, delta_e, g_ratio, c_ul;
    collision_data_temperatures : np.array
        An array with the collision temperatures.
    zeta_data :
        A DataFrame containing the *zeta data* for the
        nebular ionization calculation
        (i.e., the fraction of recombinations that go directly to the
        ground state) with:
            index : atomic_number, ion_charge
            columns : temperatures[K]
    synpp_refs : ?
    photoionization_data : pandas.DataFrame
        A DataFrame containing the *photoionization data* with:
            index : numerical index
            columns : atomic_number, ion_number, level_number, nu[Hz], x_sect[cm^2]
    two_photon_data : pandas.DataFrame
        A DataFrame containing the *two photon decay data* with:
            index: atomic_number, ion_number, level_number_lower, level_number_upper
            columns: A_ul[1/s], nu0[Hz], alpha, beta, gamma

    Attributes
    ----------
    prepared : bool
    atom_data : pandas.DataFrame
    ionization_data : pandas.DataFrame
    macro_atom_data_all : pandas.DataFrame
    macro_atom_references_all : pandas.DataFrame
    collision_data : pandas.DataFrame
    collision_data_temperatures : numpy.array
    zeta_data : pandas.DataFrame
    synpp_refs : pandas.DataFrame
    symbol2atomic_number : OrderedDict
    atomic_number2symbol : OrderedDict
    photoionization_data : pandas.DataFrame
    two_photon_data : pandas.DataFrame

    Methods
    -------
    from_hdf
    prepare_atom_data

    Notes
    -----
    1. The units of some columns are given in the square brackets. They are **NOT** the parts of columns' names!
    """

    # Names of the datasets looked up in the HDF store by ``from_hdf``.
    hdf_names = [
        "atom_data",
        "ionization_data",
        "levels",
        "lines",
        "macro_atom_data",
        "macro_atom_references",
        "zeta_data",
        "collision_data",
        "collision_data_temperatures",
        "synpp_refs",
        "photoionization_data",
        "yg_data",
        "two_photon_data",
    ]

    # List of tuples of the related dataframes.
    # Either all or none of the related dataframes must be given
    related_groups = [
        ("macro_atom_data_all", "macro_atom_references_all"),
        ("collision_data", "collision_data_temperatures"),
    ]
@classmethod
def from_hdf(cls, fname=None):
    """
    Function to read the atom data from a TARDIS atom HDF Store

    Parameters
    ----------
    fname : str, optional
        Path to the HDFStore file or name of known atom data file
        (default: None)
    """
    dataframes = dict()
    nonavailable = list()

    fname = resolve_atom_data_fname(fname)

    # The store must stay open until the provenance attributes below are read.
    with pd.HDFStore(fname, "r") as store:
        for name in cls.hdf_names:
            try:
                dataframes[name] = store[name]
            except KeyError:
                # Dataset absent from this file; record it and continue.
                nonavailable.append(name)

        atom_data = cls(**dataframes)

        # Provenance attributes are optional; fall back to None when missing.
        try:
            atom_data.uuid1 = store.root._v_attrs["uuid1"].decode("ascii")
        except KeyError:
            atom_data.uuid1 = None

        try:
            atom_data.md5 = store.root._v_attrs["md5"].decode("ascii")
        except KeyError:
            atom_data.md5 = None

        try:
            atom_data.version = store.root._v_attrs["database_version"]
        except KeyError:
            atom_data.version = None

        # ToDo: store data sources as attributes in carsus

        logger.info(
            f"\n\tReading Atom Data with:\n\tUUID = {atom_data.uuid1}\n\tMD5 = {atom_data.md5} "
        )
        if nonavailable:
            logger.info(
                f'\n\tNon provided atomic data:\n\t{", ".join(nonavailable)}'
            )

    return atom_data
def __init__(
    self,
    atom_data,
    ionization_data,
    levels=None,
    lines=None,
    macro_atom_data=None,
    macro_atom_references=None,
    zeta_data=None,
    collision_data=None,
    collision_data_temperatures=None,
    synpp_refs=None,
    photoionization_data=None,
    yg_data=None,
    two_photon_data=None,
):
    """Convert the raw tables to CGS units and store them as attributes.

    See the class docstring for the meaning of each parameter.
    """
    self.prepared = False

    # CONVERT VALUES TO CGS UNITS

    # Convert atomic masses to CGS
    # We have to use constants.u because astropy uses
    # different values for the unit u and the constant.
    # This is changed in later versions of astropy (
    # the value of constants.u is used in all cases)
    if u.u.cgs == const.u.cgs:
        atom_data.loc[:, "mass"] = Quantity(
            atom_data["mass"].values, "u"
        ).cgs
    else:
        atom_data.loc[:, "mass"] = atom_data["mass"].values * const.u.cgs

    # Convert ionization energies to CGS
    ionization_data = ionization_data.squeeze()
    ionization_data[:] = Quantity(ionization_data[:], "eV").cgs

    # Convert energy to CGS
    levels.loc[:, "energy"] = Quantity(levels["energy"].values, "eV").cgs

    # Create a new columns with wavelengths in the CGS units
    lines["wavelength_cm"] = Quantity(lines["wavelength"], "angstrom").cgs

    # SET ATTRIBUTES

    self.atom_data = atom_data
    self.ionization_data = ionization_data
    self.levels = levels
    self.lines = lines

    # Rename these (drop "_all") when `prepare_atom_data` is removed!
    self.macro_atom_data_all = macro_atom_data
    self.macro_atom_references_all = macro_atom_references

    self.zeta_data = zeta_data

    self.collision_data = collision_data
    self.collision_data_temperatures = collision_data_temperatures

    self.synpp_refs = synpp_refs

    self.photoionization_data = photoionization_data

    self.yg_data = yg_data

    self.two_photon_data = two_photon_data

    # Enforce that related dataframes were supplied together (or not at all).
    self._check_related()

    self.symbol2atomic_number = OrderedDict(
        zip(self.atom_data["symbol"].values, self.atom_data.index)
    )
    self.atomic_number2symbol = OrderedDict(
        zip(self.atom_data.index, self.atom_data["symbol"])
    )
def _check_related(self):
    """
    Check that either all or none of the related dataframes are given.

    Raises
    ------
    AtomDataMissingError
        If only part of a related group (see ``related_groups``) was supplied.
    """
    for group in self.related_groups:
        missing = [name for name in group if getattr(self, name) is None]
        # Either the whole group is present or the whole group is absent.
        if missing and len(missing) != len(group):
            # BUG FIX: the original concatenated two f-strings with no
            # separating space ("[...]were not given").
            raise AtomDataMissingError(
                f'The following dataframes from the related group [{", ".join(group)}] '
                f'were not given: {", ".join(missing)}'
            )
def prepare_atom_data(
    self,
    selected_atomic_numbers,
    line_interaction_type="scatter",
    nlte_species=[],
):
    """
    Prepares the atom data to set the lines, levels and if requested macro
    atom data. This function mainly cuts the `levels` and `lines` by
    discarding any data that is not needed (any data for atoms that are not
    needed

    Parameters
    ----------
    selected_atoms : set
        set of selected atom numbers, e.g. set([14, 26])
    line_interaction_type : str
        can be 'scatter', 'downbranch' or 'macroatom'
    nlte_species : list
        (atomic_number, ion_number) tuples treated in NLTE

    Raises
    ------
    AtomDataNotPreparedError
        If called a second time (the cuts below are destructive).
    """
    if not self.prepared:
        self.prepared = True
    else:
        raise AtomDataNotPreparedError("AtomData was already prepared")
    self.selected_atomic_numbers = selected_atomic_numbers

    self._check_selected_atomic_numbers()

    self.nlte_species = nlte_species

    # Keep only the levels of the selected atoms.
    self.levels = self.levels[
        self.levels.index.isin(
            self.selected_atomic_numbers, level="atomic_number"
        )
    ]

    # Positional index (0..n-1) of each surviving level.
    self.levels_index = pd.Series(
        np.arange(len(self.levels), dtype=int), index=self.levels.index
    )

    # cutting levels_lines
    self.lines = self.lines[
        self.lines.index.isin(
            self.selected_atomic_numbers, level="atomic_number"
        )
    ]

    self.lines.sort_values(by="wavelength", inplace=True)

    # Positional index of each surviving line, keyed by line_id.
    self.lines_index = pd.Series(
        np.arange(len(self.lines), dtype=int),
        index=self.lines.set_index("line_id").index,
    )

    # Map each line to the positional index of its lower / upper level.
    tmp_lines_lower2level_idx = self.lines.index.droplevel(
        "level_number_upper"
    )

    self.lines_lower2level_idx = (
        self.levels_index.loc[tmp_lines_lower2level_idx]
        .astype(np.int64)
        .values
    )

    tmp_lines_upper2level_idx = self.lines.index.droplevel(
        "level_number_lower"
    )

    self.lines_upper2level_idx = (
        self.levels_index.loc[tmp_lines_upper2level_idx]
        .astype(np.int64)
        .values
    )

    if (
        self.macro_atom_data_all is not None
        and not line_interaction_type == "scatter"
    ):

        self.macro_atom_data = self.macro_atom_data_all.loc[
            self.macro_atom_data_all["atomic_number"].isin(
                self.selected_atomic_numbers
            )
        ].copy()

        self.macro_atom_references = self.macro_atom_references_all[
            self.macro_atom_references_all.index.isin(
                self.selected_atomic_numbers, level="atomic_number"
            )
        ].copy()

        if line_interaction_type == "downbranch":
            # Downbranch keeps only downward (emission) transitions.
            self.macro_atom_data = self.macro_atom_data.loc[
                self.macro_atom_data["transition_type"] == -1
            ]

            self.macro_atom_references = self.macro_atom_references.loc[
                self.macro_atom_references["count_down"] > 0
            ]
            self.macro_atom_references.loc[
                :, "count_total"
            ] = self.macro_atom_references["count_down"]
            # Offset of each level's transition block in the flattened table.
            self.macro_atom_references.loc[
                :, "block_references"
            ] = np.hstack(
                (
                    0,
                    np.cumsum(
                        self.macro_atom_references["count_down"].values[:-1]
                    ),
                )
            )

        elif line_interaction_type == "macroatom":
            self.macro_atom_references.loc[
                :, "block_references"
            ] = np.hstack(
                (
                    0,
                    np.cumsum(
                        self.macro_atom_references["count_total"].values[
                            :-1
                        ]
                    ),
                )
            )

        self.macro_atom_references.loc[:, "references_idx"] = np.arange(
            len(self.macro_atom_references)
        )

        self.macro_atom_data.loc[:, "lines_idx"] = self.lines_index.loc[
            self.macro_atom_data["transition_line_id"]
        ].values

        self.lines_upper2macro_reference_idx = (
            self.macro_atom_references.loc[
                tmp_lines_upper2level_idx, "references_idx"
            ]
            .astype(np.int64)
            .values
        )

        if line_interaction_type == "macroatom":
            # Sets all
            tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays(
                [
                    self.macro_atom_data["atomic_number"],
                    self.macro_atom_data["ion_number"],
                    self.macro_atom_data["destination_level_number"],
                ]
            )

            tmp_macro_source_level_idx = pd.MultiIndex.from_arrays(
                [
                    self.macro_atom_data["atomic_number"],
                    self.macro_atom_data["ion_number"],
                    self.macro_atom_data["source_level_number"],
                ]
            )

            self.macro_atom_data.loc[:, "destination_level_idx"] = (
                self.macro_atom_references.loc[
                    tmp_macro_destination_level_idx, "references_idx"
                ]
                .astype(np.int64)
                .values
            )

            self.macro_atom_data.loc[:, "source_level_idx"] = (
                self.macro_atom_references.loc[
                    tmp_macro_source_level_idx, "references_idx"
                ]
                .astype(np.int64)
                .values
            )

        elif line_interaction_type == "downbranch":
            # Sets all the destination levels to -1 to indicate that they
            # are not used in downbranch calculations
            self.macro_atom_data.loc[:, "destination_level_idx"] = -1

    if self.yg_data is not None:
        self.yg_data = self.yg_data.loc[self.selected_atomic_numbers]

    self.nlte_data = NLTEData(self, nlte_species)
def _check_selected_atomic_numbers(self):
    """
    Verify that ionization data exists for every selected atomic number.

    Raises
    ------
    AtomDataMissingError
        If any selected atomic number has no ionization data.
    """
    # ROBUSTNESS FIX: accept any sequence (list, tuple, ndarray); the
    # boolean-mask indexing below previously failed on a plain list.
    selected_atomic_numbers = np.asarray(self.selected_atomic_numbers)
    available_atomic_numbers = np.unique(
        self.ionization_data.index.get_level_values(0)
    )
    atomic_number_check = np.isin(
        selected_atomic_numbers, available_atomic_numbers
    )

    if not all(atomic_number_check):
        missing_atom_mask = np.logical_not(atomic_number_check)
        missing_atomic_numbers = selected_atomic_numbers[missing_atom_mask]
        missing_numbers_str = ",".join(missing_atomic_numbers.astype("str"))
        raise AtomDataMissingError(
            f"For atomic numbers {missing_numbers_str} there is no atomic data."
        )
def __repr__(self):
    """Short summary including provenance hashes and table sizes."""
    n_lines = self.lines.line_id.count()
    n_levels = self.levels.energy.count()
    return (
        f"<Atomic Data UUID={self.uuid1} MD5={self.md5} "
        f"Lines={n_lines:d} Levels={n_levels:d}>"
    )
class NLTEData(object):
    """
    Per-species index arrays and collision matrices needed for NLTE
    level-population calculations.

    Parameters
    ----------
    atom_data : AtomData
        Prepared atomic data the indices are derived from.
    nlte_species : list
        (atomic_number, ion_number) tuples treated in NLTE.
    """

    def __init__(self, atom_data, nlte_species):
        self.atom_data = atom_data
        self.lines = atom_data.lines.reset_index()
        self.nlte_species = nlte_species

        if nlte_species:
            logger.info("Preparing the NLTE data")
            self._init_indices()
            # Collision matrices require the optional collision dataset.
            if atom_data.collision_data is not None:
                self._create_collision_coefficient_matrix()
def _init_indices(self):
    """Collect, per NLTE species, line indices, level numbers and the
    Einstein coefficients (A_ul, B_ul, B_lu) of that species' lines."""
    self.lines_idx = {}
    self.lines_level_number_lower = {}
    self.lines_level_number_upper = {}
    self.A_uls = {}
    self.B_uls = {}
    self.B_lus = {}

    for species in self.nlte_species:
        # Positions of this species' lines in the (reset-index) lines table.
        lines_idx = np.where(
            (self.lines.atomic_number == species[0])
            & (self.lines.ion_number == species[1])
        )
        self.lines_idx[species] = lines_idx
        self.lines_level_number_lower[
            species
        ] = self.lines.level_number_lower.values[lines_idx].astype(int)
        self.lines_level_number_upper[
            species
        ] = self.lines.level_number_upper.values[lines_idx].astype(int)

        self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]
        self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]
        self.B_lus[species] = self.atom_data.lines.B_lu.values[lines_idx]
def _create_collision_coefficient_matrix(self):
    """Build, for each NLTE species, a temperature interpolator for the C_ul
    collision rates plus (lower, upper) matrices of delta_E and the inverted
    statistical-weight ratio."""
    self.C_ul_interpolator = {}
    self.delta_E_matrices = {}
    self.g_ratio_matrices = {}
    collision_group = self.atom_data.collision_data.groupby(
        level=["atomic_number", "ion_number"]
    )
    for species in self.nlte_species:
        no_of_levels = self.atom_data.levels.loc[species].energy.count()
        C_ul_matrix = np.zeros(
            (
                no_of_levels,
                no_of_levels,
                len(self.atom_data.collision_data_temperatures),
            )
        )
        delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
        g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))

        for (
            (
                atomic_number,
                ion_number,
                level_number_lower,
                level_number_upper,
            ),
            line,
        ) in collision_group.get_group(species).iterrows():
            # line.columns : delta_e, g_ratio, temperatures ...
            C_ul_matrix[
                level_number_lower, level_number_upper, :
            ] = line.values[2:]
            delta_E_matrix[level_number_lower, level_number_upper] = line[
                "delta_e"
            ]
            # TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.
            g_ratio_matrix[level_number_lower, level_number_upper] = (
                1 / line["g_ratio"]
            )
        self.C_ul_interpolator[species] = interpolate.interp1d(
            self.atom_data.collision_data_temperatures, C_ul_matrix
        )
        self.delta_E_matrices[species] = delta_E_matrix

        self.g_ratio_matrices[species] = g_ratio_matrix
def get_collision_matrix(self, species, t_electrons):
    """
    Creat collision matrix by interpolating the C_ul values for
    the desired temperatures, then adding the detailed-balance C_lu part.
    """
    C_ul = self.C_ul_interpolator[species](t_electrons)
    C_ul[np.isnan(C_ul)] = 0.0
    n = C_ul.shape[0]
    # TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix
    exponent = -self.delta_E_matrices[species].reshape(
        (n, n, 1)
    ) / t_electrons.reshape((1, 1, t_electrons.shape[0]))
    C_lu = C_ul * np.exp(exponent) * self.g_ratio_matrices[species].reshape(
        (n, n, 1)
    )
    return C_ul + C_lu.transpose(1, 0, 2)
| {
"repo_name": "tardis-sn/tardis",
"path": "tardis/io/atom_data/base.py",
"copies": "1",
"size": "20609",
"license": "bsd-3-clause",
"hash": -1922457817749226000,
"line_mean": 33.4056761269,
"line_max": 139,
"alpha_frac": 0.550390606,
"autogenerated": false,
"ratio": 4.039396315170522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5089786921170522,
"avg_score": null,
"num_lines": null
} |
# atomic model
#TODO revisit import statements and reorganize
from scipy import interpolate
import numpy as np
import logging
import os
import h5py
import cPickle as pickle
from astropy import table, units
from collections import OrderedDict
from pandas import DataFrame
import pandas as pd
logger = logging.getLogger(__name__)

# Default atomic database shipped with the package.
default_atom_h5_path = os.path.join(os.path.dirname(__file__), 'data', 'atom_data.h5')
def data_path(fname):
    """Return the absolute path of *fname* inside this package's data directory."""
    return os.path.join(os.path.dirname(__file__), 'data', fname)
# Map atomic numbers <-> element symbols from the bundled data table.
atomic_symbols_data = np.recfromtxt(data_path('atomic_symbols.dat'),
                                    names=['atomic_number', 'symbol'])

symbol2atomic_number = OrderedDict(zip(atomic_symbols_data['symbol'], atomic_symbols_data['atomic_number']))
atomic_number2symbol = OrderedDict(atomic_symbols_data)
def read_atomic_data(fname=None):
    """Deprecated alias for :func:`read_basic_atom_data`.

    BUG FIX: the original used ``@PendingDeprecationWarning`` as a decorator,
    which rebinds the name to a warning *instance* and makes it uncallable.
    """
    import warnings
    warnings.warn('read_atomic_data is deprecated, use read_basic_atom_data',
                  PendingDeprecationWarning)
    return read_basic_atom_data(fname)
def read_hdf5_data(fname, dset_name):
    """This function reads the dataset (dset_name) from the hdf5 file (fname).

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory
    dset_name : `str`
        name of the dataset to read

    Returns
    -------
    data : `~astropy.table.Table`
        the dataset wrapped in an astropy Table

    .. note:: per-column unit parsing from the 'units' attribute is currently
              disabled (see the commented-out block below).
    """
    h5_file = h5py.File(fname, 'r')
    dataset = h5_file[dset_name]
    data = np.asarray(dataset)
    # data_units = dataset.attrs['units']

    data_table = table.Table(data)

    # for i, col_unit in enumerate(data_units):
    #     if col_unit == 'n':
    #         data_table.columns[i].units = None
    #     elif col_unit == '1':
    #         data_table.columns[i].units = units.Unit(1)
    #     else:
    #         data_table.columns[i].units = units.Unit(col_unit)

    h5_file.close()

    return data_table
def read_basic_atom_data(fname=None):
    """This function reads the atomic number, symbol, and mass from hdf5 file

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], symbol, mass[u]
    """

    data_table = read_hdf5_data(fname, 'basic_atom_data')
    # NOTE(review): mass stays in atomic mass units here; the u->g conversion
    # happens later in AtomData.__init__.
    # data_table.columns['mass'] = units.Unit('u').to('g', data_table['mass'])

    return data_table
def read_ionization_data(fname=None):
    """This function reads the atomic number, ion number, and ionization energy from hdf5 file

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], ion[1], ionization_energy[eV]

    .. note:: energy from unionized atoms to once-ionized atoms ion = 1, for once ionized
              to twice ionized ion=2, etc.
    """

    data_table = read_hdf5_data(fname, 'ionization_data')
    # NOTE(review): eV->erg conversion is done later in AtomData.__init__.
    #data_table.columns['ionization_energy'] = units.Unit('eV').to('erg', data_table.columns['ionization_energy'])

    return data_table
def read_levels_data(fname=None):
    """This function reads atomic number, ion number, level_number, energy, g, metastable
    information from hdf5 file.

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields z[1], ion[1], level_number, energy, g, metastable
    """

    data_table = read_hdf5_data(fname, 'levels_data')
    # NOTE(review): eV->erg conversion is done later in AtomData.__init__.
    #data_table.columns['energy'].convert_units_to('erg')
    #data_table.columns['energy'] = units.Unit('eV').to('erg', data_table.columns['energy'])

    return data_table
def read_synpp_refs(fname):
    """Read the 'synpp_refs' dataset from *fname* as a plain numpy array.

    Parameters
    ----------
    fname : `str`
        path to the atomic HDF5 file
    """
    # Context manager closes the HDF5 handle; the original leaked it.
    with h5py.File(fname, 'r') as h5_file:
        return h5_file['synpp_refs'].__array__()
def read_lines_data(fname=None):
    """
    This function reads the wavelength, atomic number, ion number, f_ul, f_l and level id information
    from hdf5 file

    Parameters
    ----------
    fname : `str`, optional
        path to atomic.h5 file, if set to None it will read in default data directory

    Returns
    -------
    data : `~astropy.table.Table`
        table with fields wavelength, atomic_number, ion_number, f_ul, f_lu, level_id_lower, level_id_upper.
    """

    data_table = read_hdf5_data(fname, 'lines_data')
    # NOTE(review): the commented line below looks copy-pasted from the
    # ionization reader; lines have no 'ionization_energy' column.
    #data_table.columns['ionization_energy'].convert_units_to('erg')

    return data_table
def read_zeta_data(fname):
    """
    This function reads the recombination coefficient data from the HDF5 file

    Parameters
    ----------
    fname : `str`
        path to the atomic HDF5 file

    Returns
    -------
    `~pandas.DataFrame`
        zeta values indexed by (atomic_number, ion), one column per t_rad
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    h5_file = h5py.File(fname, 'r')

    if 'zeta_data' not in h5_file.keys():
        raise ValueError('zeta_data not available in this HDF5-data file. It can not be used with NebularAtomData')

    zeta_data = h5_file['zeta_data']
    t_rads = zeta_data.attrs['t_rad']
    # First two columns are atomic_number and ion; the rest are zeta per t_rad.
    # NOTE(review): h5_file is never closed here.
    return pd.DataFrame(zeta_data[:, 2:], index=pd.MultiIndex.from_arrays(zeta_data[:, :2].transpose().astype(int)),
                        columns=t_rads)
def read_collision_data(fname):
    """Read the electron-collision dataset and its temperature grid.

    Parameters
    ----------
    fname : `str`
        path to the atomic HDF5 file

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        collision data records and the temperature grid they are tabulated on
    """
    if fname is None:
        # BUG FIX: message previously said "NebularAtom" (copy-paste from
        # read_zeta_data); this dataset is used for NLTE.
        raise ValueError('fname can not be "None" when trying to use NLTE')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    # Context manager closes the HDF5 handle; the original leaked it.
    with h5py.File(fname, 'r') as h5_file:
        if 'collision_data' not in h5_file.keys():
            raise ValueError('collision_data not available in this HDF5-data file. It can not be used with NLTE')

        collision_data = np.array(h5_file['collision_data'])
        collision_temperatures = h5_file['collision_data'].attrs['temperatures']

    return collision_data, collision_temperatures
def read_ion_cx_data(fname):
    """Read the ionization charge-exchange datasets from *fname*.

    Returns
    -------
    (h5py.Dataset, h5py.Dataset) or None
        The threshold and support datasets, or None when the file cannot be
        opened (the error is logged).

    NOTE(review): the returned datasets appear to be lazy views backed by the
    open file handle, so the handle is intentionally left open -- confirm
    callers read them before the process exits.
    """
    try:
        h5_file = h5py.File(fname, 'r')
        ion_cx_th_data = h5_file['ionization_cx_threshold']
        ion_cx_sp_data = h5_file['ionization_cx_support']
        return ion_cx_th_data, ion_cx_sp_data
    except IOError as err:
        # BUG FIX: 'except IOError, err' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and 3.x. Report through the logger instead of print.
        logger.critical('Cannot import. Error opening the file to read ionization_cx: %s (errno=%s)',
                        err, err.errno)
def read_macro_atom_data(fname):
    """Read the macro-atom transition table and its per-level reference counts.

    Parameters
    ----------
    fname : `str`
        path to the atomic HDF5 file

    Returns
    -------
    (h5py.Dataset, h5py.Dataset)
        the 'macro_atom_data' and 'macro_atom_references' datasets
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    h5_file = h5py.File(fname, 'r')

    if 'macro_atom_data' not in h5_file.keys():
        raise ValueError('Macro Atom Data (macro_atom_data) is not in this HDF5-data file. '
                         'It is needed for complex line interaction')
    macro_atom_data = h5_file['macro_atom_data']

    macro_atom_counts = h5_file['macro_atom_references']

    # NOTE(review): the returned datasets are views backed by the still-open
    # h5_file handle; it is never closed here.
    return macro_atom_data, macro_atom_counts
class AtomData(object):
    """
    Class for storing atomic data

    AtomData
    ---------

    Parameters
    ----------

    basic_atom_data : `~astropy.table.Table`
        containing the basic atom data: z, symbol, and mass

    ionization_data : ~astropy.table.Table
        containing the ionization data: z, ion, and ionization energy
        ::important to note here is that ion describes the final ion state
            e.g. H I - H II is described with ion=2

    levels : ~astropy.table.Table
        containing the levels data: z, ion, level_number, energy, g

    lines : ~astropy.table.Table
        containing the lines data: wavelength, z, ion, levels_number_lower,
        levels_number_upper, f_lu, f_ul

    macro_atom_data : tuple of ~astropy.table.Table
        default ~None, a tuple of the macro-atom data and macro-atom references

    zeta_data : ~dict of interpolation objects
        default ~None
    """
@classmethod
def from_hdf5(cls, fname=None):
    """
    Function to read all the atom data from a special TARDIS HDF5 File.

    Parameters
    ----------
    fname: str, optional
        the default for this is `None` and then it will use the very limited atomic_data shipped with TARDIS
        For more complex atomic data please contact the authors.
    """

    if fname is None:
        fname = default_atom_h5_path

    if not os.path.exists(fname):
        raise ValueError("Supplied Atomic Model Database %s does not exists" % fname)

    atom_data = read_basic_atom_data(fname)
    ionization_data = read_ionization_data(fname)
    levels_data = read_levels_data(fname)
    lines_data = read_lines_data(fname)

    with h5py.File(fname, 'r') as h5_file:
        h5_datasets = h5_file.keys()

    # Optional datasets: fall back to None when absent from this file.
    if 'macro_atom_data' in h5_datasets:
        macro_atom_data = read_macro_atom_data(fname)
    else:
        macro_atom_data = None

    if 'zeta_data' in h5_datasets:
        zeta_data = read_zeta_data(fname)
    else:
        zeta_data = None

    if 'collision_data' in h5_datasets:
        collision_data, collision_data_temperatures = read_collision_data(fname)
    else:
        collision_data, collision_data_temperatures = (None, None)

    if 'synpp_refs' in h5_datasets:
        synpp_refs = read_synpp_refs(fname)
    else:
        synpp_refs = None

    # NOTE(review): both sides of this 'and' test the same key; one of them
    # probably should check a different dataset name -- confirm upstream.
    if 'ion_cx_data' in h5_datasets and 'ion_cx_data' in h5_datasets:
        ion_cx_data = read_ion_cx_data(fname)
    else:
        ion_cx_data = None

    atom_data = cls(atom_data=atom_data, ionization_data=ionization_data, levels_data=levels_data,
                    lines_data=lines_data, macro_atom_data=macro_atom_data, zeta_data=zeta_data,
                    collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs,
                    ion_cx_data=ion_cx_data)

    # Provenance / version metadata stored as file attributes.
    with h5py.File(fname, 'r') as h5_file:
        atom_data.uuid1 = h5_file.attrs['uuid1']
        atom_data.md5 = h5_file.attrs['md5']
        atom_data.version = h5_file.attrs.get('database_version', None)

        if atom_data.version is not None:
            atom_data.data_sources = pickle.loads(h5_file.attrs['data_sources'])

    logger.info('Read Atom Data with UUID=%s and MD5=%s', atom_data.uuid1, atom_data.md5)

    return atom_data
def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_atom_data=None, zeta_data=None,
             collision_data=None, synpp_refs=None, ion_cx_data=None):
    """Store the raw tables, convert units to CGS and set availability flags.

    See the class docstring for the meaning of each parameter.
    """
    self.has_levels = levels_data is not None
    self.has_lines = lines_data is not None

    if macro_atom_data is not None:
        self.has_macro_atom = True
        self.macro_atom_data_all = DataFrame(macro_atom_data[0].__array__())
        self.macro_atom_references_all = DataFrame(macro_atom_data[1].__array__())
    else:
        self.has_macro_atom = False

    if ion_cx_data is not None:
        self.has_ion_cx_data = True
        #TODO:Farm a panda here
        self.ion_cx_th_data = DataFrame(np.array(ion_cx_data[0]))
        self.ion_cx_th_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)

        self.ion_cx_sp_data = DataFrame(np.array(ion_cx_data[1]))
        # BUG FIX: the original call discarded its result (no inplace=True),
        # leaving ion_cx_sp_data unindexed.
        self.ion_cx_sp_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
    else:
        self.has_ion_cx_data = False

    if zeta_data is not None:
        self.zeta_data = zeta_data
        self.has_zeta_data = True
    else:
        self.has_zeta_data = False

    # BUG FIX: guard against collision_data being None (its default), which
    # previously raised TypeError on collision_data[0].
    if collision_data is not None and collision_data[0] is not None:
        self.collision_data = DataFrame(collision_data[0])
        self.collision_data_temperatures = collision_data[1]
        self.collision_data.set_index(['atomic_number', 'ion_number', 'level_number_lower', 'level_number_upper'],
                                      inplace=True)
        self.has_collision_data = True
    else:
        self.has_collision_data = False

    if synpp_refs is not None:
        self.has_synpp_refs = True
        self.synpp_refs = pd.DataFrame(synpp_refs)
        self.synpp_refs.set_index(['atomic_number', 'ion_number'], inplace=True)
    else:
        self.has_synpp_refs = False

    self.atom_data = DataFrame(atom_data.__array__())
    self.atom_data.set_index('atomic_number', inplace=True)
    # Convert atomic masses from u to grams.
    self.atom_data.mass = units.Unit('u').to('g', self.atom_data.mass.values)

    self.ionization_data = DataFrame(ionization_data.__array__())
    self.ionization_data.set_index(['atomic_number', 'ion_number'], inplace=True)
    # Convert ionization energies from eV to erg.
    self.ionization_data.ionization_energy = units.Unit('eV').to('erg',
                                                                 self.ionization_data.ionization_energy.values)

    self._levels = DataFrame(levels_data.__array__())
    self._levels.energy = units.Unit('eV').to('erg', self._levels.energy.values)

    self._lines = DataFrame(lines_data.__array__())
    self._lines.set_index('line_id', inplace=True)
    self._lines['nu'] = units.Unit('angstrom').to('Hz', self._lines['wavelength'], units.spectral())
    self._lines['wavelength_cm'] = units.Unit('angstrom').to('cm', self._lines['wavelength'])

    self.symbol2atomic_number = OrderedDict(zip(self.atom_data['symbol'].values, self.atom_data.index))
    self.atomic_number2symbol = OrderedDict(zip(self.atom_data.index, self.atom_data['symbol']))

    self.ion_cx_data = ion_cx_data
def prepare_atom_data(self, selected_atomic_numbers, line_interaction_type='scatter', max_ion_number=None,
                      nlte_excitation_species=[], nlte_ionization_species=[]):
    """
    Prepares the atom data to set the lines, levels and if requested macro atom data.
    This function mainly cuts the `levels` and `lines` by discarding any data that is not needed (any data
    for atoms that are not needed

    Parameters
    ----------
    selected_atoms : `~set`
        set of selected atom numbers, e.g. set([14, 26])
    line_interaction_type : `~str`
        can be 'scatter', 'downbranch' or 'macroatom'
    max_ion_number : `~int`
        maximum ion number to be included in the calculation
    nlte_excitation_species : `~list`
        (atomic_number, ion_number) tuples treated in NLTE excitation
    nlte_ionization_species : `~list`
        (atomic_number, ion_number) tuples treated in NLTE ionization
    """
    self.selected_atomic_numbers = selected_atomic_numbers

    self.nlte_excitation_species = nlte_excitation_species
    self.nlte_ionization_species = nlte_ionization_species

    # Cut levels to the selected atoms (and optionally to max_ion_number).
    self._levels = self._levels.reset_index()
    self.levels = self._levels.copy()
    self.levels = self.levels[self.levels['atomic_number'].isin(self.selected_atomic_numbers)]
    if max_ion_number is not None:
        self.levels = self.levels[self.levels['ion_number'] <= max_ion_number]
    self.levels = self.levels.set_index(['atomic_number', 'ion_number', 'level_number'])

    # Positional index (0..n-1) of each surviving level.
    self.levels_index = pd.Series(np.arange(len(self.levels), dtype=int), index=self.levels.index)

    # cutting levels_lines
    self.lines = self._lines.copy()
    self.lines = self.lines[self.lines['atomic_number'].isin(self.selected_atomic_numbers)]
    if max_ion_number is not None:
        self.lines = self.lines[self.lines['ion_number'] <= max_ion_number]

    # NOTE(review): DataFrame.sort and .ix (used throughout this method) are
    # pandas APIs of this file's era; both were removed in modern pandas.
    self.lines.sort('wavelength', inplace=True)

    self.lines_index = pd.Series(np.arange(len(self.lines), dtype=int), index=self.lines.index)

    # Map each line to the positional index of its lower / upper level.
    tmp_lines_lower2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                           self.lines['level_number_lower']])

    self.lines_lower2level_idx = self.levels_index.ix[tmp_lines_lower2level_idx].values.astype(np.int64)

    tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                           self.lines['level_number_upper']])

    self.lines_upper2level_idx = self.levels_index.ix[tmp_lines_upper2level_idx].values.astype(np.int64)

    self.atom_ion_index = None
    self.levels_index2atom_ion_index = None

    if self.has_macro_atom and not (line_interaction_type == 'scatter'):
        self.macro_atom_data = self.macro_atom_data_all[
            self.macro_atom_data_all['atomic_number'].isin(self.selected_atomic_numbers)]

        if max_ion_number is not None:
            self.macro_atom_data = self.macro_atom_data[self.macro_atom_data['ion_number'] <= max_ion_number]

        self.macro_atom_references = self.macro_atom_references_all[
            self.macro_atom_references_all['atomic_number'].isin(
                self.selected_atomic_numbers)]
        if max_ion_number is not None:
            self.macro_atom_references = self.macro_atom_references[
                self.macro_atom_references['ion_number'] <= max_ion_number]

        if line_interaction_type == 'downbranch':
            # Downbranch keeps only downward (emission) transitions.
            self.macro_atom_data = self.macro_atom_data[(self.macro_atom_data['transition_type'] == -1).values]

            self.macro_atom_references = self.macro_atom_references[self.macro_atom_references['count_down'] > 0]
            self.macro_atom_references['count_total'] = self.macro_atom_references['count_down']
            # Offset of each level's transition block in the flattened table.
            self.macro_atom_references['block_references'] = np.hstack((0,
                                                                        np.cumsum(self.macro_atom_references[
                                                                            'count_down'].values[:-1])))
        elif line_interaction_type == 'macroatom':
            self.macro_atom_references['block_references'] = np.hstack((0,
                                                                        np.cumsum(self.macro_atom_references[
                                                                            'count_total'].values[:-1])))

        self.macro_atom_references.set_index(['atomic_number', 'ion_number', 'source_level_number'], inplace=True)
        self.macro_atom_references['references_idx'] = np.arange(len(self.macro_atom_references))

        self.macro_atom_data['lines_idx'] = self.lines_index.ix[self.macro_atom_data['transition_line_id']].values

        tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays(
            [self.lines['atomic_number'], self.lines['ion_number'],
             self.lines['level_number_upper']])

        self.lines_upper2macro_reference_idx = self.macro_atom_references['references_idx'].ix[
            tmp_lines_upper2level_idx].values.astype(np.int64)

        tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays([self.macro_atom_data['atomic_number'],
                                                                     self.macro_atom_data['ion_number'],
                                                                     self.macro_atom_data[
                                                                         'destination_level_number']])

        if line_interaction_type == 'macroatom':
            self.macro_atom_data['destination_level_idx'] = self.macro_atom_references['references_idx'].ix[
                tmp_macro_destination_level_idx].values.astype(np.int64)
        elif line_interaction_type == 'downbranch':
            # Destination levels are unused in downbranch; mark them with -1.
            self.macro_atom_data['destination_level_idx'] = (np.ones(len(self.macro_atom_data)) * -1).astype(
                np.int64)

    self.nlte_excitation_data = NLTEExcitationData(self, nlte_excitation_species)
    self.nlte_ionization_data = NLTEIonizationData(self, nlte_ionization_species)
def __repr__(self):
    # Human-readable summary of the loaded dataset: uuid1/md5 identify the
    # source HDF5 file; the counts come from the lines/levels tables.
    # NOTE(review): self.lines and self.levels are only set by
    # prepare_atom_data(), so repr() before that call raises AttributeError --
    # confirm intended.
    return "<Atomic Data UUID=%s MD5=%s Lines=%d Levels=%d>" % \
        (self.uuid1, self.md5, self.lines.atomic_number.count(), self.levels.energy.count())
class NLTEExcitationData(object):
    """Pre-computed per-species index arrays and collision data for NLTE excitation.

    Parameters
    ----------
    atom_data : AtomData
        Prepared atomic data (``lines``/``levels`` already cut and indexed).
    nlte_excitation_species : list of tuple
        ``(atomic_number, ion_number)`` pairs to treat in NLTE excitation.
    """

    def __init__(self, atom_data, nlte_excitation_species):
        self.atom_data = atom_data
        # Flat copy of the lines table (the line_id index becomes a column).
        self.lines = atom_data.lines.reset_index()
        self.nlte_excitation_species = nlte_excitation_species

        if nlte_excitation_species:
            logger.info('Preparing the NLTE excitation data')
            self._init_indices()
            self._create_nlte_excitation_mask()
            if atom_data.has_collision_data:
                self._create_collision_coefficient_matrix()
        else:
            # Still build the (all-False) masks so downstream code can rely
            # on the attributes existing.
            self._create_nlte_excitation_mask()

    def _init_indices(self):
        # Per-species caches of line indices, level numbers, and Einstein
        # coefficients, keyed by the (atomic_number, ion_number) tuple.
        self.lines_idx = {}
        self.lines_level_number_lower = {}
        self.lines_level_number_upper = {}
        self.A_uls = {}
        self.B_uls = {}
        self.B_lus = {}

        for species in self.nlte_excitation_species:
            lines_idx = np.where((self.lines.atomic_number == species[0]) &
                                 (self.lines.ion_number == species[1]))
            self.lines_idx[species] = lines_idx
            self.lines_level_number_lower[species] = self.lines.level_number_lower.values[lines_idx].astype(int)
            self.lines_level_number_upper[species] = self.lines.level_number_upper.values[lines_idx].astype(int)
            self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]
            self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]
            self.B_lus[species] = self.atom_data.lines.B_lu.values[lines_idx]

    def _create_nlte_excitation_mask(self):
        # Boolean masks over ALL levels/lines marking those that belong to
        # any of the NLTE-excitation species (empty species list -> all False).
        self.nlte_excitation_levels_mask = np.zeros(self.atom_data.levels.energy.count()).astype(bool)
        self.nlte_excitation_lines_mask = np.zeros(self.atom_data.lines.wavelength.count()).astype(bool)

        for species in self.nlte_excitation_species:
            current_levels_mask = (self.atom_data.levels.index.get_level_values(0) == species[0]) & \
                                  (self.atom_data.levels.index.get_level_values(1) == species[1])
            current_lines_mask = (self.atom_data.lines.atomic_number.values == species[0]) & \
                                 (self.atom_data.lines.ion_number.values == species[1])
            self.nlte_excitation_levels_mask |= current_levels_mask
            self.nlte_excitation_lines_mask |= current_lines_mask

    def _create_collision_coefficient_matrix(self):
        # Builds, per species, a (levels x levels x temperatures) C_ul cube
        # (interpolated over temperature), plus delta-E and g-ratio matrices.
        self.C_ul_interpolator = {}
        self.delta_E_matrices = {}
        self.g_ratio_matrices = {}

        collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
        for species in self.nlte_excitation_species:
            # NOTE(review): `.ix` is long-deprecated pandas API -- works only
            # on old pandas versions this code targets.
            no_of_levels = self.atom_data.levels.ix[species].energy.count()
            C_ul_matrix = np.zeros((no_of_levels, no_of_levels, len(self.atom_data.collision_data_temperatures)))
            delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
            g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))

            for (atomic_number, ion_number, level_number_lower, level_number_upper), line in \
                    collision_group.get_group(species).iterrows():
                # line.values[2:] holds the per-temperature coefficients.
                C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
                delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
                #TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.
                g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
            self.C_ul_interpolator[species] = interpolate.interp1d(self.atom_data.collision_data_temperatures,
                                                                   C_ul_matrix)
            self.delta_E_matrices[species] = delta_E_matrix

            self.g_ratio_matrices[species] = g_ratio_matrix

    def get_collision_matrix(self, species, t_electrons):
        """Interpolate C_ul at *t_electrons* and return the symmetrized
        (up + down) collision-rate cube for *species*."""
        c_ul_matrix = self.C_ul_interpolator[species](t_electrons)
        no_of_levels = c_ul_matrix.shape[0]
        # Outside the tabulated temperature range interp1d yields NaN; zero
        # those out rather than propagating them.
        c_ul_matrix[np.isnan(c_ul_matrix)] = 0.0

        #TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix
        # NOTE(review): exp(-delta_E / T) assumes delta_e is stored in
        # temperature-equivalent units -- confirm against the data generator.
        c_lu_matrix = c_ul_matrix * np.exp(-self.delta_E_matrices[species].reshape((no_of_levels, no_of_levels, 1)) /
                                           t_electrons.reshape((1, 1, t_electrons.shape[0]))) * \
                      self.g_ratio_matrices[species].reshape((no_of_levels, no_of_levels, 1))
        return c_ul_matrix + c_lu_matrix.transpose(1, 0, 2)
class NLTEIonizationData(object):
    """Placeholder for the NLTE ionization treatment (not implemented yet)."""

    def __init__(self, atom_data, nlte_ionization_species):
        # Intentionally a stub: constructed by prepare_atom_data but carries
        # no state yet.
        pass
| {
"repo_name": "aoifeboyle/tardis",
"path": "tardis/atomic.py",
"copies": "1",
"size": "25229",
"license": "bsd-3-clause",
"hash": 716640913223025300,
"line_mean": 37.4588414634,
"line_max": 119,
"alpha_frac": 0.6014903484,
"autogenerated": false,
"ratio": 3.654787773431841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.973457373246615,
"avg_score": 0.004340877873138305,
"num_lines": 656
} |
# atomic model
#TODO revisit import statements and reorganize
import logging
import os
import warnings
from collections import OrderedDict

import h5py
import numpy as np
import pandas as pd
from astropy import table, units
from pandas import DataFrame
from scipy import interpolate
# sqlparse is an optional dependency: record its availability instead of
# failing at import time.
try:
    import sqlparse

    sqlparse_available = True
except ImportError:
    sqlparse_available = False

logger = logging.getLogger(__name__)

# Path of the (very limited) atomic dataset shipped with the package.
default_atom_h5_path = os.path.join(os.path.dirname(__file__), 'data', 'atom_data.h5')
def data_path(fname):
    """Return the absolute path of *fname* inside this package's ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', fname)
# Lookup tables mapping element symbol <-> atomic number, loaded once at
# import time from the bundled data file.
atomic_symbols_data = np.recfromtxt(data_path('atomic_symbols.dat'),
                                    names=['atomic_number', 'symbol'])

symbol2atomic_number = OrderedDict(zip(atomic_symbols_data['symbol'], atomic_symbols_data['atomic_number']))
atomic_number2symbol = OrderedDict(atomic_symbols_data)
def read_atomic_data(fname=None):
    """Deprecated alias for :func:`read_basic_atom_data`.

    Bug fix: the original decorated this function with
    ``@PendingDeprecationWarning``.  Applying an exception class as a
    decorator *replaces* the function with a warning instance, so calling
    ``read_atomic_data(...)`` raised ``TypeError: ... not callable``.
    Emit the deprecation warning properly and delegate instead.
    """
    warnings.warn('read_atomic_data is deprecated; use read_basic_atom_data',
                  PendingDeprecationWarning)
    return read_basic_atom_data(fname)
def read_hdf5_data(fname, dset_name):
    """Read the dataset *dset_name* from the HDF5 file *fname*.

    Parameters
    ----------
    fname : str
        Path to the atomic HDF5 file.
    dset_name : str
        Name of the dataset to read.

    Returns
    -------
    data : ~astropy.table.Table
        The dataset converted to an astropy Table.
    """
    # Fix: the original only closed the file on the success path, leaking the
    # handle when the dataset was missing or conversion failed.  The context
    # manager guarantees release; the data is copied out before the file
    # closes.  (Dead commented-out units-parsing code removed.)
    with h5py.File(fname) as h5_file:
        data = np.asarray(h5_file[dset_name])
    return table.Table(data)
def read_basic_atom_data(fname=None):
    """Read atomic number, symbol and mass from the HDF5 file.

    Parameters
    ----------
    fname : str, optional
        Path to the atomic.h5 file; ``None`` selects the default data
        directory.

    Returns
    -------
    data : ~astropy.table.Table
        Table with fields z[1], symbol, mass[u].
    """
    return read_hdf5_data(fname, 'basic_atom_data')
def read_ionization_data(fname=None):
    """Read atomic number, ion number and ionization energy from the HDF5 file.

    Parameters
    ----------
    fname : str, optional
        Path to the atomic.h5 file; ``None`` selects the default data
        directory.

    Returns
    -------
    data : ~astropy.table.Table
        Table with fields z[1], ion[1], ionization_energy[eV].

    Notes
    -----
    The energy from un-ionized atoms to once-ionized atoms is ion = 1, from
    once-ionized to twice-ionized is ion = 2, and so on.
    """
    return read_hdf5_data(fname, 'ionization_data')
def read_levels_data(fname=None):
    """Read level information (z, ion, level_number, energy, g, metastable)
    from the HDF5 file.

    Parameters
    ----------
    fname : str, optional
        Path to the atomic.h5 file; ``None`` selects the default data
        directory.

    Returns
    -------
    data : ~astropy.table.Table
        Table with fields z[1], ion[1], level_number, energy, g, metastable.
    """
    return read_hdf5_data(fname, 'levels_data')
def read_synpp_refs(fname):
    """Read the 'synpp_refs' dataset from *fname* as a plain numpy array.

    Fix: the original opened the HDF5 file and never closed it.  The array is
    materialized before the file closes, so the returned data is independent
    of the handle.
    """
    with h5py.File(fname) as h5_file:
        return np.asarray(h5_file['synpp_refs'])
def read_lines_data(fname=None):
    """Read line data (wavelength, z, ion, f_ul, f_lu, level ids) from the
    HDF5 file.

    Parameters
    ----------
    fname : str, optional
        Path to the atomic.h5 file; ``None`` selects the default data
        directory.

    Returns
    -------
    data : ~astropy.table.Table
        Table with fields wavelength, atomic_number, ion_number, f_ul, f_lu,
        level_id_lower, level_id_upper.
    """
    return read_hdf5_data(fname, 'lines_data')
def read_zeta_data(fname):
    """Read the recombination (zeta) coefficient data from the HDF5 file.

    Parameters
    ----------
    fname : str
        Path to the atomic HDF5 file.

    Returns
    -------
    pandas.DataFrame
        Indexed by (atomic_number, ion_number), one column per t_rad value.

    Raises
    ------
    ValueError
        If *fname* is None or the file has no 'zeta_data' dataset.
    IOError
        If the file does not exist.
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    # Fix: the original never closed the file.  Slicing an h5py dataset and
    # reading its attrs materializes plain numpy data, so the file can be
    # closed before building the DataFrame.
    with h5py.File(fname) as h5_file:
        if 'zeta_data' not in h5_file.keys():
            raise ValueError('zeta_data not available in this HDF5-data file. It can not be used with NebularAtomData')
        zeta_data = h5_file['zeta_data'][:]
        t_rads = h5_file['zeta_data'].attrs['t_rad']

    # First two columns are (atomic_number, ion_number); the rest are the
    # coefficients at each t_rad.
    return pd.DataFrame(zeta_data[:, 2:],
                        index=pd.MultiIndex.from_arrays(zeta_data[:, :2].transpose().astype(int)),
                        columns=t_rads)
def read_collision_data(fname):
    """Read the collision strength data and its temperature grid.

    Parameters
    ----------
    fname : str
        Path to the atomic HDF5 file.

    Returns
    -------
    tuple of (numpy.ndarray, numpy.ndarray)
        (collision_data, collision_temperatures).

    Raises
    ------
    ValueError
        If *fname* is None or the file has no 'collision_data' dataset.
    IOError
        If the file does not exist.
    """
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    # Fix: the original never closed the file.  Both the dataset and its
    # attribute are copied into numpy arrays before the file closes.
    with h5py.File(fname) as h5_file:
        if 'collision_data' not in h5_file.keys():
            raise ValueError('collision_data not available in this HDF5-data file. It can not be used with NLTE')
        collision_data = np.array(h5_file['collision_data'])
        collision_temperatures = h5_file['collision_data'].attrs['temperatures']

    return collision_data, collision_temperatures
def read_ion_cx_data(fname):
    """Read the ionization charge-exchange datasets from the HDF5 file.

    Returns
    -------
    tuple of h5py.Dataset, or None
        (threshold, support) datasets.  These are *live* views backed by the
        open file, so the handle is intentionally left open -- callers
        materialize them later with np.array().  Returns None (implicitly)
        when the file cannot be opened.
    """
    try:
        h5_file = h5py.File(fname)
        ion_cx_th_data = h5_file['ionization_cx_threshold']
        ion_cx_sp_data = h5_file['ionization_cx_support']
        return ion_cx_th_data, ion_cx_sp_data
    # Fix: `except IOError, err:` was Python-2-only syntax and a SyntaxError
    # on Python 3; the `as` form works on both.
    except IOError as err:
        print(err.errno)
        print(err)
        logger.critical('Cannot import. Error opening the file to read ionization_cx')
def read_macro_atom_data(fname):
    # Read the macro-atom transition table and its reference/count table.
    # NOTE(review): the returned objects are live h5py datasets backed by a
    # file handle that is never closed; the file must stay open for the
    # caller (AtomData.__init__ converts them with .__array__()) -- confirm
    # before converting this to a context manager.
    if fname is None:
        raise ValueError('fname can not be "None" when trying to use NebularAtom')

    if not os.path.exists(fname):
        raise IOError('HDF5 File doesn\'t exist')

    h5_file = h5py.File(fname)

    if 'macro_atom_data' not in h5_file.keys():
        raise ValueError('Macro Atom Data (macro_atom_data) is not in this HDF5-data file. '
                         'It is needed for complex line interaction')

    macro_atom_data = h5_file['macro_atom_data']

    # Despite the local name, this is the 'macro_atom_references' table.
    macro_atom_counts = h5_file['macro_atom_references']

    return macro_atom_data, macro_atom_counts
class AtomData(object):
    """
    Class for storing atomic data

    AtomData
    ---------

    Parameters
    ----------

    basic_atom_data : `~astropy.table.Table`
        containing the basic atom data: z, symbol, and mass

    ionization_data : ~astropy.table.Table
        containing the ionization data: z, ion, and ionization energy
        ::important to note here is that ion describes the final ion state
            e.g. H I - H II is described with ion=2

    levels_data : ~astropy.table.Table
        containing the levels data: z, ion, level_number, energy, g

    lines_data : ~astropy.table.Table
        containing the lines data: wavelength, z, ion, levels_number_lower,
        levels_number_upper, f_lu, f_ul

    macro_atom_data : tuple of ~astropy.table.Table
        default ~None, a tuple of the macro-atom data and macro-atom references

    zeta_data : ~dict of interpolation objects
        default ~None
    """

    @classmethod
    def from_hdf5(cls, fname=None):
        """
        Function to read all the atom data from a special TARDIS HDF5 File.

        Parameters
        ----------

        fname: str, optional
            the default for this is `None` and then it will use the very limited atomic_data shipped with TARDIS
            For more complex atomic data please contact the authors.
        """

        if fname is None:
            fname = default_atom_h5_path

        if not os.path.exists(fname):
            raise ValueError("Supplied Atomic Model Database %s does not exists" % fname)

        atom_data = read_basic_atom_data(fname)
        ionization_data = read_ionization_data(fname)
        levels_data = read_levels_data(fname)
        lines_data = read_lines_data(fname)

        # Take a snapshot of the available dataset names, then load each
        # optional dataset only when present.
        with h5py.File(fname) as h5_file:
            h5_datasets = h5_file.keys()

        if 'macro_atom_data' in h5_datasets:
            macro_atom_data = read_macro_atom_data(fname)
        else:
            macro_atom_data = None

        if 'zeta_data' in h5_datasets:
            zeta_data = read_zeta_data(fname)
        else:
            zeta_data = None

        if 'collision_data' in h5_datasets:
            collision_data, collision_data_temperatures = read_collision_data(fname)
        else:
            collision_data, collision_data_temperatures = (None, None)

        if 'synpp_refs' in h5_datasets:
            synpp_refs = read_synpp_refs(fname)
        else:
            synpp_refs = None

        # Fix: the original tested `'ion_cx_data' in h5_datasets and
        # 'ion_cx_data' in h5_datasets` -- the same condition twice.
        if 'ion_cx_data' in h5_datasets:
            ion_cx_data = read_ion_cx_data(fname)
        else:
            ion_cx_data = None

        atom_data = cls(atom_data=atom_data, ionization_data=ionization_data, levels_data=levels_data,
                        lines_data=lines_data, macro_atom_data=macro_atom_data, zeta_data=zeta_data,
                        collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs,
                        ion_cx_data=ion_cx_data)

        with h5py.File(fname) as h5_file:
            atom_data.uuid1 = h5_file.attrs['uuid1']
            atom_data.md5 = h5_file.attrs['md5']

        logger.info('Read Atom Data with UUID=%s and MD5=%s', atom_data.uuid1, atom_data.md5)

        return atom_data

    def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_atom_data=None, zeta_data=None,
                 collision_data=None, synpp_refs=None, ion_cx_data=None):

        if macro_atom_data is not None:
            self.has_macro_atom = True
            self.macro_atom_data_all = DataFrame(macro_atom_data[0].__array__())
            self.macro_atom_references_all = DataFrame(macro_atom_data[1].__array__())
        else:
            self.has_macro_atom = False

        if ion_cx_data is not None:
            self.has_ion_cx_data = True
            #TODO:Farm a panda here
            self.ion_cx_th_data = DataFrame(np.array(ion_cx_data[0]))
            self.ion_cx_th_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)

            self.ion_cx_sp_data = DataFrame(np.array(ion_cx_data[1]))
            # Fix: the original called set_index without inplace=True and
            # discarded the result, leaving ion_cx_sp_data unindexed
            # (inconsistent with its ion_cx_th_data twin above).
            self.ion_cx_sp_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
        else:
            self.has_ion_cx_data = False

        if zeta_data is not None:
            self.zeta_data = zeta_data
            self.has_zeta_data = True
        else:
            self.has_zeta_data = False

        # Fix: with the documented default collision_data=None the original
        # crashed on `collision_data[0]` (TypeError); normalize first.
        if collision_data is None:
            collision_data = (None, None)

        if collision_data[0] is not None:
            self.collision_data = DataFrame(collision_data[0])
            self.collision_data_temperatures = collision_data[1]
            self.collision_data.set_index(['atomic_number', 'ion_number', 'level_number_lower', 'level_number_upper'],
                                          inplace=True)
            self.has_collision_data = True
        else:
            self.has_collision_data = False

        if synpp_refs is not None:
            self.has_synpp_refs = True
            self.synpp_refs = pd.DataFrame(synpp_refs)
            self.synpp_refs.set_index(['atomic_number', 'ion_number'], inplace=True)
        else:
            self.has_synpp_refs = False

        # Convert the astropy tables to DataFrames and normalize units:
        # mass u -> g, energies eV -> erg, wavelengths angstrom -> Hz / cm.
        self.atom_data = DataFrame(atom_data.__array__())
        self.atom_data.set_index('atomic_number', inplace=True)
        self.atom_data.mass = units.Unit('u').to('g', self.atom_data.mass.values)

        self.ionization_data = DataFrame(ionization_data.__array__())
        self.ionization_data.set_index(['atomic_number', 'ion_number'], inplace=True)
        self.ionization_data.ionization_energy = units.Unit('eV').to('erg',
                                                                     self.ionization_data.ionization_energy.values)

        self.levels_data = DataFrame(levels_data.__array__())
        self.levels_data.energy = units.Unit('eV').to('erg', self.levels_data.energy.values)

        self.lines_data = DataFrame(lines_data.__array__())
        self.lines_data.set_index('line_id', inplace=True)
        self.lines_data['nu'] = units.Unit('angstrom').to('Hz', self.lines_data['wavelength'], units.spectral())
        self.lines_data['wavelength_cm'] = units.Unit('angstrom').to('cm', self.lines_data['wavelength'])

        self.symbol2atomic_number = OrderedDict(zip(self.atom_data['symbol'].values, self.atom_data.index))
        self.atomic_number2symbol = OrderedDict(zip(self.atom_data.index, self.atom_data['symbol']))

    def prepare_atom_data(self, selected_atomic_numbers, line_interaction_type='scatter', max_ion_number=None,
                          nlte_species=None):
        """
        Prepares the atom data to set the lines, levels and if requested macro atom data.
        This function mainly cuts the `levels_data` and `lines_data` by discarding any data that is not needed (any data
        for atoms that are not needed).

        Parameters
        ----------

        selected_atomic_numbers : `~set`
            set of selected atom numbers, e.g. set([14, 26])

        line_interaction_type : `~str`
            can be 'scatter', 'downbranch' or 'macroatom'

        max_ion_number : `~int`
            maximum ion number to be included in the calculation

        nlte_species : list of tuple, optional
            (atomic_number, ion_number) pairs treated in NLTE; defaults to
            an empty list.
        """
        # Fix: the original used the mutable default `nlte_species=[]`,
        # which is shared across calls; None-sentinel is backward compatible.
        if nlte_species is None:
            nlte_species = []

        self.selected_atomic_numbers = selected_atomic_numbers

        self.nlte_species = nlte_species
        self.levels = self.levels_data[self.levels_data['atomic_number'].isin(self.selected_atomic_numbers)]

        if max_ion_number is not None:
            self.levels = self.levels[self.levels['ion_number'] <= max_ion_number]

        self.levels = self.levels.set_index(['atomic_number', 'ion_number', 'level_number'])
        # Position of each level in the cut table (used to translate line
        # endpoints to array indices below).
        self.levels_index = pd.Series(np.arange(len(self.levels), dtype=int), index=self.levels.index)

        #cutting levels_lines
        self.lines = self.lines_data[self.lines_data['atomic_number'].isin(self.selected_atomic_numbers)]
        if max_ion_number is not None:
            self.lines = self.lines[self.lines['ion_number'] <= max_ion_number]

        # NOTE: DataFrame.sort / .ix are long-deprecated pandas APIs; this
        # module targets the old pandas version the project pins.
        self.lines.sort('wavelength', inplace=True)

        self.lines_index = pd.Series(np.arange(len(self.lines), dtype=int), index=self.lines.index)

        tmp_lines_lower2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                               self.lines['level_number_lower']])

        self.lines_lower2level_idx = self.levels_index.ix[tmp_lines_lower2level_idx].values.astype(np.int64)

        tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
                                                               self.lines['level_number_upper']])

        self.lines_upper2level_idx = self.levels_index.ix[tmp_lines_upper2level_idx].values.astype(np.int64)

        self.atom_ion_index = None
        self.levels_index2atom_ion_index = None

        if self.has_macro_atom and not (line_interaction_type == 'scatter'):
            # Cut the macro-atom tables to the selected atoms/ions, then build
            # the block-reference bookkeeping the montecarlo part expects.
            self.macro_atom_data = self.macro_atom_data_all[
                self.macro_atom_data_all['atomic_number'].isin(self.selected_atomic_numbers)]

            if max_ion_number is not None:
                self.macro_atom_data = self.macro_atom_data[self.macro_atom_data['ion_number'] <= max_ion_number]

            self.macro_atom_references = self.macro_atom_references_all[
                self.macro_atom_references_all['atomic_number'].isin(
                    self.selected_atomic_numbers)]
            if max_ion_number is not None:
                self.macro_atom_references = self.macro_atom_references[
                    self.macro_atom_references['ion_number'] <= max_ion_number]

            if line_interaction_type == 'downbranch':
                # Only downward (emission) transitions survive in downbranch.
                self.macro_atom_data = self.macro_atom_data[(self.macro_atom_data['transition_type'] == -1).values]
                self.macro_atom_references = self.macro_atom_references[self.macro_atom_references['count_down'] > 0]
                self.macro_atom_references['count_total'] = self.macro_atom_references['count_down']
                self.macro_atom_references['block_references'] = np.hstack((0,
                                                                            np.cumsum(self.macro_atom_references[
                                                                                'count_down'].values[:-1])))
            elif line_interaction_type == 'macroatom':
                self.macro_atom_references['block_references'] = np.hstack((0,
                                                                            np.cumsum(self.macro_atom_references[
                                                                                'count_total'].values[:-1])))

            self.macro_atom_references.set_index(['atomic_number', 'ion_number', 'source_level_number'], inplace=True)
            self.macro_atom_references['references_idx'] = np.arange(len(self.macro_atom_references))

            self.macro_atom_data['lines_idx'] = self.lines_index.ix[self.macro_atom_data['transition_line_id']].values

            tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays(
                [self.lines['atomic_number'], self.lines['ion_number'],
                 self.lines['level_number_upper']])

            self.lines_upper2macro_reference_idx = self.macro_atom_references['references_idx'].ix[
                tmp_lines_upper2level_idx].values.astype(np.int64)

            tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays([self.macro_atom_data['atomic_number'],
                                                                         self.macro_atom_data['ion_number'],
                                                                         self.macro_atom_data[
                                                                             'destination_level_number']])

            if line_interaction_type == 'macroatom':
                self.macro_atom_data['destination_level_idx'] = self.macro_atom_references['references_idx'].ix[
                    tmp_macro_destination_level_idx].values.astype(np.int64)
            elif line_interaction_type == 'downbranch':
                # Downbranch transitions carry no destination level: use -1.
                self.macro_atom_data['destination_level_idx'] = (np.ones(len(self.macro_atom_data)) * -1).astype(
                    np.int64)

        self.nlte_data = NLTEData(self, nlte_species)

    def __repr__(self):
        return "<Atomic Data UUID=%s MD5=%s Lines=%d Levels=%d>" % \
            (self.uuid1, self.md5, self.lines_data.atomic_number.count(), self.levels_data.energy.count())
class NLTEData(object):
    """Pre-computed per-species index arrays and collision data for NLTE.

    Parameters
    ----------
    atom_data : AtomData
        Prepared atomic data (``lines``/``levels`` already cut and indexed).
    nlte_species : list of tuple
        ``(atomic_number, ion_number)`` pairs to treat in NLTE.
    """

    def __init__(self, atom_data, nlte_species):
        self.atom_data = atom_data
        # Flat copy of the lines table (the line_id index becomes a column).
        self.lines = atom_data.lines.reset_index()
        self.nlte_species = nlte_species

        if nlte_species:
            logger.info('Preparing the NLTE data')
            self._init_indices()
            self._create_nlte_mask()
            if atom_data.has_collision_data:
                self._create_collision_coefficient_matrix()
        else:
            # Still build the (all-False) masks so downstream code can rely
            # on the attributes existing.
            self._create_nlte_mask()

    def _init_indices(self):
        # Per-species caches of line indices, level numbers, and Einstein
        # coefficients, keyed by the (atomic_number, ion_number) tuple.
        self.lines_idx = {}
        self.lines_level_number_lower = {}
        self.lines_level_number_upper = {}
        self.A_uls = {}
        self.B_uls = {}
        self.B_lus = {}

        for species in self.nlte_species:
            lines_idx = np.where((self.lines.atomic_number == species[0]) &
                                 (self.lines.ion_number == species[1]))
            self.lines_idx[species] = lines_idx
            self.lines_level_number_lower[species] = self.lines.level_number_lower.values[lines_idx].astype(int)
            self.lines_level_number_upper[species] = self.lines.level_number_upper.values[lines_idx].astype(int)
            self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]
            self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]
            self.B_lus[species] = self.atom_data.lines.B_lu.values[lines_idx]

    def _create_nlte_mask(self):
        # Boolean masks over ALL levels/lines marking those belonging to any
        # NLTE species (empty species list -> all False).
        self.nlte_levels_mask = np.zeros(self.atom_data.levels.energy.count()).astype(bool)
        self.nlte_lines_mask = np.zeros(self.atom_data.lines.wavelength.count()).astype(bool)

        for species in self.nlte_species:
            current_levels_mask = (self.atom_data.levels.index.get_level_values(0) == species[0]) & \
                                  (self.atom_data.levels.index.get_level_values(1) == species[1])
            current_lines_mask = (self.atom_data.lines.atomic_number.values == species[0]) & \
                                 (self.atom_data.lines.ion_number.values == species[1])
            self.nlte_levels_mask |= current_levels_mask
            self.nlte_lines_mask |= current_lines_mask

    def _create_collision_coefficient_matrix(self):
        # Builds, per species, a (levels x levels x temperatures) C_ul cube
        # (interpolated over temperature), plus delta-E and g-ratio matrices.
        self.C_ul_interpolator = {}
        self.delta_E_matrices = {}
        self.g_ratio_matrices = {}

        collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
        for species in self.nlte_species:
            # NOTE(review): `.ix` is long-deprecated pandas API -- works only
            # on old pandas versions this code targets.
            no_of_levels = self.atom_data.levels.ix[species].energy.count()
            C_ul_matrix = np.zeros((no_of_levels, no_of_levels, len(self.atom_data.collision_data_temperatures)))
            delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
            g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))

            for (atomic_number, ion_number, level_number_lower, level_number_upper), line in \
                    collision_group.get_group(species).iterrows():
                # line.values[2:] holds the per-temperature coefficients.
                C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
                delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
                #TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.
                g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
            self.C_ul_interpolator[species] = interpolate.interp1d(self.atom_data.collision_data_temperatures,
                                                                   C_ul_matrix)
            self.delta_E_matrices[species] = delta_E_matrix

            self.g_ratio_matrices[species] = g_ratio_matrix

    def get_collision_matrix(self, species, t_electrons):
        """Interpolate C_ul at *t_electrons* and return the symmetrized
        (up + down) collision-rate cube for *species*."""
        c_ul_matrix = self.C_ul_interpolator[species](t_electrons)
        no_of_levels = c_ul_matrix.shape[0]
        # Outside the tabulated temperature range interp1d yields NaN; zero
        # those out rather than propagating them.
        c_ul_matrix[np.isnan(c_ul_matrix)] = 0.0

        #TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix
        # NOTE(review): exp(-delta_E / T) assumes delta_e is stored in
        # temperature-equivalent units -- confirm against the data generator.
        c_lu_matrix = c_ul_matrix * np.exp(-self.delta_E_matrices[species].reshape((no_of_levels, no_of_levels, 1)) /
                                           t_electrons.reshape((1, 1, t_electrons.shape[0]))) * \
                      self.g_ratio_matrices[species].reshape((no_of_levels, no_of_levels, 1))
        return c_ul_matrix + c_lu_matrix.transpose(1, 0, 2)
| {
"repo_name": "mklauser/tardis",
"path": "tardis/atomic.py",
"copies": "2",
"size": "24202",
"license": "bsd-3-clause",
"hash": -6643197128063644000,
"line_mean": 37.661341853,
"line_max": 120,
"alpha_frac": 0.6006528386,
"autogenerated": false,
"ratio": 3.651478575739288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004069055058112002,
"num_lines": 626
} |
"""AtomNumber module.
An ndarray to store atom densities with string, integer, or slice indexing.
"""
from collections import OrderedDict
import numpy as np
class AtomNumber:
    """Stores local material compositions (atoms of each nuclide).

    Parameters
    ----------
    local_mats : list of str
        Material IDs
    nuclides : list of str
        Nuclides to be tracked
    volume : dict
        Volume of each material in [cm^3]
    n_nuc_burn : int
        Number of nuclides to be burned.

    Attributes
    ----------
    index_mat : dict
        A dictionary mapping material ID as string to index.
    index_nuc : dict
        A dictionary mapping nuclide name to index.
    volume : numpy.ndarray
        Volume of each material in [cm^3]. If a volume is not found, it
        defaults to 1 so that reading density still works correctly.
    number : numpy.ndarray
        Array storing total atoms for each material/nuclide
    n_nuc_burn : int
        Number of burnable nuclides.
    """

    def __init__(self, local_mats, nuclides, volume, n_nuc_burn):
        self.index_mat = OrderedDict((mat, i) for i, mat in enumerate(local_mats))
        self.index_nuc = OrderedDict((nuc, i) for i, nuc in enumerate(nuclides))

        # Default to 1 cm^3 for materials without a supplied volume so that
        # density accessors remain well defined.
        self.volume = np.ones(len(local_mats))
        for mat, val in volume.items():
            if mat in self.index_mat:
                self.volume[self.index_mat[mat]] = val

        self.n_nuc_burn = n_nuc_burn
        self.number = np.zeros((len(local_mats), len(nuclides)))

    # Refactor: the str -> integer-index translation was duplicated across
    # six methods; centralize it in two small helpers (interface unchanged).
    def _mat_index(self, mat):
        """Translate a string material ID to its index; pass ints/slices through."""
        return self.index_mat[mat] if isinstance(mat, str) else mat

    def _nuc_index(self, nuc):
        """Translate a nuclide name to its index; pass ints/slices through."""
        return self.index_nuc[nuc] if isinstance(nuc, str) else nuc

    def __getitem__(self, pos):
        """Retrieve total atom number.

        Parameters
        ----------
        pos : tuple
            (material index, nuclide index); each may be a string, an int,
            or a slice.

        Returns
        -------
        numpy.ndarray
            The value indexed from self.number, in [atom].
        """
        mat, nuc = pos
        return self.number[self._mat_index(mat), self._nuc_index(nuc)]

    def __setitem__(self, pos, val):
        """Set total atom number.

        Parameters
        ----------
        pos : tuple
            (material index, nuclide index); each may be a string, an int,
            or a slice.
        val : float
            The value [atom] to set the array to.
        """
        mat, nuc = pos
        self.number[self._mat_index(mat), self._nuc_index(nuc)] = val

    @property
    def materials(self):
        """Material IDs, in storage order."""
        return self.index_mat.keys()

    @property
    def nuclides(self):
        """All nuclide names, in storage order."""
        return self.index_nuc.keys()

    @property
    def n_nuc(self):
        """Total number of tracked nuclides."""
        return len(self.index_nuc)

    @property
    def burnable_nuclides(self):
        """Burnable nuclide names (those with index below n_nuc_burn)."""
        return [nuc for nuc, ind in self.index_nuc.items()
                if ind < self.n_nuc_burn]

    def get_atom_density(self, mat, nuc):
        """Access atom density [atom/cm^3] instead of total number.

        Parameters
        ----------
        mat : str, int or slice
            Material index.
        nuc : str, int or slice
            Nuclide index.

        Returns
        -------
        numpy.ndarray
            Density in [atom/cm^3]
        """
        mat = self._mat_index(mat)
        return self[mat, nuc] / self.volume[mat]

    def set_atom_density(self, mat, nuc, val):
        """Set atom density [atom/cm^3] instead of total number.

        Parameters
        ----------
        mat : str, int or slice
            Material index.
        nuc : str, int or slice
            Nuclide index.
        val : numpy.ndarray
            Array of densities to set in [atom/cm^3]
        """
        mat = self._mat_index(mat)
        self[mat, nuc] = val * self.volume[mat]

    def get_mat_slice(self, mat):
        """Get atom quantity [atom] of all burnable nuclides in *mat*.

        Parameters
        ----------
        mat : str, int or slice
            Material index.

        Returns
        -------
        numpy.ndarray
            The slice requested in [atom].
        """
        return self[self._mat_index(mat), :self.n_nuc_burn]

    def set_mat_slice(self, mat, val):
        """Set atom quantity [atom] of all burnable nuclides in *mat*.

        Parameters
        ----------
        mat : str, int or slice
            Material index.
        val : numpy.ndarray
            The slice to set in [atom]
        """
        self[self._mat_index(mat), :self.n_nuc_burn] = val

    def set_density(self, total_density):
        """Set densities in the exact order total_density_list outputs,
        allowing for internal consistency.

        Parameters
        ----------
        total_density : list of numpy.ndarray
            Total atoms, one vector per material.
        """
        for i, density_slice in enumerate(total_density):
            self.set_mat_slice(i, density_slice)
| {
"repo_name": "shikhar413/openmc",
"path": "openmc/deplete/atom_number.py",
"copies": "8",
"size": "5881",
"license": "mit",
"hash": 8249292659338720000,
"line_mean": 26.4813084112,
"line_max": 82,
"alpha_frac": 0.5584084339,
"autogenerated": false,
"ratio": 4.115465360391882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8673873794291882,
"avg_score": null,
"num_lines": null
} |
"""AtomNumber module.
An ndarray to store atom densities with string, integer, or slice indexing.
"""
from collections import OrderedDict
import numpy as np
class AtomNumber(object):
"""Stores local material compositions (atoms of each nuclide).
Parameters
----------
local_mats : list of str
Material IDs
nuclides : list of str
Nuclides to be tracked
volume : dict
Volume of each material in [cm^3]
n_nuc_burn : int
Number of nuclides to be burned.
Attributes
----------
index_mat : dict
A dictionary mapping material ID as string to index.
index_nuc : dict
A dictionary mapping nuclide name to index.
volume : numpy.ndarray
Volume of each material in [cm^3]. If a volume is not found, it defaults
to 1 so that reading density still works correctly.
number : numpy.ndarray
Array storing total atoms for each material/nuclide
materials : list of str
Material IDs as strings
nuclides : list of str
All nuclide names
burnable_nuclides : list of str
Burnable nuclides names. Used for sorting the simulation.
n_nuc_burn : int
Number of burnable nuclides.
n_nuc : int
Number of nuclides.
"""
def __init__(self, local_mats, nuclides, volume, n_nuc_burn):
self.index_mat = OrderedDict((mat, i) for i, mat in enumerate(local_mats))
self.index_nuc = OrderedDict((nuc, i) for i, nuc in enumerate(nuclides))
self.volume = np.ones(len(local_mats))
for mat, val in volume.items():
if mat in self.index_mat:
ind = self.index_mat[mat]
self.volume[ind] = val
self.n_nuc_burn = n_nuc_burn
self.number = np.zeros((len(local_mats), len(nuclides)))
def __getitem__(self, pos):
"""Retrieves total atom number from AtomNumber.
Parameters
----------
pos : tuple
A two-length tuple containing a material index and a nuc index.
These indexes can be strings (which get converted to integers via
the dictionaries), integers used directly, or slices.
Returns
-------
numpy.ndarray
The value indexed from self.number.
"""
mat, nuc = pos
if isinstance(mat, str):
mat = self.index_mat[mat]
if isinstance(nuc, str):
nuc = self.index_nuc[nuc]
return self.number[mat, nuc]
def __setitem__(self, pos, val):
"""Sets total atom number into AtomNumber.
Parameters
----------
pos : tuple
A two-length tuple containing a material index and a nuc index.
These indexes can be strings (which get converted to integers via
the dictionaries), integers used directly, or slices.
val : float
The value [atom] to set the array to.
"""
mat, nuc = pos
if isinstance(mat, str):
mat = self.index_mat[mat]
if isinstance(nuc, str):
nuc = self.index_nuc[nuc]
self.number[mat, nuc] = val
@property
def materials(self):
return self.index_mat.keys()
    @property
    def nuclides(self):
        """All nuclide names, in index order (dict-keys view)."""
        return self.index_nuc.keys()
    @property
    def n_nuc(self):
        """Total number of nuclides tracked."""
        return len(self.index_nuc)
@property
def burnable_nuclides(self):
return [nuc for nuc, ind in self.index_nuc.items()
if ind < self.n_nuc_burn]
def get_atom_density(self, mat, nuc):
"""Accesses atom density instead of total number.
Parameters
----------
mat : str, int or slice
Material index.
nuc : str, int or slice
Nuclide index.
Returns
-------
numpy.ndarray
Density in [atom/cm^3]
"""
if isinstance(mat, str):
mat = self.index_mat[mat]
if isinstance(nuc, str):
nuc = self.index_nuc[nuc]
return self[mat, nuc] / self.volume[mat]
def set_atom_density(self, mat, nuc, val):
"""Sets atom density instead of total number.
Parameters
----------
mat : str, int or slice
Material index.
nuc : str, int or slice
Nuclide index.
val : numpy.ndarray
Array of densities to set in [atom/cm^3]
"""
if isinstance(mat, str):
mat = self.index_mat[mat]
if isinstance(nuc, str):
nuc = self.index_nuc[nuc]
self[mat, nuc] = val * self.volume[mat]
def get_mat_slice(self, mat):
"""Gets atom quantity indexed by mats for all burned nuclides
Parameters
----------
mat : str, int or slice
Material index.
Returns
-------
numpy.ndarray
The slice requested in [atom].
"""
if isinstance(mat, str):
mat = self.index_mat[mat]
return self[mat, :self.n_nuc_burn]
def set_mat_slice(self, mat, val):
"""Sets atom quantity indexed by mats for all burned nuclides
Parameters
----------
mat : str, int or slice
Material index.
val : numpy.ndarray
The slice to set in [atom]
"""
if isinstance(mat, str):
mat = self.index_mat[mat]
self[mat, :self.n_nuc_burn] = val
def set_density(self, total_density):
"""Sets density.
Sets the density in the exact same order as total_density_list outputs,
allowing for internal consistency
Parameters
----------
total_density : list of numpy.ndarray
Total atoms.
"""
for i, density_slice in enumerate(total_density):
self.set_mat_slice(i, density_slice)
| {
"repo_name": "johnnyliu27/openmc",
"path": "openmc/deplete/atom_number.py",
"copies": "3",
"size": "5889",
"license": "mit",
"hash": 5851957125132762000,
"line_mean": 26.5186915888,
"line_max": 82,
"alpha_frac": 0.5586687044,
"autogenerated": false,
"ratio": 4.115303983228512,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6173972687628513,
"avg_score": null,
"num_lines": null
} |
# atom.py
from tmps.fio import IO
from tmps.units import h, muB, Li7_props
import numpy as np
from scipy.interpolate import interp1d
from numpy.linalg import eigvals
from sympy.physics.wigner import clebsch_gordan
import matplotlib.pyplot as plt
class Atom(IO):
    """Hyperfine + Zeeman level model for an alkali atom (default Li-7).

    Builds interpolators for the state energies E(B) and their field
    derivatives dE/dB on a uniform magnetic-field grid, caching results
    through the IO base class (cache identity keyed on props/Bmin/Bmax/NB).
    """

    def __init__(
        self,
        props=Li7_props,
        L=0.0,
        J=1.0 / 2.0,
        Bmin=0,
        Bmax=2 * 1e-12,
        NB=300,
        dir=None,
        nickname="Li7_B_0-2-300",
        recalc=False,
        save=True,
    ):
        # NOTE(review): field values look scaled so that 1e-12 in these
        # units corresponds to 1 T in plot_spectrum's axes -- confirm
        # against the tmps.units conventions.
        self.Bmin = Bmin
        self.Bmax = Bmax
        self.NB = NB
        self.props = props
        # IO base handles save/load; uid_keys define the cache identity.
        super().__init__(
            dir=dir,
            nickname=nickname,
            recalc=recalc,
            uid_keys=["props", "Bmin", "Bmax", "NB"])
        if self.recalc:
            Es, Bs = self.make_E_interp(L=L, J=J, Bmin=Bmin, Bmax=Bmax, NB=NB)
            self.make_dEdB_interp(Es, Bs)
        # NOTE(review): debug print; assumes Efuncs is populated either by
        # the recalc above or by IO's load path -- confirm.
        print(self.Efuncs)
        if save and not self.loaded:
            self.save()

    def hyper_fine_zeeman(self, B, L, J, gI=-0.001182213):
        """Return sorted eigen-energies of the hyperfine + Zeeman
        Hamiltonian at field B for the (L, J) fine-structure level.

        gI is the nuclear g-factor (default appears Li-7 specific).
        """
        a = self.props[L][J]["a"]
        b = self.props[L][J]["b"]
        gJ = self.props[L][J]["gJ"]
        I = self.props["I"]
        F = np.arange(np.abs(I - J), I + J + 1)
        # c basis: coupled states
        cstates = np.array([[f, mf] for f in F for mf in np.arange(-f, f + 1)])
        # u basis: uncoupled states
        ustates = np.array(
            [[mj, mi] for mj in np.arange(-J, J + 1) for mi in np.arange(-I, I + 1)]
        )
        # number of states
        n = int((2 * J + 1) * (2 * I + 1))
        # hfs proportional to I.J, easy in c basis
        IdotJ = (
            1.0
            / 2.0
            * np.array(
                [(f * (f + 1) - J * (J + 1) - I * (I + 1)) for f in cstates[::, 0]]
            )
        )
        # CG's to go from c to u basis
        CG = np.array(
            [
                [clebsch_gordan(J, I, f, mj, mi, mf).n() for f, mf in cstates]
                for mj, mi in ustates
            ]
        )
        # I.J in u basis.
        IdotJij = np.array([[IdotJ.dot(CGi * CGj) for CGi in CG] for CGj in CG])
        # Build hyper fine + zeeman hamiltonian
        H = np.zeros((n, n))
        for M, (mj, mi) in enumerate(ustates):
            for N, (mjp, mip) in enumerate(ustates):
                Hmn = h * a * IdotJij[M, N]
                if b is not None:
                    # NOTE(review): quadrupole term uses 3*IdotJ*(IdotJ+1);
                    # the textbook form is 3(I.J)^2 + (3/2)I.J
                    # - I(I+1)J(J+1) -- confirm this is intentional.
                    Hmn += (
                        h
                        * b
                        * 3
                        * IdotJij[M, N]
                        * (IdotJij[M, N] + 1)
                        / (2 * I * (2 * I - 1) * 2 * J * (2 * J - 1))
                    )
                if M == N:
                    # Zeeman diagonal: electronic + nuclear terms.
                    Hmn += +(gJ * mj + gI * mi) * muB * B
                H[M, N] = Hmn
        E = sorted(eigvals(H))
        return E

    # diagonalize hamiltonian to get energy
    def make_E(self, L, J, Bmin, Bmax, NB):
        """Tabulate eigen-energies on a uniform field grid.

        Returns (Bs, Es) with Es of shape (NB, n_states), each row the
        sorted eigenvalues at the corresponding field.
        """
        I = self.props["I"]
        Bs = np.linspace(Bmin, Bmax, NB)
        n = int((2 * J + 1) * (2 * I + 1))
        Es = np.zeros((len(Bs), n))
        for i, B in enumerate(Bs):
            E = self.hyper_fine_zeeman(B, L, J)
            Es[i, ::] = E
        return Bs, Es

    def make_dEdB_interp(self, Es, Bs):
        """Build quadratic interpolators for dE/dB, keyed [F][mF]."""
        print("Calculating energy-field derivatives...")
        dB = Bs[1] - Bs[0]
        N = Es.shape[1]
        # Numerical derivative of each level with respect to B.
        dEdBs = np.array([np.gradient(Es[::, i], dB) for i in range(N)]).T
        # NOTE(review): the [F][mF] labelling assumes the sorted levels
        # keep the same (F, mF) ordering across the field range -- confirm.
        self.dEdBfuncs = {
            f: {
                mf: interp1d(Bs, dEdBs[::, i * (int(2 * (f - 1) + 1)) + j], kind=2)
                for j, mf in enumerate(np.arange(-f, f + 1, 1.0))
            }
            for i, f in enumerate([1.0, 2.0])
        }

    def make_E_interp(self, L=0.0, J=1.0 / 2.0, Bmin=0, Bmax=2 * 1e-12, NB=300):
        """Build quadratic interpolators for E(B), keyed [F][mF].

        Returns (Es, Bs) so the caller can reuse the tabulated energies
        (e.g. for make_dEdB_interp).
        """
        print("Calculating energies in magnetic fields...")
        I = self.props["I"]
        Bs, Es = self.make_E(L, J, Bmin, Bmax, NB)
        self.Efuncs = {
            f: {
                mf: interp1d(Bs, Es[::, i * (int(2 * (f - 1) + 1)) + j], kind=2)
                for j, mf in enumerate(np.arange(-f, f + 1, 1.0))
            }
            for i, f in enumerate([1.0, 2.0])
        }
        return Es, Bs

    def plot_spectrum(self, axs=None, Bmin=0.0, Bmax=0.1 * 1e-12, NB=100, logx=False):
        """Plot level energies (top) and dE/dB (bottom) versus field.

        Creates a new 2-panel figure unless axs is given. Returns the
        (fig, axs) pair used.
        """
        if axs is None:
            fig, axs = plt.subplots(2, 1, figsize=(7, 7))
        else:
            fig = plt.gcf()
        Efuncs = self.Efuncs
        dEdBfuncs = self.dEdBfuncs
        Bs = np.linspace(Bmin, Bmax, NB)
        ax1, ax2 = axs
        # F=1 levels drawn in red, F=2 in black.
        for f, c in zip([1.0, 2.0], ["r", "k"]):
            for mf in np.arange(-f, f + 1, 1.0):
                if logx:
                    ax1.semilogx(Bs * 1e12, Efuncs[f][mf](Bs) / h, c=c)
                else:
                    ax1.plot(Bs * 1e12, Efuncs[f][mf](Bs) / h, c=c)
        for f, c in zip([1.0, 2.0], ["r", "k"]):
            for mf in np.arange(-f, f + 1, 1.0):
                if logx:
                    ax2.semilogx(Bs * 1e12, 1e-12 * dEdBfuncs[f][mf](Bs) / h, c=c)
                else:
                    ax2.plot(Bs * 1e12, 1e-12 * dEdBfuncs[f][mf](Bs) / h, c=c)
        # Reference lines at +/- one Bohr magneton slope.
        ax2.axhline(1e-12 * muB / h)
        ax2.axhline(-1e-12 * muB / h)
        ax1.set_xlabel("B [T]")
        ax1.set_ylabel("frequency shift [MHz]")
        ax2.set_xlabel("B [T]")
        ax2.set_ylabel("derivative [MHz / T]")
        return fig, axs
if __name__ == "__main__":
    # First construction (recalc=True) computes and caches the
    # interpolators; the second one exercises the cache-load path.
    atom = Atom(Li7_props, recalc=True)
    print()
    atom = Atom(Li7_props, recalc=False)
    atom.plot_spectrum()
    plt.show()
| {
"repo_name": "lhillber/tmps",
"path": "tmps/atom.py",
"copies": "1",
"size": "5570",
"license": "mit",
"hash": -3093035417001571300,
"line_mean": 31.9585798817,
"line_max": 86,
"alpha_frac": 0.4393177738,
"autogenerated": false,
"ratio": 2.907098121085595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3846415894885595,
"avg_score": null,
"num_lines": null
} |
"""atom.py: ...
"""
import numpy as np
from numpy import nan
import scipy.linalg as spl
import sympy.physics.units as u
import piratechem as pc
class Atom(pc.atom.Atom):
    """Piratechem Atom extended with the quantum-chemical records parsed
    from ORCA output: NMR shifts, hyperfine couplings, EFG, Euler angles.
    """
    def __init__(self, index, name, r):
        pc.atom.Atom.__init__(self, name, r)
        self.index = index
        # Attach one empty record per property type; the parser fills
        # them in as the corresponding output sections are found.
        self.nmr = NMR()
        self.hyperfine = Hyperfine()
        self.efg = EFG()

    def __str__(self):
        template = "Atom(%d, %s, [%6.3f, %6.3f, %6.3f])"
        return template % (self.index, self.name,
                           self.posx, self.posy, self.posz)
class Euler(object):
    """Container for the Euler-angle information attached to one atom."""

    def __init__(self):
        # One sub-record per tensor type.
        self.hyperfine = self.Hyperfine()
        self.efg = self.EFG()

    class Hyperfine:
        """Euler angles and axis values of the hyperfine tensor."""

        def __init__(self):
            # Everything starts as NaN until the parser fills it in.
            for attr in ("alpha", "beta", "gamma", "ax", "ay", "az"):
                setattr(self, attr, nan)

        def __str__(self):
            template = "EulerHyperfine([{0}, {1}, {2}]; [{3} {4} {5}])"
            return template.format(self.alpha, self.beta, self.gamma,
                                   self.ax, self.ay, self.az)

        def return_angles(self):
            """Return the three Euler angles as a NumPy row vector."""
            return np.array([self.alpha, self.beta, self.gamma])

    class EFG:
        """Euler angles and principal values of the electric field
        gradient (EFG) tensor.
        """

        def __init__(self):
            # Everything starts as NaN until the parser fills it in.
            for attr in ("alpha", "beta", "gamma", "efgx", "efgy", "efgz"):
                setattr(self, attr, nan)

        def __str__(self):
            template = "EulerEFG([{0}, {1}, {2}]; [{3} {4} {5}])"
            return template.format(self.alpha, self.beta, self.gamma,
                                   self.efgx, self.efgy, self.efgz)

        def return_angles(self):
            """Return the three Euler angles as a NumPy row vector."""
            return np.array([self.alpha, self.beta, self.gamma])
class NMR:
    """Fields parsed from an ORCA NMR chemical-shift calculation."""

    def __init__(self):
        # All records start as NaN until the parser fills them in.
        self.shiftmat = np.full((3, 3), nan)   # raw shielding matrix
        self.sdso = np.full(3, nan)            # diamagnetic contribution
        self.spso = np.full(3, nan)            # paramagnetic contribution
        self.shiftpri = np.full(3, nan)        # principal shift values
        self.sdso_iso = nan
        self.spso_iso = nan
        self.shiftiso = nan
        self.shiftori = np.full((3, 3), nan)   # orientation matrix
        self.eigvals = np.full(3, nan)         # from _diag()
        self.iso = nan                         # from _diag()

    def __str__(self):
        template = "NMR([{0} {1} {2}]; {3})"
        return template.format(self.shiftpri[0], self.shiftpri[1],
                               self.shiftpri[2], self.shiftiso)

    def _scale(self):
        """Convert the absolute values given by ORCA to ppm (mutate in
        place).
        """
        ppm = 1e6
        self.shiftmat *= ppm
        self.sdso *= ppm
        self.spso *= ppm
        self.shiftpri *= ppm
        self.sdso_iso *= ppm
        self.spso_iso *= ppm
        self.shiftiso *= ppm

    def _diag(self):
        """Diagonalize the raw shift matrix into principal values (its
        singular values) and their isotropic average.
        """
        gram = np.dot(self.shiftmat.T, self.shiftmat)
        self.eigvals = np.sqrt(spl.eigvals(gram).real)
        self.iso = self.eigvals.sum() / 3.0
class Hyperfine:
    """Fields parsed from an ORCA electron-nuclear hyperfine interaction
    calculation.
    """

    def __init__(self):
        # All records start as NaN until the parser fills them in.
        self.aiso = nan                        # isotropic coupling
        self.atensor = np.full(3, nan)         # principal values
        self.amatrix = np.full((3, 3), nan)    # raw coupling matrix
        self.afc = np.full(3, nan)             # Fermi-contact part
        self.asd = np.full(3, nan)             # spin-dipolar part
        self.aso = np.full(3, nan)             # spin-orbit part
        self.apc = nan
        self.aori = np.full((3, 3), nan)       # orientation matrix
        self.rho = nan                         # from _calc_eff_spin_params()
        self.tdip = nan                        # from _calc_eff_spin_params()

    def __str__(self):
        template = "Hyperfine([{0} {1} {2}]; {3})"
        return template.format(self.atensor[0], self.atensor[1],
                               self.atensor[2], self.aiso)

    def _calc_eff_spin_params(self):
        """Derive the effective spin parameters rho and T_dip from the
        principal hyperfine values and the isotropic coupling.

        Equivalent expressions exist in terms of Ayy (and for tdip in
        terms of Axx or Ayy); only one form of each is evaluated here.
        """
        Axx, Ayy, Azz = self.atensor
        Aiso = self.aiso
        self.rho = (3 * Aiso - 2 * Axx - Azz) / (Aiso - Azz)
        self.tdip = (Azz - Aiso) / 2
class EFG:
    """Fields parsed from an ORCA electric field gradient calculation."""

    def __init__(self):
        # All records start as NaN until the parser fills them in.
        self.vmatrix = np.full((3, 3), nan)    # raw EFG matrix
        self.vel = np.full(3, nan)             # electronic contribution
        self.vnuc = np.full(3, nan)            # nuclear contribution
        self.vtot = np.full(3, nan)            # total principal values
        self.vori = np.full((3, 3), nan)       # orientation matrix
        self.nqcc = nan                        # nuclear quadrupole coupling
        self.k = nan                           # NQI prefactor e**2qQ/(4I(2I-1))
        self.eta = nan                         # asymmetry parameter
        self.px = nan                          # from _calc_nqi_tensor()
        self.py = nan
        self.pz = nan
        self.p = np.full(3, nan)

    def __str__(self):
        s = "EFG([{0} {1} {2}]; {3})"
        return s.format(self.vtot[0], self.vtot[1], self.vtot[2], self.nqcc)

    def _calc_nqi_tensor(self):
        """Calculate the diagonal representation of the NQI tensor as
        I*Q*I = e**2qQ/(4I(2I-1))*[-(1-eta),-(1+eta),2].
        """
        self.px = self.k * (-(1 - self.eta))
        self.py = self.k * (-(1 + self.eta))
        self.pz = self.k * 2
        self.p = np.array([self.px, self.py, self.pz])
        # eta = (self.px - self.py)/self.pz

    def _diag(self):
        """Diagonalize the raw EFG matrix into principal components
        ordered by increasing magnitude and recompute the asymmetry
        parameter eta = (Vxx - Vyy) / Vzz.
        """
        # BUG FIX: the original referenced the misspelled attribute
        # ``self.vmaxtrix``, which raised AttributeError on every call.
        eigvals = spl.eigvalsh(self.vmatrix)
        # needs an assertion against self.vtot
        V_xx, V_yy, V_zz = sorted(eigvals, key=lambda x: abs(x))
        # needs an assertion against self.eta
        eta = (V_xx - V_yy) / V_zz
        # Unit-conversion constants (currently unused by this method).
        e = float(u.eV / u.J)
        planck = float(u.planck / (u.J * u.s))
        # NOTE(review): 1 barn is 1e-28 m^2; this value (10e-28) looks
        # like a factor-of-10 slip -- confirm before relying on it.
        barn = 10e-28
| {
"repo_name": "berquist/orcaparse",
"path": "orcaparse/atom.py",
"copies": "1",
"size": "7226",
"license": "mpl-2.0",
"hash": 1116140616378187600,
"line_mean": 29.4894514768,
"line_max": 77,
"alpha_frac": 0.488928868,
"autogenerated": false,
"ratio": 3.4117091595845137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44006380275845136,
"avg_score": null,
"num_lines": null
} |
# Copyright (C) 1999 CAMP and Jakob Schiotz <schiotz@fysik.dtu.dk>
"""Implements a list of atoms where the data is stored in multiarrays.
Two versions are implemented, one where all information is stored in
cartesian coordinates, and one where they are stored in scaled space.
In both versions most access is done through __getattr__ to data
specified as arguments to the constructor. If the object was
constructed with xxxx=array then they can be accessed through
GetXxxx() and SetXxxx(). The exception is positions, momenta and
forces that are accessed through GetCartesianXxxx() and
SetCartesianXxxx().
In Asap version 2.9 and later, this module is only used to create
empty atoms for use in MakeParallelAtoms.
"""
import string
import types
import Numeric
import LinearAlgebra
#import AtomInList
#import DocumentedAttributes
#from Structures.TypesFromAtomicNumbers import TypesFromAtomicNumbers
#from Structures.FakeUnitCell import FakeUnitCell
# Delayed imports:
# Visualization.Avatars.RasMol is imported in AtomsInCmFile.GetPlot()
class ListOfCartesianAtoms:
    """A list of atoms where the storage is in the form of multiarrays.

    The object is created like this:
    ListOfCartesianAtoms(positions=data, momenta=data, .....)

    The data is then accessed through the usual access functions, i.e.
    GetCartesianPositions(), GetCartesianMomenta(),
    GetCartesianForces(). Other information can be given when the
    object is initialized, for example stresses can be given as
    stresses=data. They can then be accessed with GetStresses() and
    changed with SetStresses().
    """
    # Data names that must be accessed via [GS]etCartesianXxxx().
    _specials = {"positions":1, "momenta":1, "forces":1}

    def __init__(self, periodicboundaries=None, **kargs):
        # Check that positions are specified, and that they are a 3xN
        # multiarray
        if not kargs.has_key("positions"):
            # NOTE(review): ArgumentError is not a builtin; this raise
            # would itself fail with NameError (TypeError was likely meant).
            raise ArgumentError, "positions missing"
        try:
            shape = kargs["positions"].shape
        except AttributeError:
            raise TypeError, "positions should be a multiarray"
        if shape[1] != 3:
            raise ValueError, "positions should be an Nx3 multiarray"
        if periodicboundaries is not None:
            self.SetPeriodicBoundaries(periodicboundaries)
        # NOTE(review): ``len`` shadows the builtin within this method.
        len = shape[0]
        self._len = len
        self._data = {}
        for arg in kargs.keys():
            if kargs[arg] is not None:
                shape = kargs[arg].shape
                if shape[0] != len:
                    # NOTE(review): "%t" is not a valid format specifier;
                    # building this message would raise ValueError itself.
                    msg = ("%s has shape %t, expected (%d, ?)." %
                           (arg, shape, len))
                    raise ValueError, msg
                self._data[arg] = kargs[arg]

    # List functions
    def __len__(self):
        return self._len

    def __getitem__(self, n):
        # NOTE(review): relies on AtomInList, whose import is commented
        # out at the top of this module -- would raise NameError.
        if n < 0 or n >= self._len:
            raise IndexError
        return AtomInList.AtomInList(self, n)

    def __getslice__(self, low, high):
        # Select the data to be used in the slice object
        newdata = {}
        for key in self._data.keys():
            newdata[key] = self._data[key][low:high]
        # Now create a new list of atoms object passing the data to the
        # constructor
        return apply(self.__class__, (), newdata)

    def extend(self, other):
        """Extend the list of atoms by adding atoms from another list.

        The other list must at least contain the same type of data as this
        one.
        """
        # NOTE(review): self._len is not updated after concatenation, so
        # __len__ and index checks go stale after extend() -- confirm.
        newdata = {}
        for key in self._data.keys():
            accessfunc = "GetCartesian"+string.capitalize(key)
            try:
                newdata[key] = getattr(other, accessfunc)()
            except AttributeError:
                # Fall back to the plain accessor for non-special data.
                accessfunc = "Get"+string.capitalize(key)
                newdata[key] = getattr(other, accessfunc)()
        for key in self._data.keys():
            self._data[key] = Numeric.concatenate((self._data[key], newdata[key]))

    def __add__(self, other):
        """Add two lists of atoms."""
        newdata = {}
        for key in self._data.keys():
            accessfunc = "GetCartesian"+string.capitalize(key)
            try:
                otherdata = getattr(other, accessfunc)()
            except AttributeError:
                # Fall back to the plain accessor for non-special data.
                accessfunc = "Get"+string.capitalize(key)
                otherdata = getattr(other, accessfunc)()
            selfdata = self._data[key]
            newdata[key] = Numeric.concatenate((selfdata, otherdata))
        return apply(self.__class__, (self.GetPeriodicBoundaries(),), newdata)

    def __delitem__(self, n):
        """Delete an atom."""
        if n < 0 or n >= self._len:
            raise IndexError, "list deletion index out of range."
        for key in self._data.keys():
            d = self._data[key]
            self._data[key] = Numeric.concatenate((d[:n], d[n+1:]))
        self._len = self._len - 1

    # We should also implement access functions that change/delete the
    # elements in the list

    # Some access functions are not handled by the generic ones
    def GetPeriodicBoundaries(self):
        """Get the periodic boundary conditions."""
        return self._periodicboundaries

    def SetPeriodicBoundaries(self, pbc):
        """Set the periodic boundary conditions (a 3-tuple)."""
        if not isinstance(pbc, types.TupleType):
            raise TypeError, "Not a tuple"
        if len(pbc) != 3:
            raise ValueError, "Not a tuple of length 3"
        self._periodicboundaries = pbc

    # Visualization
    def GetPlot(self, interactive=0, debug=0):
        """Get a RasMol avatar plotting the simulation."""
        # Delayed import: RasMol is only needed when plotting.
        from Visualization.Avatars.RasMol import RasMol
        return RasMol(self, interactive=interactive, debug=debug)

    # Generic access functions
    def _getdata(self):
        # Whole-array getter; __getattr__ sets _attrname before returning
        # this bound method.
        if self._data.has_key(self._attrname):
            return self._data[self._attrname]
        else:
            # Attempting to access a missing "attribute"
            raise AttributeError, self._requested

    def _setdata(self, coordinates):
        # Whole-array setter; __getattr__ sets _attrname first.
        a = self._attrname
        if self._data.has_key(a):
            if coordinates.shape != self._data[a].shape:
                raise ValueError, "wrong shape"
        else:
            # Introducing a new sort of data
            if coordinates.shape[0] != self._len:
                raise ValueError, "wrong shape"
        self._data[a] = coordinates

    def _getsingledata(self, n):
        # Per-atom getter; __getattr__ sets _attrname first.
        if self._data.has_key(self._attrname):
            return self._data[self._attrname][n]
        else:
            # Attempting to access a missing "attribute"
            raise AttributeError, self._requested

    def _setsingledata(self, n, coordinates):
        # Per-atom setter; __getattr__ sets _attrname first.
        if self._data.has_key(self._attrname):
            self._data[self._attrname][n] = coordinates
        else:
            # Attempting to access a missing "attribute"
            raise AttributeError, self._requested

    def _isspecial(self):
        """Checks if access through [GS]etCartesianXXXX is meaningful."""
        if self._attrname == "momentums":
            # Irregular plural: ...CartesianMomentum maps to "momenta".
            self._attrname = "momenta"
        if not self._specials.has_key(self._attrname):
            # The access function is not allowed to exist
            raise AttributeError, self._requested

    def __getattr__(self, name):
        """Implement the access functions.

        Catches names of the type GetXXXX or GetCartesianXXXX
        or the similar SetXXXX names.
        """
        # NOTE(review): parse state is stored on self, so the returned
        # bound method must be called before the next attribute lookup.
        self._requested = name
        if name[:18] == "GetSingleCartesian":
            self._attrname = string.lower(name[18:])+"s"
            self._isspecial()
            return self._getsingledata
        elif name[:18] == "SetSingleCartesian":
            self._attrname = string.lower(name[18:])+"s"
            self._isspecial()
            return self._setsingledata
        elif name[:9] == "GetSingle":
            self._attrname = string.lower(name[9:])+"s"
            return self._getsingledata
        elif name[:9] == "SetSingle":
            self._attrname = string.lower(name[9:])+"s"
            return self._setsingledata
        if name[:12] == "GetCartesian":
            self._attrname = string.lower(name[12:])
            self._isspecial()
            return self._getdata
        elif name[:12] == "SetCartesian":
            self._attrname = string.lower(name[12:])
            self._isspecial()
            return self._setdata
        elif name[:3] == "Get":
            self._attrname = string.lower(name[3:])
            return self._getdata
        elif name[:3] == "Set":
            self._attrname = string.lower(name[3:])
            return self._setdata
        else:
            raise AttributeError, name
def _lowerplural(str):
    """Returns the plural of an attribute name converted to lower case."""
    # NOTE: the parameter shadows the builtin ``str``; kept as-is.
    try:
        return string.lower(_pluralmap[str])
    except KeyError:
        # No irregular plural registered: default to appending "s".
        return string.lower(str+"s")
# Table of known per-atom attributes: each entry is (Name, docstring)
# or (Name, docstring, irregular plural).
Attributes = (
    ("Position", "The position."),
    ("Momentum", "The momentum of the atom.", "Momenta"),
    ("Force", "The forces on the atoms."),
    ("Class", "An integer 'classifying' the atoms.", "Classes"),
    ("Type",
     "An object describing the type of the atoms, e.g. the chemical element."),
    ("AtomicNumber", "The atomic number of the atom."),
    ("Energy", "The energy of the atom", "Energies"),
    ("Mass", "The mass of the atom", "Masses"),
    ("Velocity", "The derivative of the position.", "Velocities"))
def _pl(t):
    """Return a (singular, plural) pair for an attribute table entry.

    Length-2 entries get a regular "s" plural; length-3 entries carry an
    explicit irregular plural as their third element.
    """
    l = len(t)
    assert l == 2 or l == 3
    if l == 2:
        return (t[0], t[0]+"s")
    elif l == 3:
        return (t[0], t[2])
    else:
        raise AssertionError, "Internal error in _pl, length is not 2 or 3."
# Precompute singular -> plural pairs from the attribute table and build
# the lookup dictionary used by _lowerplural; temporaries are deleted.
Plurals = map(_pl, Attributes)
del _pl
_pluralmap = {}
for _i in Plurals:
    _pluralmap[_i[0]] = _i[1]
del _i
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Internal/AtomsInArrays.py",
"copies": "1",
"size": "9614",
"license": "mit",
"hash": 5146565814414365000,
"line_mean": 34.0875912409,
"line_max": 79,
"alpha_frac": 0.6177449553,
"autogenerated": false,
"ratio": 3.903369874137231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9896815414248347,
"avg_score": 0.024859883037776843,
"num_lines": 274
} |
# a toolbox for music songs similarity weighting, song clustering, and so on
import csv
import logging
import os
import shutil
import numpy as np
import scipy
from matplotlib.pyplot import specgram
from scipy.io import wavfile
from sklearn.cluster import KMeans
# Root directory of the labeled audio files (subdirectories '0' and '1').
SONGS_DIR = '/home/lucasx/Documents/Dataset/CloudMusic/1'
# Destination directory for the saved FFT feature arrays (*.fft.npy).
FFT_NPY_DIR = '/home/lucasx/Documents/Dataset/CloudMusic/fft_npy'
def generate_data_and_label(songs_dir):
    """Scan *songs_dir* for label sub-directories and write dataset.csv.

    Sub-directory '1' holds positive examples and '0' negative ones; any
    other entry is ignored.  The CSV (written to the current working
    directory) has the columns ``songname`` and ``label``.

    :param songs_dir: root directory containing the '0' and '1' folders
    """
    song_data = dict()
    # The original's two branches differed only in the label value (and
    # computed an unused file path), so map the directory name directly
    # to its integer label.
    for label_dir in os.listdir(songs_dir):
        if label_dir not in ('0', '1'):
            continue
        label = int(label_dir)
        for filename in os.listdir(os.path.join(songs_dir, label_dir)):
            song_data[filename] = label
    with open('dataset.csv', 'wt', encoding='UTF-8', newline='') as csvfile:
        fieldnames = ['songname', 'label']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for key, value in song_data.items():
            # Keys are bare file names; split('/') kept for parity with
            # the original behaviour on path-like keys.
            writer.writerow({'songname': key.split('/')[-1], 'label': value})
    print('CSV file Pre-processing done!!!')
def create_fft(filename, out_dir=None):
    """Compute the first 1000 FFT magnitudes of a WAV file and save them.

    The features are stored as ``<basename>.fft.npy`` inside *out_dir*.

    :param filename: path of the input .wav file
    :param out_dir: destination directory for the feature file; defaults
        to the module-level FFT_NPY_DIR (backward compatible).
    """
    if out_dir is None:
        out_dir = FFT_NPY_DIR
    sample_rate, X = wavfile.read(filename)
    # np.fft.fft matches the legacy scipy.fft *function*; in modern SciPy
    # scipy.fft is a module, so calling it raised TypeError.
    fft_features = abs(np.fft.fft(X)[0:1000])
    base_fn, ext = os.path.splitext(filename)
    # os.path.join fixes the original string concatenation, which dropped
    # the path separator and wrote files next to (not inside) the dir.
    data_fn = os.path.join(out_dir, os.path.basename(base_fn) + '.fft')
    np.save(data_fn, fft_features)
    print(sample_rate, X.shape)
def batch_create_fft():
    """Rebuild FFT_NPY_DIR from scratch and FFT every file in SONGS_DIR."""
    # Start from an empty output directory on every run.
    if os.path.exists(FFT_NPY_DIR):
        shutil.rmtree(FFT_NPY_DIR)
    os.makedirs(FFT_NPY_DIR)
    for song in os.listdir(SONGS_DIR):
        create_fft(os.path.join(SONGS_DIR, song))
    logging.log(logging.INFO, 'All music files have been processed successfully~~~')
def read_fft(fft_npy_file_dir):
    """Load every ``*.fft.npy`` feature file from a directory.

    :param fft_npy_file_dir: directory containing the feature arrays
    :return: tuple (X, y) where X is an (n_samples, 1000) array of FFT
             features and y is an array with the constant label 1 for
             each successfully loaded sample
    """
    X = []
    y = []
    for fft_npy_file in os.listdir(fft_npy_file_dir):
        if fft_npy_file.endswith('.fft.npy'):
            X.append(np.load(os.path.join(fft_npy_file_dir, fft_npy_file))[:1000])
            # Append the label together with the sample so X and y stay
            # aligned; the original appended y even for skipped files,
            # silently misaligning labels whenever a stray file existed.
            y.append(1)
        else:
            logging.error('unsupported format for file %s' % fft_npy_file)
    return np.array(X), np.array(y)
def batch_rename(dir_):
    """Rename every file in *dir_* to sequential '<n>.mp3' names."""
    # NOTE(review): assumes no target name like '1.mp3' already exists
    # in the directory, otherwise an existing file can be clobbered.
    for num, entry in enumerate(os.listdir(dir_), start=1):
        os.rename(os.path.join(dir_, entry), os.path.join(dir_, '%d.mp3' % num))
    print('All mp3 files have been renamed...')
if __name__ == '__main__':
    # generate_data_and_label(SONGS_DIR)
    # batch_create_fft()
    # Load the cached FFT features and cluster them into 8 groups
    # (y is unused here; clustering is unsupervised).
    X, y = read_fft(FFT_NPY_DIR)
    kmeans_model = KMeans(n_clusters=8, random_state=1).fit(X)
    labels = kmeans_model.labels_
    print(labels)
| {
"repo_name": "EclipseXuLu/DataHouse",
"path": "DataHouse/music/music_util.py",
"copies": "1",
"size": "2961",
"license": "mit",
"hash": 2072908451617201200,
"line_mean": 30.5,
"line_max": 84,
"alpha_frac": 0.608578183,
"autogenerated": false,
"ratio": 3.15,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9257311539454605,
"avg_score": 0.00025332870907912,
"num_lines": 94
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.