| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import myhdl
from myhdl import (Signal, ResetSignal, intbv, always_seq, always,
always_comb)
@myhdl.block
def blinky(led, clock, reset=None):
assert len(led) >= 2
nled = len(led)
maxcnt = int(clock.frequency)
cnt = Signal(intbv(0, min=0, max=maxcnt))
toggle = Signal(bool(0))
@always_seq(clock.posedge, reset=reset)
def rtl():
if cnt == maxcnt-1:
cnt.next = 0
toggle.next = not toggle
else:
cnt.next = cnt + 1
@always_comb
def rtl_assign():
led.next[0] = toggle
led.next[1] = not toggle
for ii in range(2, nled):
led.next[ii] = 0
if reset is None:
reset = ResetSignal(0, active=0, isasync=False)
@always(clock.posedge)
def rtl_reset():
reset.next = not reset.active
g = (rtl, rtl_assign, rtl_reset,)
else:
g = (rtl, rtl_assign,)
return g
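# Hedged usage sketch (not part of the original file): assumes rhea's Clock
# signal, which supplies the `.frequency` attribute read above; the LED
# width and clock rate are illustrative.
if __name__ == '__main__':
    from rhea import Clock

    led = Signal(intbv(0)[4:])          # four LEDs (>= 2, as asserted)
    clock = Clock(0, frequency=50e6)    # 50 MHz input clock
    inst = blinky(led, clock)
    inst.convert(hdl='Verilog')         # emits blinky.v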
|
{
"content_hash": "4f04ccb9ba4d92aab66886de8479e811",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 23.414634146341463,
"alnum_prop": 0.5385416666666667,
"repo_name": "cfelton/rhea",
"id": "9b9b5f8828aaa597674c4505c52409497e95a231",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/build/blink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "Python",
"bytes": "672143"
},
{
"name": "Shell",
"bytes": "1590"
},
{
"name": "VHDL",
"bytes": "10452"
},
{
"name": "Verilog",
"bytes": "22193"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ahjo', '0009_auto_20160420_1528'),
]
database_operations = [
migrations.AlterModelTable('comment', 'comments_comment'),
]
state_operations = [
migrations.DeleteModel(
name='Comment',
),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=database_operations,
state_operations=state_operations)
]
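# Hedged companion sketch (not part of this file): the move-a-model-between-
# apps pattern pairs this state-only DeleteModel with a state-only
# CreateModel in the receiving comments app, pointed at the renamed table.
# The field list below is hypothetical:
#
#     state_operations = [
#         migrations.CreateModel(
#             name='Comment',
#             fields=[('id', models.AutoField(primary_key=True))],
#             options={'db_table': 'comments_comment'},
#         ),
#     ]
#     operations = [
#         migrations.SeparateDatabaseAndState(
#             state_operations=state_operations),
#     ]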
|
{
"content_hash": "20392fb678577e883b74c763c78c03ba",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 21.692307692307693,
"alnum_prop": 0.6205673758865248,
"repo_name": "okffi/decisions",
"id": "bc6b9016670e5c01876a78156d4c488d02ac50ed",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/decisions/ahjo/migrations/0010_auto_20160420_1558.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "HTML",
"bytes": "11708"
},
{
"name": "JavaScript",
"bytes": "3885"
},
{
"name": "Python",
"bytes": "49063"
}
],
"symlink_target": ""
}
|
import traceback
import multiprocessing
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
has_concurrent_futures = False
else:
has_concurrent_futures = True
has_mp_get_context = hasattr(multiprocessing, 'get_context')
def check_concurrent_compiling():
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
def spawn_process_entry(q):
try:
check_concurrent_compiling()
except:
msg = traceback.format_exc()
q.put('\n'.join(['', '=' * 80, msg]))
else:
q.put(None)
@skip_on_cudasim('disabled for cudasim')
class TestMultiThreadCompiling(unittest.TestCase):
@unittest.skipIf(not has_concurrent_futures, "no concurrent.futures")
def test_concurrent_compiling(self):
check_concurrent_compiling()
@unittest.skipIf(not has_mp_get_context, "no multiprocessing.get_context")
def test_spawn_concurrent_compilation(self):
# force CUDA context init
cuda.get_current_device()
# use "spawn" to avoid inheriting the CUDA context
ctx = multiprocessing.get_context('spawn')
q = ctx.Queue()
p = ctx.Process(target=spawn_process_entry, args=(q,))
p.start()
try:
err = q.get()
finally:
p.join()
if err is not None:
raise AssertionError(err)
self.assertEqual(p.exitcode, 0, 'test failed in child process')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4049969329553bce6e815cd3869e6453",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 26,
"alnum_prop": 0.6348788198103267,
"repo_name": "stefanseefeld/numba",
"id": "2adca399a0481cf2255f4f38443e98319d0d989f",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudapy/test_multithreads.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5535"
},
{
"name": "C",
"bytes": "303376"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "Jupyter Notebook",
"bytes": "110325"
},
{
"name": "Python",
"bytes": "3946372"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
}
|
import numpy as np
from nn.base import NNBase
from nn.math import softmax, make_onehot
from misc import random_weight_matrix
##
# Evaluation code; do not change this
##
from sklearn import metrics
def full_report(y_true, y_pred, tagnames):
cr = metrics.classification_report(y_true, y_pred,
target_names=tagnames)
print(cr)
def eval_performance(y_true, y_pred, tagnames):
pre, rec, f1, support = metrics.precision_recall_fscore_support(y_true, y_pred)
print("=== Performance (omitting 'O' class) ===")
print("Mean precision: %.02f%%" % (100*sum(pre[1:] * support[1:])/sum(support[1:])))
print("Mean recall: %.02f%%" % (100*sum(rec[1:] * support[1:])/sum(support[1:])))
print("Mean F1: %.02f%%" % (100*sum(f1[1:] * support[1:])/sum(support[1:])))
##
# Implement this!
##
class WindowMLP(NNBase):
"""Single hidden layer, plus representation learning."""
def __init__(self, wv, windowsize=3,
dims=[None, 100, 5],
reg=0.001, alpha=0.01, rseed=10):
"""
Initialize classifier model.
Arguments:
wv : initial word vectors (array |V| x n)
note that this is the transpose of the n x |V| matrix L
described in the handout; you'll want to keep it in
this |V| x n form for efficiency reasons, since numpy
stores matrix rows contiguously.
windowsize : int, size of context window
dims : dimensions of [input, hidden, output]
input dimension can be computed from wv.shape
reg : regularization strength (lambda)
alpha : default learning rate
rseed : random initialization seed
"""
# Set regularization
self.lreg = float(reg)
self.alpha = alpha # default training rate
dims[0] = windowsize * wv.shape[1] # input dimension
param_dims = dict(W=(dims[1], dims[0]),
b1=(dims[1],),
U=(dims[2], dims[1]),
b2=(dims[2],),
)
param_dims_sparse = dict(L=wv.shape)
# initialize parameters: don't change this line
NNBase.__init__(self, param_dims, param_dims_sparse)
np.random.seed(rseed) # be sure to seed this for repeatability!
#### YOUR CODE HERE ####
# any other initialization you need
self.params.W = random_weight_matrix(*self.params.W.shape)
self.params.U = random_weight_matrix(*self.params.U.shape)
self.sparams.L = wv.copy()
#### END YOUR CODE ####
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
#### YOUR CODE HERE ####
##
# Forward propagation
words = np.array([self.sparams.L[x] for x in window])
x = np.reshape(words, -1)
layer1 = np.tanh(self.params.W.dot(x) + self.params.b1)
probs = softmax(self.params.U.dot(layer1) + self.params.b2)
##
# Backpropagation
y = make_onehot(label, len(probs))
dx = probs - y
dU = np.outer(dx, layer1)
delta2 = np.multiply((1 - np.square(layer1)),  # tanh'(z) = 1 - tanh(z)^2
                     self.params.U.T.dot(dx))
dW = np.outer(delta2, x)
db1 = delta2
dL = self.params.W.T.dot(delta2)
dL = np.reshape(dL, (3, self.params.L.shape[1]))
dW += self.lreg * self.params.W
dU += self.lreg * self.params.U
self.grads.U += dU
self.grads.W += dW
self.grads.b2 += dx
self.grads.b1 += delta2
self.sgrads.L[window[0]] = dL[0]
self.sgrads.L[window[1]] = dL[1]
self.sgrads.L[window[2]] = dL[2]
#### END YOUR CODE ####
def predict_proba(self, windows):
"""
Predict class probabilities.
Should return a matrix P of probabilities,
with each row corresponding to a row of X.
windows = array (n x windowsize),
each row is a window of indices
"""
# handle singleton input by making sure we have
# a list-of-lists
if not hasattr(windows[0], "__iter__"):
windows = [windows]
#### YOUR CODE HERE ####
idx_array = np.array(windows)
words = self.sparams.L[idx_array]          # (n, windowsize, d) embeddings
x = words.reshape(words.shape[0], -1)      # concatenate each window
layer1 = np.tanh(x.dot(self.params.W.T) + self.params.b1)
P = np.apply_along_axis(softmax, 1,
                        layer1.dot(self.params.U.T) + self.params.b2)
#### END YOUR CODE ####
return P # rows are output for each input
def predict(self, windows):
"""
Predict most likely class.
Returns a list of predicted class indices;
input is same as to predict_proba
"""
#### YOUR CODE HERE ####
probs = self.predict_proba(windows)
c = np.argmax(probs, axis=1)
#### END YOUR CODE ####
return c # list of predicted classes
def compute_loss(self, windows, labels):
"""
Compute the loss for a given dataset.
windows = same as for predict_proba
labels = list of class labels, for each row of windows
"""
#### YOUR CODE HERE ####
# handle singleton input, mirroring predict_proba
if not hasattr(windows[0], "__iter__"):
    windows = [windows]
    labels = [labels]
P = self.predict_proba(windows)
labels = np.asarray(labels)
J = -np.sum(np.log(P[np.arange(len(labels)), labels]))
J += (self.lreg / 2.0) * (np.sum(self.params.W ** 2)
                          + np.sum(self.params.U ** 2))
#### END YOUR CODE ####
return J
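# Hedged smoke-test sketch (not part of the original file): assumes the
# course starter's NNBase.grad_check helper; word vectors are random toys.
if __name__ == '__main__':
    wv_toy = np.random.randn(100, 50)                 # |V| = 100, n = 50
    clf = WindowMLP(wv_toy, windowsize=3, dims=[None, 100, 5])
    clf.grad_check([1, 2, 3], 2)                      # check _acc_grads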
|
{
"content_hash": "512650ab08dc115f24c8eed784f98a57",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 89,
"avg_line_length": 32.54385964912281,
"alnum_prop": 0.5525606469002695,
"repo_name": "kingtaurus/cs224d",
"id": "c2e0ca0951d7e86438e5052284ef658f4e796268",
"size": "5565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_assignments/assignment2/nerwindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1008145"
},
{
"name": "Python",
"bytes": "309038"
},
{
"name": "Shell",
"bytes": "2234"
},
{
"name": "TeX",
"bytes": "110142"
}
],
"symlink_target": ""
}
|
import os
import os.path
class File:
def __init__(self, path):
self.path = path
self.name = os.path.basename(path)
def rename(self, new_name):
new_path = os.path.join(os.path.dirname(self.path), new_name)
os.rename(self.path, new_path)
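# Simple usage example for File (module name and paths are illustrative):
#
# >>> from fs import File
# >>> track = File("C:/Music/track01.mp3")
# >>> track.rename("intro.mp3")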
# Simple class for working with directories
#
# >>> from folder import Folder
# >>> pictures = Folder("C:/Cat Pics")
# >>> pictures.get_files()
# ['tabby.png', 'kitten.jpg', 'meow.gif']
class Folder:
def __init__(self, path):
self.path = path
def get_files(self):
files = []
for f in os.listdir(self.path):
if os.path.isfile(os.path.join(self.path, f)):
files.append(f)
return files
|
{
"content_hash": "3f6c1183f3245c0c7f9e494dd80317be",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 26.107142857142858,
"alnum_prop": 0.5841313269493844,
"repo_name": "VoxelDavid/bard",
"id": "259a9f5d11d26e86bd9cb68e9a7dd835fab523e9",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bard/fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3112"
}
],
"symlink_target": ""
}
|
from collections import Counter
from datetime import datetime, timedelta
from tornado import gen
from whoosh.query import Every, DateRange
from whoosh.qparser import MultifieldParser, OrGroup, QueryParser, WildcardPlugin
from whoosh.qparser.dateparse import DateParserPlugin
from whoosh.sorting import MultiFacet
from summer.ext.search import clean_results
from summer.utils import DotDict
@gen.coroutine
def get_all_topics_and_kw(idx):
with idx.searcher() as search:
# collections to return as the result
keywords = Counter()
topics = set()
# search through every document
results = search.search(Every(), limit=None)
for document in results:
# and extract the Topic and Keywords, how we want them
topics.add(document['topic'])
keywords.update([x.strip() for x in document.get('keywords', '').split(',')])
return {'keywords': keywords.most_common(None), 'topics': topics}
@gen.coroutine
def get_all_documents(idx, limit=None):
with idx.searcher() as search:
results = search.search(Every(), limit=limit)
return clean_results(idx, results)
@gen.coroutine
def get_one_document(idx, by_id=None):
with idx.searcher() as search:
q = QueryParser('uuid', idx.schema).parse(by_id)
results = search.search(q)
related = results[0].more_like_this('keywords', top=3, numterms=10)
if len(related.top_n) == 0:
# if we can't find anything with related keywords, we want
# to extend our search into the content with lots of possible terms.
# the goal is to have 3 related articles...
related = results[0].more_like_this('content', top=3, numterms=80)
return clean_results(idx, results), clean_results(idx, related)
@gen.coroutine
def documents_last_year(idx):
posts = yield generic(idx, qs="modified:-365 days to now")
return posts
@gen.coroutine
def documents_last_month(idx):
posts = yield generic(idx, qs="modified:-30 days to now")
return posts
@gen.coroutine
def generic(idx, qs=None, q=None, limit=5, parser=None, page=1):
if qs is q is None:
raise ValueError('cannot have a null querystring and query')
if parser is None:
parser = MultifieldParser(
['title', 'keywords', 'summary', 'content', 'author'], idx.schema, group=OrGroup)
# add better date parsing support
parser.add_plugin(DateParserPlugin())
parser.remove_plugin_class(WildcardPlugin)
with idx.searcher() as search:
# generate the Query object
if qs:
query = parser.parse(qs)
else:
query = q
facet = MultiFacet()
facet.add_score()
facet.add_field('modified', reverse=True)
facet.add_field('title')
results = search.search_page(query, pagenum=page, sortedby=facet, pagelen=limit)
res = clean_results(idx, results, query)
# pagination attributes on `search_page` method
res.page_number = results.pagenum # current page number
res.page_total = results.pagecount # total pages in results
res.offset = results.offset # first result of current page
res.pagelen = results.pagelen # the number of max results per page
return res
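# Hedged usage sketch (not part of the original file): assumes a whoosh
# index opened elsewhere and a tornado IOLoop driving the coroutines; the
# index directory and query string are illustrative.
#
#     from whoosh import index
#     idx = index.open_dir('indexdir')
#     posts = yield documents_last_month(idx)
#     page2 = yield generic(idx, qs='tornado', limit=5, page=2)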
|
{
"content_hash": "fec209446fe6aff959184111a88ed0ea",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 97,
"avg_line_length": 36.9,
"alnum_prop": 0.659741041854863,
"repo_name": "blakev/sowing-seasons",
"id": "3c4cc893ab965c1cc7ba0aecbad10f6ee74c6f6a",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summer/ext/search/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3185"
},
{
"name": "HTML",
"bytes": "29821"
},
{
"name": "Python",
"bytes": "36386"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class HostingEnvironmentDeploymentInfo(Model):
"""Information needed to create resources on an App Service Environment.
:param name: Name of the App Service Environment.
:type name: str
:param location: Location of the App Service Environment.
:type location: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, name=None, location=None):
super(HostingEnvironmentDeploymentInfo, self).__init__()
self.name = name
self.location = location
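# Hedged usage sketch (values are illustrative):
#
#     info = HostingEnvironmentDeploymentInfo(name='my-ase', location='westus')
#     print(info.name, info.location)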
|
{
"content_hash": "9f5ad08fed57eb8bf767776872c5bf06",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6401869158878505,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "c625f9361fb94ef10cfeec99093c2713782d10af",
"size": "1116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/hosting_environment_deployment_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Simple-to-use Python interface to The TVDB's API (www.thetvdb.com)
Example usage:
>>> from tvdb_api import Tvdb
>>> t = Tvdb()
>>> t['Lost'][4][11]['episodename']
u'Cabin Fever'
"""
__author__ = "dbr/Ben"
__version__ = "1.6.4"
import os
import urllib
import urllib2
import StringIO
import tempfile
import warnings
import logging
import datetime
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from tvdb_cache import CacheHandler
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
lastTimeout = None
def log():
return logging.getLogger("tvdb_api")
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
pass
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term = None, key = None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term = term, key = key)
if len(searchresult) != 0:
results.extend(searchresult)
#end for cur_season
return results
class Season(dict):
def __init__(self, show = None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term = None, key = None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term = term, key = key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season = None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term = None, key = None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term == None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find( unicode(term).lower() ) > -1:
return self
#end if cur_value.find()
#end for cur_key, cur_value
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive = False,
select_first = False,
debug = False,
cache = True,
banners = False,
actors = False,
custom_ui = None,
language = None,
search_all_languages = False,
apikey = None,
forceConnect=False):
"""interactive (True/False):
When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
Retrieved XML is persisted to disc. If True, it is stored in a
tvdb_api folder under your system's TEMP_DIR; if set to a
str/unicode instance it will use this as the cache
location. If False, disables caching. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
show in any language
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
"""
global lastTimeout
# if the last timeout was less than 1 min ago, just give up
if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
raise tvdb_error("We recently timed out, so giving up early this time")
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
self.urlopener = urllib2.build_opener(
CacheHandler(self.config['cache_location'])
)
elif cache is False:
self.config['cache_enabled'] = False
self.urlopener = urllib2.build_opener() # default opener with no caching
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
self.urlopener = urllib2.build_opener(
CacheHandler(self.config['cache_location'])
)
elif isinstance(cache, urllib2.OpenerDirector):
# If passed something from urllib2.build_opener, use that
log().debug("Using %r as urlopener" % cache)
self.config['cache_enabled'] = True
self.urlopener = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
# List of language from http://www.thetvdb.com/api/0629B785CE550C8D/languages.xml
# Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
"ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://www.thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
#end __init__
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api
"""
return os.path.join(tempfile.gettempdir(), "tvdb_api")
def _loadUrl(self, url, recache = False):
global lastTimeout
try:
log().debug("Retrieving URL %s" % url)
resp = self.urlopener.open(url)
if 'x-local-cache' in resp.headers:
log().debug("URL %s was cached in %s" % (
url,
resp.headers['x-local-cache'])
)
if recache:
log().debug("Attempting to recache %s" % url)
resp.recache()
except (IOError, urllib2.URLError), errormsg:
if not str(errormsg).startswith('HTTP Error'):
lastTimeout = datetime.datetime.now()
raise tvdb_error("Could not connect to server: %s" % (errormsg))
#end try
# handle gzipped content,
# http://dbr.lighthouseapp.com/projects/13342/tickets/72-gzipped-data-patch
if 'gzip' in resp.headers.get("Content-Encoding", ''):
if gzip:
stream = StringIO.StringIO(resp.read())
gz = gzip.GzipFile(fileobj=stream)
return gz.read()
raise tvdb_error("Received gzip data from thetvdb.com, but could not correctly handle it")
return resp.read()
def _getetsrc(self, url):
"""Loads a URL using caching, returns an ElementTree of the source
"""
src = self._loadUrl(url)
try:
# TVDB doesn't sanitize \r (CR) from user input in some fields,
# remove it to avoid errors. Change from SickBeard, from will14m
return ElementTree.fromstring(src.rstrip("\r"))
except SyntaxError:
src = self._loadUrl(url, recache=True)
try:
return ElementTree.fromstring(src.rstrip("\r"))
except SyntaxError, exceptionmsg:
errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % (
exceptionmsg
)
if self.config['cache_enabled']:
errormsg += "\nFirst try emptying the cache folder at..\n%s" % (
self.config['cache_location']
)
errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on"
errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n"
raise tvdb_error(errormsg)
#end _getetsrc
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
Since the nice-to-use tvdb[1][24]['name'] interface
makes it impossible to do tvdb[1][24]['name'] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show = self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season = self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
#end _set_item
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
- Replaces & with &
- Trailing whitespace
"""
data = data.replace(u"&", u"&")
data = data.strip()
return data
#end _cleanData
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
series = urllib.quote(series.encode("utf-8"))
log().debug("Searching for show %s" % series)
seriesEt = self._getetsrc(self.config['url_getSeries'] % (series))
allSeries = []
for series in seriesEt:
result = dict((k.tag.lower(), k.text) for k in series.getchildren())
result['id'] = int(result['id'])
result['lid'] = self.config['langabbv_to_id'][result['language']]
log().debug('Found series %(seriesname)s' % result)
allSeries.append(result)
#end for series
if len(allSeries) == 0:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)")
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
ui = self.config['custom_ui'](config = self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config = self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config = self.config)
#end if config['interactive]
#end if custom_ui != None
return ui.selectSeries(allSeries)
#end _getSeries
def _parseBanners(self, sid):
"""Parses banners XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://www.thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) )
banners = {}
for cur_banner in bannersEt.findall('Banner'):
bid = cur_banner.find('id').text
btype = cur_banner.find('BannerType')
btype2 = cur_banner.find('BannerType2')
if btype is None or btype2 is None:
continue
btype, btype2 = btype.text, btype2.text
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for cur_element in cur_banner.getchildren():
tag = cur_element.tag.lower()
value = cur_element.text
if tag is None or value is None:
continue
tag, value = tag.lower(), value.lower()
banners[btype][btype2][bid][tag] = value
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
Actors are retrieved using t['show name]['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://www.thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
cur_actors = Actors()
for curActorItem in actorsEt.findall("Actor"):
curActor = Actor()
for curInfo in curActorItem:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag == "image":
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
curActor[tag] = value
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
'Configured language %s overrides show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
for curInfo in seriesInfoEt.findall("Series")[0]:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag in ['banner', 'fanart', 'poster']:
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setShowData(sid, tag, value)
#end for series
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
epsEt = self._getetsrc( self.config['url_epInfo'] % (sid, language) )
for cur_ep in epsEt.findall("Episode"):
seas_no = int(cur_ep.find('SeasonNumber').text)
ep_no = int(cur_ep.find('EpisodeNumber').text)
for cur_item in cur_ep.getchildren():
tag = cur_item.tag.lower()
value = cur_item.text
if value is not None:
if tag == 'filename':
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setItem(sid, seas_no, ep_no, tag, value)
#end for cur_ep
#end _geEps
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]) )
sid = self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries( name )
sname, sid = selected_series['seriesname'], selected_series['id']
log().debug('Got %(seriesname)s, id %(id)s' % selected_series)
self.corrections[name] = sid
self._getShowData(selected_series['id'], selected_series['language'])
#end if name in self.corrections
return sid
#end _nameToSid
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'])
return self.shows[key]
key = key.lower() # make key lower case
sid = self._nameToSid(key)
log().debug('Got series id %s' % (sid))
return self.shows[sid]
#end __getitem__
def __repr__(self):
return str(self.shows)
#end __repr__
#end Tvdb
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
|
{
"content_hash": "7aefb8fad6dd45ff035a492b21145d40",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 130,
"avg_line_length": 36.93014705882353,
"alnum_prop": 0.5613406338145014,
"repo_name": "yaymuffins/ypautopony",
"id": "b12fbd820425fa539444e6e17d5f7d4d33c584d5",
"size": "30294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tvdb/tvdb_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54880"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='django-userprofile',
version='0.0.6',
description='Django user profile app.',
long_description = open('README.rst', 'r').read() + open('AUTHORS.rst', 'r').read() + open('CHANGELOG.rst', 'r').read(),
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-userprofile',
packages = find_packages(),
dependency_links = [
'http://github.com/praekelt/django-photologue/tarball/master#egg=django-photologue',
],
install_requires = [
'django-photologue',
'django-registration',
],
tests_require=[
'django-setuptest>=0.0.6',
],
test_suite="setuptest.SetupTestSuite",
include_package_data=True,
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
{
"content_hash": "385ce9b9fb1100f27c643a0fce4f59b4",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 124,
"avg_line_length": 33.371428571428574,
"alnum_prop": 0.6121575342465754,
"repo_name": "praekelt/django-userprofile",
"id": "4c29419f7ee95b3d79d25f5d6e4efb428f8a1125",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "258"
},
{
"name": "Python",
"bytes": "17725"
}
],
"symlink_target": ""
}
|
preco = float(input("Enter the price: "))
p_desconto = float(input("Enter the discount %: "))
desconto = preco * p_desconto / 100
preco_final = preco - desconto
print("Discount R$%5.2f" % desconto)
print("Final price R$%5.2f" % preco_final)
|
{
"content_hash": "b9180a8adeba7beec2069bf627c21b65",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 53,
"avg_line_length": 40.333333333333336,
"alnum_prop": 0.6818181818181818,
"repo_name": "laenderoliveira/exerclivropy",
"id": "d608db6466e2876548662ef02892a902d335ac7f",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cap03/ex-03-11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "321869"
}
],
"symlink_target": ""
}
|
"""
Solve a constant pressure ignition problem where the governing equations are
implemented in Python.
This demonstrates an approach for solving problems where Cantera's reactor
network model cannot be configured to describe the system in question. Here,
Cantera is used for evaluating thermodynamic properties and kinetic rates while
an external ODE solver is used to integrate the resulting equations. In this
case, the SciPy wrapper for VODE is used, which uses the same variable-order BDF
methods as the Sundials CVODES solver used by Cantera.
"""
import cantera as ct
import numpy as np
import scipy.integrate
class ReactorOde(object):
def __init__(self, gas):
# Parameters of the ODE system and auxiliary data are stored in the
# ReactorOde object.
self.gas = gas
self.P = gas.P
def __call__(self, t, y):
"""the ODE function, y' = f(t,y) """
# State vector is [T, Y_1, Y_2, ... Y_K]
self.gas.set_unnormalized_mass_fractions(y[1:])
self.gas.TP = y[0], self.P
rho = self.gas.density
wdot = self.gas.net_production_rates
dTdt = - (np.dot(self.gas.partial_molar_enthalpies, wdot) /
(rho * self.gas.cp))
dYdt = wdot * self.gas.molecular_weights / rho
return np.hstack((dTdt, dYdt))
gas = ct.Solution('gri30.xml')
# Initial condition
gas.TPX = 1001, ct.one_atm, 'H2:2,O2:1,N2:4'
y0 = np.hstack((gas.T, gas.Y))
# Set up objects representing the ODE and the solver
ode = ReactorOde(gas)
solver = scipy.integrate.ode(ode)
solver.set_integrator('vode', method='bdf', with_jacobian=True)
solver.set_initial_value(y0, 0.0)
# Integrate the equations, keeping T(t) and Y(k,t)
t_end = 1e-3
t_out = [0.0]
T_out = [gas.T]
Y_out = [gas.Y]
dt = 1e-5
while solver.successful() and solver.t < t_end:
solver.integrate(solver.t + dt)
t_out.append(solver.t)
T_out.append(gas.T)
Y_out.append(gas.Y)
Y_out = np.array(Y_out).T
# Plot the results
try:
import matplotlib.pyplot as plt
L1 = plt.plot(t_out, T_out, color='r', label='T', lw=2)
plt.xlabel('time (s)')
plt.ylabel('Temperature (K)')
plt.twinx()
L2 = plt.plot(t_out, Y_out[gas.species_index('OH')], label='OH', lw=2)
plt.ylabel('Mass Fraction')
plt.legend(L1+L2, [line.get_label() for line in L1+L2], loc='lower right')
plt.show()
except ImportError:
print('Matplotlib not found. Unable to plot results.')
|
{
"content_hash": "18dcbc834d3258ac734475e60476379d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 80,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6624386252045826,
"repo_name": "Heathckliff/cantera",
"id": "51f3e6c48662a36512c8bd850d5a46f210cd188c",
"size": "2444",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "interfaces/cython/cantera/examples/reactors/custom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1935303"
},
{
"name": "C++",
"bytes": "6751664"
},
{
"name": "CSS",
"bytes": "2167"
},
{
"name": "FORTRAN",
"bytes": "1175454"
},
{
"name": "Groff",
"bytes": "2843"
},
{
"name": "HTML",
"bytes": "17002"
},
{
"name": "M",
"bytes": "980"
},
{
"name": "Matlab",
"bytes": "284988"
},
{
"name": "Python",
"bytes": "1055361"
},
{
"name": "Shell",
"bytes": "2662"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import logging
from operator import itemgetter
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
def flavor_list(request):
"""Utility method to retrieve a list of flavors."""
try:
return api.nova.flavor_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve instance flavors.'))
return []
def sort_flavor_list(request, flavors, with_menu_label=True):
"""Utility method to sort a list of flavors.
By default, returns the available flavors, sorted by RAM usage (ascending).
Override these behaviours with a ``CREATE_INSTANCE_FLAVOR_SORT`` dict
in ``local_settings.py``.
"""
def get_key(flavor, sort_key):
try:
return getattr(flavor, sort_key)
except AttributeError:
LOG.warning('Could not find sort key "%s". Using the default '
'"ram" instead.', sort_key)
return getattr(flavor, 'ram')
try:
flavor_sort = settings.CREATE_INSTANCE_FLAVOR_SORT
sort_key = flavor_sort.get('key', 'ram')
rev = flavor_sort.get('reverse', False)
if not callable(sort_key):
def key(flavor):
return get_key(flavor, sort_key)
else:
key = sort_key
if with_menu_label:
flavor_list = [(flavor.id, '%s' % flavor.name)
for flavor in sorted(flavors, key=key, reverse=rev)]
else:
flavor_list = sorted(flavors, key=key, reverse=rev)
return flavor_list
except Exception:
exceptions.handle(request,
_('Unable to sort instance flavors.'))
return []
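# Hedged example of the override described in the docstring above, placed in
# local_settings.py; 'key' and 'reverse' are the two keys this function reads:
#
#     CREATE_INSTANCE_FLAVOR_SORT = {
#         'key': 'vcpus',    # any flavor attribute name, or a callable
#         'reverse': True,   # sort descending
#     }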
def server_group_list(request):
"""Utility method to retrieve a list of server groups."""
try:
return api.nova.server_group_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve Nova server groups.'))
return []
def network_field_data(request, include_empty_option=False, with_cidr=False,
for_launch=False):
"""Returns a list of tuples of all networks.
Generates a list of networks available to the user (request) and returns
a list of (id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the front
of the list
:param with_cidr: flag to include subnets cidr in field name
:return: list of (id, name) tuples
"""
tenant_id = request.user.tenant_id
networks = []
if api.base.is_service_enabled(request, 'network'):
extra_params = {}
if for_launch:
extra_params['include_pre_auto_allocate'] = True
try:
networks = api.neutron.network_list_for_tenant(
request, tenant_id, **extra_params)
except Exception:
msg = _('Failed to get network list.')
exceptions.handle(request, msg)
_networks = []
for n in networks:
if not n['subnets']:
continue
v = n.name_or_id
if with_cidr:
cidrs = ([subnet.cidr for subnet in n['subnets']
if subnet.ip_version == 4] +
[subnet.cidr for subnet in n['subnets']
if subnet.ip_version == 6])
v += ' (%s)' % ', '.join(cidrs)
_networks.append((n.id, v))
networks = sorted(_networks, key=itemgetter(1))
if not networks:
if include_empty_option:
return [("", _("No networks available")), ]
return []
if include_empty_option:
return [("", _("Select Network")), ] + networks
return networks
def keypair_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all keypairs.
Generates a list of keypairs available to the user (request) and returns
a list of (id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the front
of the list
:return: list of (id, name) tuples
"""
keypair_list = []
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except Exception:
exceptions.handle(request, _('Unable to retrieve key pairs.'))
if not keypair_list:
if include_empty_option:
return [("", _("No key pairs available")), ]
return []
if include_empty_option:
return [("", _("Select a key pair")), ] + keypair_list
return keypair_list
def flavor_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all image flavors.
Generates a list of available image flavors and returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the front
of the list
:return: list of (id, name) tuples
"""
flavors = flavor_list(request)
if flavors:
flavors_list = sort_flavor_list(request, flavors)
if include_empty_option:
return [("", _("Select Flavor")), ] + flavors_list
return flavors_list
if include_empty_option:
return [("", _("No flavors available")), ]
return []
def port_field_data(request, with_network=False):
"""Returns a list of tuples of all ports available for the tenant.
Generates a list of ports that have no device_owner based on the networks
available to the tenant doing the request.
:param request: django http request object
:param with_network: include network name in field name
:return: list of (id, name) tuples
"""
def add_more_info_port_name(port, network):
# add more info to the port for the display
port_name = "{} ({})".format(
port.name_or_id, ",".join(
[ip['ip_address'] for ip in port['fixed_ips']]))
if with_network:
port_name += " - {}".format(network.name_or_id)
return port_name
ports = []
if api.base.is_service_enabled(request, 'network'):
network_list = api.neutron.network_list_for_tenant(
request, request.user.tenant_id)
network_dict = dict((n.id, n) for n in network_list)
ports = [
(port.id,
add_more_info_port_name(port, network_dict[port.network_id]))
for port
in api.neutron.port_list_with_trunk_types(
request, tenant_id=request.user.tenant_id)
if (not port.device_owner and
not isinstance(port, api.neutron.PortTrunkSubport))
]
ports.sort(key=lambda obj: obj[1])
return ports
def server_group_field_data(request):
"""Returns a list of tuples of all server groups.
Generates a list of available server groups and returns a list of
(id, name) tuples.
:param request: django http request object
:return: list of (id, name) tuples
"""
server_groups = server_group_list(request)
if server_groups:
server_groups_list = [(sg.id, sg.name) for sg in server_groups]
server_groups_list.sort(key=lambda obj: obj[1])
return [("", _("Select Server Group")), ] + server_groups_list
return [("", _("No server groups available")), ]
def resolve_flavor(request, instance, flavors=None, **kwargs):
"""Resolves name of instance flavor independent of API microversion
:param request: django http request object
:param instance: api._nova.Server instance to resolve flavor
:param flavors: dict of flavors already retrieved
:param kwargs: flavor parameters to return if a flavor discrepancy is hit
:return: flavor name or default placeholder
"""
def flavor_from_dict(flavor_dict):
"""Creates flavor-like objects from dictionary
:param flavor_dict: dictionary containing vcpus, ram, name, etc. values
:return: novaclient.v2.flavors.Flavor like object
"""
return namedtuple('Flavor', flavor_dict.keys())(*flavor_dict.values())
if flavors is None:
flavors = {}
flavor_id = instance.flavor.get('id')
if flavor_id: # Nova API <=2.46
if flavor_id in flavors:
return flavors[flavor_id]
try:
return api.nova.flavor_get(request, flavor_id)
except Exception:
msg = _('Unable to retrieve flavor information '
'for instance "%s".') % instance.id
exceptions.handle(request, msg, ignore=True)
fallback_flavor = {
'vcpus': 0, 'ram': 0, 'disk': 0, 'ephemeral': 0, 'swap': 0,
'name': _('Not available'),
'original_name': _('Not available'),
'extra_specs': {},
}
fallback_flavor.update(kwargs)
return flavor_from_dict(fallback_flavor)
else:
instance.flavor['name'] = instance.flavor['original_name']
return flavor_from_dict(instance.flavor)
|
{
"content_hash": "72b35ac989b2618b40a65dfa911b6c63",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 35.1203007518797,
"alnum_prop": 0.6001926782273603,
"repo_name": "openstack/horizon",
"id": "710cbe7c31877cd6a02c25b7b35bb2672fc5e8ce",
"size": "9888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/instances/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
}
|
"""
AMQP management client for Qpid dispatch.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import qpid_dispatch_site
import proton
from proton import Url
from .error import * # import all error symbols for convenience to users.
from .entity import EntityBase, clean_dict
from proton.utils import SyncRequestResponse, BlockingConnection
class Entity(EntityBase):
"""
Proxy for an AMQP manageable entity.
Modifying local attributes dict will not change the remote entity. Call
update() to send the local attributes to the remote entity.
The standard AMQP requests read, update and delete are defined
here. create() is defined on L{Node}.
Attribute access:
- via index operator: entity['foo']
- as python attributes: entity.foo (only if attribute name is a legal python identifier)
@ivar attributes: Map of attribute values for this entity.
"""
def __init__(self, node, attributes=None, **kwattrs):
super(Entity, self).__init__(attributes, **kwattrs)
self.__dict__['_node'] = node # Avoid getattr recursion
def call(self, operation, expect=OK, **arguments):
"""Call an arbitrary management method on this entity"""
request = self._node.request(
operation=operation, type=self.type, identity=self.identity, **arguments)
return self._node.call(request, expect=expect).body
def read(self):
"""Read the remote entity attributes into the local attributes."""
self.attributes = self.call(u'READ', expect=OK)
def update(self):
"""Update the remote entity attributes from the local attributes."""
self.attributes = self.call(u'UPDATE', expect=OK, body=self.attributes)
def delete(self):
"""Delete the remote entity"""
self.call(u'DELETE', expect=NO_CONTENT)
class Node(object):
"""Client proxy for an AMQP management node"""
def clean_attrs(self, attrs):
BOOL_VALUES = {"yes" : True,
"true" : True,
"on" : True,
"no" : False,
"false": False,
"off" : False}
if isinstance(attrs, dict):
for key in attrs.keys():
if isinstance(attrs[key], str) and attrs[key] in BOOL_VALUES.keys():
attrs[key] = BOOL_VALUES[attrs[key]]
return attrs
@staticmethod
def connection(url=None, router=None, timeout=10, ssl_domain=None, sasl=None, edge_router=None):
"""Return a BlockingConnection suitable for connecting to a management node
@param url: URL of the management node.
@param router: If address does not contain a path, use the management node for this router ID.
If not specified and address does not contain a path, use the default management node.
"""
url = Url(url) # Convert string to Url class.
if url.path is None:
if router:
url.path = u'_topo/0/%s/$management' % router
elif edge_router:
url.path = u'_edge/%s/$management' % edge_router
else:
url.path = u'$management'
if ssl_domain:
sasl_enabled = True
else:
sasl_enabled = True if sasl else False
# if sasl_mechanism is unicode, convert it to python string
return BlockingConnection(url,
timeout=timeout,
ssl_domain=ssl_domain,
sasl_enabled=sasl_enabled,
allowed_mechs=str(sasl.mechs) if sasl and sasl.mechs != None else None,
user=str(sasl.user) if sasl and sasl.user != None else None,
password=str(sasl.password) if sasl and sasl.password != None else None)
@staticmethod
def connect(url=None, router=None, timeout=10, ssl_domain=None, sasl=None,
edge_router=None):
"""Return a Node connected with the given parameters, see L{connection}"""
return Node(Node.connection(url, router, timeout, ssl_domain, sasl,
edge_router=edge_router))
def __init__(self, connection, locales=None):
"""
Create a management node proxy using the given connection.
@param locales: Default list of locales for management operations.
@param connection: a L{BlockingConnection} to the management agent.
"""
self.name = self.identity = u'self'
self.type = u'org.amqp.management' # AMQP management node type
        self.locales = locales
self.url = connection.url
self.client = SyncRequestResponse(connection, self.url.path)
self.reply_to = self.client.reply_to
self.connection = connection
def set_client(self, url_path):
if url_path:
self.url.path = u'%s'%url_path
self.client = SyncRequestResponse(self.connection, self.url.path)
def close(self):
"""Shut down the node"""
if self.client:
self.client.connection.close()
self.client = None
def __repr__(self):
return "%s(%s)"%(self.__class__.__name__, self.url)
@staticmethod
def check_response(response, expect=OK):
"""
Check a management response message for errors and correlation ID.
"""
code = response.properties.get(u'statusCode')
if code != expect:
if 200 <= code <= 299:
raise ValueError("Response was %s(%s) but expected %s(%s): %s" % (
code, STATUS_TEXT[code], expect, STATUS_TEXT[expect],
response.properties.get(u'statusDescription')))
else:
raise ManagementError.create(code, response.properties.get(u'statusDescription'))
def request(self, body=None, **properties):
"""
        Make a L{proton.Message} containing a management request.
@param body: The request body, a dict or list.
@param properties: Keyword arguments for application-properties of the request.
        @return: L{proton.Message} containing the management request.
"""
if self.locales: properties.setdefault(u'locales', self.locales)
request = proton.Message()
request.properties = clean_dict(properties)
request.body = body or {}
return request
def node_request(self, body=None, **properties):
"""Construct a request for the managment node itself"""
return self.request(body, name=self.name, type=self.type, **properties)
def call(self, request, expect=OK):
"""
Send a management request message, wait for a response.
@return: Response message.
"""
response = self.client.call(request)
self.check_response(response, expect=expect)
return response
class QueryResponse(object):
"""
Result returned by L{query}.
@ivar attribute_names: List of attribute names for the results.
@ivar results: list of lists of attribute values in same order as attribute_names
"""
def __init__(self, node, attribute_names, results):
"""
            @param node: the L{Node} that issued the query.
            @param attribute_names: list of attribute names in the results.
            @param results: list of result rows.
"""
self.node = node
self.attribute_names = attribute_names
self.results = results
def iter_dicts(self, clean=False):
"""
Return an iterator that yields a dictionary for each result.
@param clean: if True remove any None values from returned dictionaries.
"""
for r in self.results:
if clean: yield clean_dict(zip(self.attribute_names, r))
else: yield dict(zip(self.attribute_names, r))
def iter_entities(self, clean=False):
"""
Return an iterator that yields an L{Entity} for each result.
@param clean: if True remove any None values from returned dictionaries.
"""
for d in self.iter_dicts(clean=clean): yield Entity(self.node, d)
def get_dicts(self, clean=False):
"""Results as list of dicts."""
return [d for d in self.iter_dicts(clean=clean)]
def get_entities(self, clean=False):
"""Results as list of entities."""
return [d for d in self.iter_entities(clean=clean)]
def __repr__(self):
return "QueryResponse(attribute_names=%r, results=%r"%(self.attribute_names, self.results)
def query(self, type=None, attribute_names=None, offset=None, count=None):
"""
Send an AMQP management query message and return the response.
At least one of type, attribute_names must be specified.
@keyword type: The type of entity to query.
@keyword attribute_names: A list of attribute names to query.
@keyword offset: An integer offset into the list of results to return.
@keyword count: A count of the maximum number of results to return.
@return: A L{QueryResponse}
"""
# There is a bug in proton (PROTON-1846) wherein we cannot ask for
# too many rows. So, as a safety we are going to ask only for
# MAX_ALLOWED_COUNT_PER_REQUEST. Since this is used by both qdstat
# and qdmanage, we have determined that the optimal value for
# MAX_ALLOWED_COUNT_PER_REQUEST is 500
MAX_ALLOWED_COUNT_PER_REQUEST = 500
response_results = []
response_attr_names = []
if offset is None:
offset = 0
if count is None or count==0:
# count has not been specified. For each request the
# maximum number of rows we can get without proton
# failing is MAX_ALLOWED_COUNT_PER_REQUEST
request_count = MAX_ALLOWED_COUNT_PER_REQUEST
else:
request_count = min(MAX_ALLOWED_COUNT_PER_REQUEST, count)
while True:
request = self.node_request(
{u'attributeNames': attribute_names or []},
operation=u'QUERY', entityType=type, offset=offset,
count=request_count)
response = self.call(request)
if not response_attr_names:
response_attr_names += response.body[u'attributeNames']
response_results += response.body[u'results']
if len(response.body[u'results']) < request_count:
break
if count:
len_response_results = len(response_results)
if count == len_response_results:
break
if count - len_response_results < request_count:
request_count = count - len_response_results
offset += request_count
        query_response = Node.QueryResponse(self,
                                            response_attr_names,
                                            response_results)
        return query_response
def create(self, attributes=None, type=None, name=None):
"""
Create an entity.
type and name can be specified in the attributes.
@param attributes: Attributes for the new entity.
@param type: Type of entity to create.
@param name: Name for the new entity.
@return: Entity proxy for the new entity.
"""
attributes = attributes or {}
type = type or attributes.get(u'type')
name = name or attributes.get(u'name')
request = self.request(operation=u'CREATE', type=type, name=name, body=attributes)
return Entity(self, self.call(request, expect=CREATED).body)
def read(self, type=None, name=None, identity=None):
"""
Read an AMQP entity.
If both name and identity are specified, only identity is used.
@param type: Entity type.
@param name: Entity name.
@param identity: Entity identity.
@return: An L{Entity}
"""
if name and identity: name = None # Only specify one
request = self.request(operation=u'READ', type=type, name=name, identity=identity)
return Entity(self, self.call(request).body)
def update(self, attributes, type=None, name=None, identity=None):
"""
Update an entity with attributes.
type, name and identity can be specified in the attributes.
If both name and identity are specified, only identity is used.
        @param attributes: Attributes to update on the entity.
@param type: Entity type.
@param name: Entity name.
@param identity: Entity identity.
@return: L{Entity} for the updated entity.
"""
attributes = attributes or {}
type = type or attributes.get(u'type')
name = name or attributes.get(u'name')
identity = identity or attributes.get(u'identity')
if name and identity: name = None # Only send one
        request = self.request(operation=u'UPDATE', type=type, name=name,
identity=identity, body=self.clean_attrs(attributes))
return Entity(self, self.call(request).body)
def delete(self, type=None, name=None, identity=None):
"""
Delete the remote entity.
If both name and identity are specified, only identity is used.
@param type: Entity type.
@param name: Entity name.
@param identity: Entity identity.
"""
if name and identity: name = None # Only specify one
        request = self.request(operation=u'DELETE', type=type, name=name,
identity=identity)
self.call(request, expect=NO_CONTENT)
def get_types(self, type=None):
return self.call(self.node_request(operation=u"GET-TYPES", entityType=type)).body
def get_annotations(self, type=None):
return self.call(self.node_request(operation=u"GET-ANNOTATIONS", entityType=type)).body
def get_attributes(self, type=None):
return self.call(self.node_request(operation=u"GET-ATTRIBUTES", entityType=type)).body
def get_operations(self, type=None):
return self.call(self.node_request(operation=u"GET-OPERATIONS", entityType=type)).body
def get_mgmt_nodes(self, type=None):
return self.call(self.node_request(operation=u"GET-MGMT-NODES", entityType=type)).body
def get_log(self, limit=None, type=None):
return self.call(self.node_request(operation=u"GET-LOG", entityType=type, limit=limit)).body
|
{
"content_hash": "5c98f27a439ee300436145daec74d0c8",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 106,
"avg_line_length": 40.29700272479564,
"alnum_prop": 0.6034890797214145,
"repo_name": "irinabov/debian-qpid-dispatch",
"id": "81c03d3e926388d1b1e8b35af202f6a7a6621e8c",
"size": "15578",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/qpid_dispatch/management/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1954231"
},
{
"name": "C++",
"bytes": "58231"
},
{
"name": "CMake",
"bytes": "42570"
},
{
"name": "CSS",
"bytes": "24393"
},
{
"name": "Dockerfile",
"bytes": "3278"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "719793"
},
{
"name": "Python",
"bytes": "2115168"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
}
|
import sys
from math import log
from app.dataformats import peptable as peptabledata
from app.dataformats import prottable as prottabledata
from app.dataformats import mzidtsv as mzidtsvdata
from app.actions.psmtopeptable import evaluate_peptide
from app.readers import fasta
def generate_bestpep_proteins(peptides, scorecol, minlog, outputaccfield,
protcol, higherbetter=True):
"""Best peptide for each protein in a table"""
protein_peptides = {}
if minlog:
higherbetter = False
if not protcol:
protcol = peptabledata.HEADER_MASTERPROTEINS
for peptide in peptides:
p_acc = peptide[protcol]
if ';' in p_acc or p_acc == 'NA':
continue
protein_peptides = evaluate_peptide(protein_peptides, peptide, p_acc,
higherbetter, scorecol, fncol=False)
if minlog:
try:
nextbestscore = min([pep['score'] for pep in
protein_peptides.values()
if pep['score'] > 0])
except ValueError:
print('WARNING: Cannot find score of type {} which is above 0. '
'Only scores above zero can have a -log value.'.format(scorecol))
nextbestscore = -log(1e-06, 10) # 1 decoy in a million, fake value
else:
nextbestscore = -log(nextbestscore, 10)
for protein, peptide in protein_peptides.items():
if minlog and peptide['score'] != 'NA':
peptide['score'] = log_score(peptide['score'], nextbestscore)
yield {
outputaccfield: protein,
prottabledata.HEADER_QSCORE: str(peptide['score'])
}
def log_score(score, nextbestscore):
try:
logged_score = -log(score, 10)
except ValueError:
logged_score = nextbestscore + 2
return logged_score
def generate_classic_fdr(target, decoy, featfield):
'''Normal FDR in which we just rank all the proteins/genes and do not pick from pairs'''
tproteins = [x for x in target if get_score(x) is not None]
dproteins = [x for x in decoy if get_score(x) is not None]
[x.update({'target_decoy': 'target'}) for x in tproteins]
[x.update({'target_decoy': 'decoy'}) for x in dproteins]
proteins = sorted(tproteins + dproteins, key=lambda x: get_score(x),
reverse=True)
#fdrheader = headerfields['proteinfdr'][prottabledata.HEADER_QVAL][None]
# FIXME complex stupid headerfields system, remove it?
fdrheader = prottabledata.HEADER_QVAL
for protein in qvalue_generator(fdrheader, proteins, featfield):
yield protein
def generate_pick_fdr(target, decoy, tfastafn, dfastafn, featfield,
fastadelim, genefield):
t_scores, d_scores = {}, {}
for protein in target:
acc = protein[featfield]
t_scores[acc] = protein
t_scores[acc]['target_decoy'] = 'target'
for protein in decoy:
acc = protein[featfield]
d_scores[acc] = protein
d_scores[acc]['target_decoy'] = 'decoy'
# FIXME make sure pairs are correct?
# I mean, they are just names really - we should probably verify the T/D fasta?
# Also use the concatenated DB instead, which is sort of the point of picking
prefixlen = len(mzidtsvdata.DECOY_PREFIX)
if featfield == prottabledata.HEADER_PROTEIN:
acctype = 'protein'
elif featfield == prottabledata.HEADER_GENENAME:
acctype = 'genename'
elif featfield == prottabledata.HEADER_GENEID:
acctype = 'ensg'
else:
print('Error determining accession type of table to calculate FDR for')
sys.exit(1)
tfasta = fasta.get_genes_pickfdr(tfastafn, acctype, fastadelim, genefield)
dfasta = fasta.get_genes_pickfdr(dfastafn, acctype, fastadelim, genefield)
tdmap = {}
for target, decoy in zip(tfasta, dfasta):
tdmap[target] = decoy
picked_proteins = []
for tgene, dgene in tdmap.items():
picked = pick_target_decoy(t_scores.get(tgene), d_scores.get(dgene))
if picked:
picked_proteins.append(picked)
sorted_proteins = sorted(picked_proteins, key=lambda x: get_score(x),
reverse=True)
#fdrheader = headerfields['proteinfdr'][prottabledata.HEADER_QVAL][None]
fdrheader = prottabledata.HEADER_QVAL
for protein in qvalue_generator(fdrheader, sorted_proteins, featfield):
yield protein
def qvalue_generator(fdrheader, sorted_features, featfield):
tdcounter = {'target': 0, 'decoy': 0}
previousscore = get_score(sorted_features[0])
outfeats = []
for feat in sorted_features:
outfeat = {k: v for k, v in feat.items()}
score = get_score(outfeat)
if score != previousscore:
# new score, all proteins with previous score get same fdr
previousscore = score
try:
fdr = tdcounter['decoy'] / float(tdcounter['target'])
except ZeroDivisionError:
fdr = 1
for feat in sorted(outfeats, key=lambda x: x[featfield]):
feat[fdrheader] = fdr
yield feat
outfeats = []
tdcounter[outfeat['target_decoy']] += 1
# Only report target hits so FDR=D/T
if outfeat['target_decoy'] == 'target':
outfeats.append(outfeat)
# All proteins from bottom of list (no new score) get FDR as well
try:
fdr = tdcounter['decoy'] / float(tdcounter['target'])
except ZeroDivisionError:
fdr = 1
for feat in sorted(outfeats, key=lambda x: x[featfield]):
feat[fdrheader] = fdr
yield feat
def get_score(protein):
try:
return float(protein[prottabledata.HEADER_QSCORE])
except (TypeError, ValueError):
return None
def pick_target_decoy(tfeature, dfeature):
    """Feed it with a target and a decoy feature (each carrying a q-score),
    and this will return the winning feature, or False if no pick can be made"""
tscore, dscore = get_score(tfeature), get_score(dfeature)
if tscore is None and dscore is None:
return False
    elif None in [tscore, dscore]:
        # return the feature whose score is not None
        return [v for k, v in {tscore: tfeature, dscore: dfeature}.items()
                if k is not None][0]
    elif tscore == dscore:
        # same score
        return False
elif tscore > dscore:
return tfeature
elif tscore < dscore:
return dfeature
else:
# in case uncaught edgecase occurs
print('WARNING, target score {} and decoy score {} could not be '
'compared'.format(tscore, dscore))
return False
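# Minimal illustration (hypothetical data, not from the original module):
# picking between a target/decoy pair keeps the feature with the higher
# q-score, and returns False when neither feature can be scored.
def _example_pick_target_decoy():
    target = {prottabledata.HEADER_QSCORE: '3.2'}
    decoy = {prottabledata.HEADER_QSCORE: '1.1'}
    assert pick_target_decoy(target, decoy) is target
    assert pick_target_decoy(None, None) is False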
def generate_top_ms1_peptides(peptides, protcol):
"""Fed with a peptides generator, this returns the 3 PSMs with
the highest precursor intensities (or areas, or whatever is
given in the HEADER_PRECURSOR_QUANT"""
top_ms1_peps = {}
for peptide in peptides:
protacc = peptide[protcol]
precursor_amount = peptide[peptabledata.HEADER_AREA]
if ';' in protacc or precursor_amount == 'NA':
continue
precursor_amount = float(precursor_amount)
pepseq = peptide[peptabledata.HEADER_PEPTIDE]
try:
peptide_area = top_ms1_peps[protacc][pepseq]
except KeyError:
try:
top_ms1_peps[protacc][pepseq] = precursor_amount
except KeyError:
top_ms1_peps[protacc] = {pepseq: precursor_amount}
else:
if precursor_amount > peptide_area:
top_ms1_peps[protacc][pepseq] = precursor_amount
return top_ms1_peps
def calculate_protein_precursor_quant(top_ms1_peps, prot_acc):
try:
amounts = top_ms1_peps[prot_acc].values()
except KeyError:
return 'NA'
else:
amounts = sorted([x for x in amounts if x > 0], reverse=True)[:3]
return sum(amounts) / len(amounts)
def add_ms1_quant_from_top3_mzidtsv(features, peptides, outputaccfield, featcol):
"""Collects peptides with the highest precursor quant values,
adds sum of the top 3 of these to a protein table"""
top_ms1_peps = generate_top_ms1_peptides(peptides, featcol)
for feat in features:
acc = feat[outputaccfield]
prec_area = calculate_protein_precursor_quant(top_ms1_peps, acc)
outfeat = {k: v for k, v in feat.items()}
outfeat[prottabledata.HEADER_AREA] = str(prec_area)
# outprotein[headerfields['precursorquant'][
# prottabledata.HEADER_AREA][None]] = str(prec_area)
yield outfeat
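# Hedged usage sketch (toy values, not part of the original module): score-
# ranked target/decoy features flow through qvalue_generator, which assigns
# each target an FDR of decoys-so-far over targets-so-far at its score level.
def _example_qvalue_generator():
    feats = [
        {'acc': 'P1', prottabledata.HEADER_QSCORE: '5.0', 'target_decoy': 'target'},
        {'acc': 'P2', prottabledata.HEADER_QSCORE: '4.0', 'target_decoy': 'decoy'},
        {'acc': 'P3', prottabledata.HEADER_QSCORE: '3.0', 'target_decoy': 'target'},
    ]
    return list(qvalue_generator(prottabledata.HEADER_QVAL, feats, 'acc'))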
|
{
"content_hash": "cfebf6b0bc87d39be388f057a808b85c",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 92,
"avg_line_length": 39.32876712328767,
"alnum_prop": 0.6289330082433531,
"repo_name": "glormph/msstitch",
"id": "fb88260b590409bb11fb67b38cbb21b8226adcfe",
"size": "8613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/actions/proteins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "451340"
}
],
"symlink_target": ""
}
|
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
class Approvable(object):
"""Mixin for items that must be approved before becoming usable."""
@sqlalchemy.ext.declarative.declared_attr
def approver_id(cls):
return sqlalchemy.Column(
'approvedid',
sqlalchemy.ForeignKey('member.memberid'),
nullable=False
)
@sqlalchemy.ext.declarative.declared_attr
def approver(cls):
return sqlalchemy.orm.relationship(
'Person',
primaryjoin='Person.id == {}.approver_id'.format(cls.__name__)
)
class Ownable(object):
"""Mixins for models that are ownable by people."""
@sqlalchemy.ext.declarative.declared_attr
def owner_id(cls):
return sqlalchemy.Column(
'memberid',
sqlalchemy.ForeignKey('member.memberid'),
nullable=False
)
@sqlalchemy.ext.declarative.declared_attr
def owner(cls):
return sqlalchemy.orm.relationship(
'Person',
primaryjoin='Person.id == {}.owner_id'.format(cls.__name__)
)
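# Hedged usage sketch (hypothetical model, not part of the original module):
# mixing Ownable into a declarative model adds the memberid foreign key and
# an 'owner' relationship resolved against Person, e.g.:
#
#     Base = sqlalchemy.ext.declarative.declarative_base()
#
#     class Podcast(Ownable, Base):
#         __tablename__ = 'podcast'
#         id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)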
|
{
"content_hash": "002f932b05e2a17575d708906b822b59",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 29.08108108108108,
"alnum_prop": 0.6059479553903345,
"repo_name": "UniversityRadioYork/lass-pyramid",
"id": "d7ec85cf5d9d1e185cf9db52800dbb1f393f07e2",
"size": "1076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "people/mixins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "219469"
}
],
"symlink_target": ""
}
|
import sys
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from cms.models.managers import PageModeratorStateManager
from cms.models.pagemodel import Page
# NOTE: those are not just numbers!! we will do binary AND on them,
# so pay attention when adding/changing them, or MASKs..
ACCESS_PAGE = 1
ACCESS_CHILDREN = 2 # just immediate children (1 level)
ACCESS_PAGE_AND_CHILDREN = 3 # page and immediate children (1 level)
ACCESS_DESCENDANTS = 4
ACCESS_PAGE_AND_DESCENDANTS = 5
# binary masks for ACCESS permissions
MASK_PAGE = 1
MASK_CHILDREN = 2
MASK_DESCENDANTS = 4
ACCESS_CHOICES = (
(ACCESS_PAGE, _('Current page')),
(ACCESS_CHILDREN, _('Page children (immediate)')),
(ACCESS_PAGE_AND_CHILDREN, _('Page and children (immediate)')),
(ACCESS_DESCENDANTS, _('Page descendants')),
(ACCESS_PAGE_AND_DESCENDANTS, _('Page and descendants')),
)
################################################################################
# Moderation
################################################################################
class PageModerator(models.Model):
"""
Page moderator holds user / page / moderation type states. User can be
    assigned to any page (to which he has permissions), and say which
moderation depth he requires.
"""
    MAX_MODERATION_LEVEL = sys.maxint # just a number
page = models.ForeignKey(Page, verbose_name=_('Page'))
user = models.ForeignKey(User, verbose_name=_('User'))
# TODO: permission stuff could be changed to this structure also, this gives
# better querying performance
moderate_page = models.BooleanField(_('Moderate page'), blank=True, default=False)
moderate_children = models.BooleanField(_('Moderate children'), blank=True, default=False)
moderate_descendants = models.BooleanField(_('Moderate descendants'), blank=True, default=False)
class Meta:
verbose_name = _('PageModerator')
verbose_name_plural = _('PageModerator')
app_label = 'cms'
def set_decimal(self, state):
"""Converts and sets binary state to local attributes
"""
self.moderate_page = bool(state & MASK_PAGE)
moderate_children = bool(state & MASK_CHILDREN)
moderate_descendants = bool(state & MASK_DESCENDANTS)
if moderate_descendants:
moderate_children = True
self.moderate_children = moderate_children
self.moderate_descendants = moderate_descendants
def get_decimal(self):
return self.moderate_page * MASK_PAGE + \
self.moderate_children * MASK_CHILDREN + \
self.moderate_descendants * MASK_DESCENDANTS
def __unicode__(self):
return u"%s on %s mod: %d" % (self.user, self.page, self.get_decimal())
class PageModeratorState(models.Model):
"""PageModeratorState memories all actions made on page.
Page can be in only one advanced state.
"""
ACTION_ADD = "ADD"
ACTION_CHANGED = "CHA"
ACTION_PUBLISH = "PUB"
ACTION_UNPUBLISH = "UNP"
ACTION_MOVE = "MOV"
# advanced states
ACTION_DELETE = "DEL"
# approve state
ACTION_APPROVE = "APP"
_action_choices = (
(ACTION_ADD, _('created')),
(ACTION_CHANGED, _('changed')),
(ACTION_DELETE, _('delete req.')),
(ACTION_MOVE, _('move req.')),
(ACTION_PUBLISH, _('publish req.')),
(ACTION_UNPUBLISH, _('unpublish req.')),
        (ACTION_APPROVE, _('approved')), # Approved by somebody in the approval process
)
page = models.ForeignKey(Page)
user = models.ForeignKey(User, null=True)
created = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=3, choices=_action_choices, null=True, blank=True)
message = models.TextField(max_length=1000, blank=True, default="")
objects = PageModeratorStateManager()
class Meta:
verbose_name = _('Page moderator state')
verbose_name_plural = _('Page moderator states')
ordering = ('page', 'action', '-created') # newer first
app_label = 'cms'
css_class = lambda self: self.action.lower()
def __unicode__(self):
return u"%s: %s" % (self.page, self.get_action_display())
|
{
"content_hash": "585a637cdb59f43f78ae56263e724037",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 100,
"avg_line_length": 34.475806451612904,
"alnum_prop": 0.6355555555555555,
"repo_name": "pbs/django-cms",
"id": "ccb635bbee3a65cf915464823ce2f21b6152b2ee",
"size": "4299",
"binary": false,
"copies": "1",
"ref": "refs/heads/support/2.3.x",
"path": "cms/models/moderatormodels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "108603"
},
{
"name": "HTML",
"bytes": "289317"
},
{
"name": "JavaScript",
"bytes": "657946"
},
{
"name": "PHP",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "2151038"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ["image.nonlocal_uri"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinxcontrib.httpdomain",
"sphinxcontrib.redoc",
]
# Autodoc mocking to fix ReadTheDocs builds missing system dependencies
autodoc_mock_imports = ["gssapi", "paramiko[gssapi]"]
redoc = [
{
"page": "_static/api",
"spec": "openapi.json",
"embed": True,
"opts": {
"hide-loading": True,
"hide-hostname": True,
},
}
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "reana"
copyright = "2017-2020, info@reana.io"
author = "info@reana.io"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join("..", "reana_job_controller", "version.py"), "rt") as fp:
exec(fp.read(), g)
version = g["__version__"]
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"description": """<p>REANA-Job-Controller is a component of the <a
href="http://www.reana.io">REANA</a> reusable and
reproducible research data analysis
platform.</p><p>REANA-Job-Controller takes care of
executing and managing jobs on compute clouds.</p>""",
"github_user": "reanahub",
"github_repo": "reana-job-controller",
"github_button": False,
"github_banner": True,
"show_powered_by": False,
"extra_nav_links": {
"REANA@DockerHub": "https://hub.docker.com/u/reanahub/",
"REANA@GitHub": "https://github.com/reanahub",
"REANA@Twitter": "https://twitter.com/reanahub",
"REANA@Web": "http://www.reana.io",
},
"nosidebar": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "reanadoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
# (master_doc, 'reana.tex', 'reana Documentation',
# 'info@reana.io', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "reana", "reana Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"reana",
"reana Documentation",
author,
"reana",
"One line description of project.",
"Miscellaneous",
),
]
rest_api_modules = ["reana_job_controller.rest"]
def get_name(full_module_name):
"""
    Pull out the module and function name from the full_module_name.
    Splits the full_module_name by "."'s.
    """
module_name = ".".join(full_module_name.split(".")[:2])
function_name = full_module_name.split(".")[-1]
return module_name, function_name
def process_docstring(app, what, name, obj, options, lines):
"""
    Replaces the OpenAPI docstring of REST endpoints with a hyperlink to the
    ReDoc API page, using the endpoint summary as the link text.
"""
module_name, function_name = get_name(name)
description = ""
operation_id = ""
if what != "module" and module_name in rest_api_modules:
for line in lines:
if "summary:" in line:
description = line.split("summary: ", 1)[1]
if "operationId:" in line:
operation_id = line.split("operationId: ", 1)[1]
url = "`%s <_static/api.html#operation/%s>`_" % (description, operation_id)
# clearing the list of docstrings
del lines[:]
# adding back description
lines.append(url)
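# Hedged illustration (hypothetical values, not from the original config):
# for an endpoint whose OpenAPI docstring contains
#     summary: Get jobs.
#     operationId: get_jobs
# the docstring is replaced with the single line
#     `Get jobs. <_static/api.html#operation/get_jobs>`_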
# Intersphinx configuration
intersphinx_mapping = {
"kubernetes": ("https://kubernetes.readthedocs.io/en/latest/", None),
}
def setup(app):
app.connect("autodoc-process-docstring", process_docstring)
|
{
"content_hash": "63dd747efc44c6a161284df5efbefb00",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 83,
"avg_line_length": 30.742857142857144,
"alnum_prop": 0.631837493361657,
"repo_name": "reanahub/reana-job-controller",
"id": "fdc4009f1cd354ea3ccb0e05896fbdab10cd80f4",
"size": "8282",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3563"
},
{
"name": "Python",
"bytes": "130603"
},
{
"name": "Shell",
"bytes": "4720"
}
],
"symlink_target": ""
}
|
'''More Goodness of fit tests
contains
GOF : 1 sample gof tests based on Stephens 1970, plus AD A^2
bootstrap : vectorized bootstrap p-values for gof test with fitted parameters
Created : 2011-05-21
Author : Josef Perktold
parts based on ks_2samp and kstest from scipy.stats
(license: Scipy BSD, but were completely rewritten by Josef Perktold)
References
----------
'''
from __future__ import print_function
from statsmodels.compat.python import range, lmap, string_types, callable
import numpy as np
from scipy.stats import distributions
from statsmodels.tools.decorators import cache_readonly
from scipy.special import kolmogorov as ksprob
#from scipy.stats unchanged
def ks_2samp(data1, data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
    data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
D : float
KS statistic
p-value : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import ks_2samp
>>> #fix random seed to get the same result
>>> np.random.seed(12345678);
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
different distribution
we can reject the null hypothesis since the pvalue is below 1%
>>> rvs1 = stats.norm.rvs(size=n1,loc=0.,scale=1);
>>> rvs2 = stats.norm.rvs(size=n2,loc=0.5,scale=1.5)
>>> ks_2samp(rvs1,rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
slightly different distribution
we cannot reject the null hypothesis at a 10% or lower alpha since
the pvalue at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2,loc=0.01,scale=1.0)
>>> ks_2samp(rvs1,rvs3)
(0.10333333333333333, 0.14498781825751686)
identical distribution
we cannot reject the null hypothesis since the pvalue is high, 41%
>>> rvs4 = stats.norm.rvs(size=n2,loc=0.0,scale=1.0)
>>> ks_2samp(rvs1,rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
    data1, data2 = lmap(np.asarray, (data1, data2))
    n1 = len(data1)
    n2 = len(data2)
data1 = np.sort(data1)
data2 = np.sort(data2)
data_all = np.concatenate([data1,data2])
#reminder: searchsorted inserts 2nd into 1st array
cdf1 = np.searchsorted(data1,data_all,side='right')/(1.0*n1)
cdf2 = (np.searchsorted(data2,data_all,side='right'))/(1.0*n2)
d = np.max(np.absolute(cdf1-cdf2))
#Note: d absolute not signed distance
en = np.sqrt(n1*n2/float(n1+n2))
try:
prob = ksprob((en+0.12+0.11/en)*d)
    except Exception:
prob = 1.0
return d, prob
#from scipy.stats unchanged
def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : string or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : string or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
#cdf = getattr(stats, rvs).cdf
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError('if rvs is string, cdf has to be the same distribution')
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size':N}
vals = np.sort(rvs(*args,**kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
if alternative in ['two_sided', 'greater']:
Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()
if alternative == 'greater':
return Dplus, distributions.ksone.sf(Dplus,N)
if alternative in ['two_sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return Dmin, distributions.ksone.sf(Dmin,N)
if alternative == 'two_sided':
D = np.max([Dplus,Dmin])
if mode == 'asymp':
return D, distributions.kstwobign.sf(D*np.sqrt(N))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :
return D, distributions.kstwobign.sf(D*np.sqrt(N))
else:
return D, distributions.ksone.sf(D,N)*2
#TODO: split into modification and pvalue functions separately ?
# for separate testing and combining different pieces
def dplus_st70_upp(stat, nobs):
mod_factor = np.sqrt(nobs) + 0.12 + 0.11 / np.sqrt(nobs)
stat_modified = stat * mod_factor
pval = np.exp(-2 * stat_modified**2)
digits = np.sum(stat > np.array([0.82, 0.82, 1.00]))
#repeat low to get {0,2,3}
return stat_modified, pval, digits
dminus_st70_upp = dplus_st70_upp
def d_st70_upp(stat, nobs):
mod_factor = np.sqrt(nobs) + 0.12 + 0.11 / np.sqrt(nobs)
stat_modified = stat * mod_factor
pval = 2 * np.exp(-2 * stat_modified**2)
digits = np.sum(stat > np.array([0.91, 0.91, 1.08]))
#repeat low to get {0,2,3}
return stat_modified, pval, digits
def v_st70_upp(stat, nobs):
mod_factor = np.sqrt(nobs) + 0.155 + 0.24 / np.sqrt(nobs)
#repeat low to get {0,2,3}
stat_modified = stat * mod_factor
zsqu = stat_modified**2
pval = (8 * zsqu - 2) * np.exp(-2 * zsqu)
digits = np.sum(stat > np.array([1.06, 1.06, 1.26]))
return stat_modified, pval, digits
def wsqu_st70_upp(stat, nobs):
nobsinv = 1. / nobs
stat_modified = (stat - 0.4 * nobsinv + 0.6 * nobsinv**2) * (1 + nobsinv)
pval = 0.05 * np.exp(2.79 - 6 * stat_modified)
digits = np.nan # some explanation in txt
#repeat low to get {0,2,3}
return stat_modified, pval, digits
def usqu_st70_upp(stat, nobs):
nobsinv = 1. / nobs
stat_modified = (stat - 0.1 * nobsinv + 0.1 * nobsinv**2)
stat_modified *= (1 + 0.8 * nobsinv)
pval = 2 * np.exp(- 2 * stat_modified * np.pi**2)
digits = np.sum(stat > np.array([0.29, 0.29, 0.34]))
#repeat low to get {0,2,3}
return stat_modified, pval, digits
def a_st70_upp(stat, nobs):
nobsinv = 1. / nobs
stat_modified = (stat - 0.7 * nobsinv + 0.9 * nobsinv**2)
stat_modified *= (1 + 1.23 * nobsinv)
pval = 1.273 * np.exp(- 2 * stat_modified / 2. * np.pi**2)
digits = np.sum(stat > np.array([0.11, 0.11, 0.452]))
#repeat low to get {0,2,3}
return stat_modified, pval, digits
gof_pvals = {}
gof_pvals['stephens70upp'] = {
'd_plus' : dplus_st70_upp,
'd_minus' : dplus_st70_upp,
'd' : d_st70_upp,
'v' : v_st70_upp,
'wsqu' : wsqu_st70_upp,
'usqu' : usqu_st70_upp,
'a' : a_st70_upp }
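# Hedged illustration (not part of the original module): gof_pvals maps a
# p-value family and a statistic id to its modifier function, e.g. the
# Stephens 1970 upper-tail approximation for the two-sided D statistic:
#     stat_mod, pval, digits = gof_pvals['stephens70upp']['d'](0.08, 100)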
def pval_kstest_approx(D, N):
pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :
return D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan
else:
return D, distributions.ksone.sf(D,N)*2, np.nan
gof_pvals['scipy'] = {
'd_plus' : lambda Dplus, N: (Dplus, distributions.ksone.sf(Dplus, N), np.nan),
'd_minus' : lambda Dmin, N: (Dmin, distributions.ksone.sf(Dmin,N), np.nan),
'd' : lambda D, N: (D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan)
}
gof_pvals['scipy_approx'] = {
'd' : pval_kstest_approx }
class GOF(object):
'''One Sample Goodness of Fit tests
includes Kolmogorov-Smirnov D, D+, D-, Kuiper V, Cramer-von Mises W^2, U^2 and
Anderson-Darling A, A^2. The p-values for all tests except for A^2 are based on
    the approximation given in Stephens 1970. A^2 currently has no p-values. For
the Kolmogorov-Smirnov test the tests as given in scipy.stats are also available
as options.
design: I might want to retest with different distributions, to calculate
data summary statistics only once, or add separate class that holds
summary statistics and data (sounds good).
'''
def __init__(self, rvs, cdf, args=(), N=20):
if isinstance(rvs, string_types):
#cdf = getattr(stats, rvs).cdf
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError('if rvs is string, cdf has to be the same distribution')
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size':N}
vals = np.sort(rvs(*args,**kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
self.nobs = N
self.vals_sorted = vals
self.cdfvals = cdfvals
@cache_readonly
def d_plus(self):
nobs = self.nobs
cdfvals = self.cdfvals
return (np.arange(1.0, nobs+1)/nobs - cdfvals).max()
@cache_readonly
def d_minus(self):
nobs = self.nobs
cdfvals = self.cdfvals
return (cdfvals - np.arange(0.0, nobs)/nobs).max()
@cache_readonly
def d(self):
return np.max([self.d_plus, self.d_minus])
@cache_readonly
def v(self):
'''Kuiper'''
return self.d_plus + self.d_minus
@cache_readonly
def wsqu(self):
'''Cramer von Mises'''
nobs = self.nobs
cdfvals = self.cdfvals
#use literal formula, TODO: simplify with arange(,,2)
wsqu = ((cdfvals - (2. * np.arange(1., nobs+1) - 1)/nobs/2.)**2).sum() \
+ 1./nobs/12.
return wsqu
@cache_readonly
def usqu(self):
nobs = self.nobs
cdfvals = self.cdfvals
#use literal formula, TODO: simplify with arange(,,2)
usqu = self.wsqu - nobs * (cdfvals.mean() - 0.5)**2
return usqu
@cache_readonly
def a(self):
nobs = self.nobs
cdfvals = self.cdfvals
#one loop instead of large array
msum = 0
for j in range(1,nobs):
mj = cdfvals[j] - cdfvals[:j]
mask = (mj > 0.5)
mj[mask] = 1 - mj[mask]
msum += mj.sum()
a = nobs / 4. - 2. / nobs * msum
return a
@cache_readonly
def asqu(self):
'''Stephens 1974, doesn't have p-value formula for A^2'''
nobs = self.nobs
cdfvals = self.cdfvals
asqu = -((2. * np.arange(1., nobs+1) - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[::-1]) )).sum()/nobs - nobs
return asqu
def get_test(self, testid='d', pvals='stephens70upp'):
        '''return the p-value result for test `testid` using method `pvals`'''
#print gof_pvals[pvals][testid]
stat = getattr(self, testid)
if pvals == 'stephens70upp':
return gof_pvals[pvals][testid](stat, self.nobs), stat
else:
return gof_pvals[pvals][testid](stat, self.nobs)
all_gofs = ['d', 'd_plus', 'd_minus', 'v', 'wsqu', 'usqu', 'a']
def gof_mc(randfn, distr, nobs=100):
#print '\nIs it correctly sized?'
from collections import defaultdict
results = defaultdict(list)
for i in range(1000):
rvs = randfn(nobs)
goft = GOF(rvs, distr)
for ti in all_gofs:
results[ti].append(goft.get_test(ti, 'stephens70upp')[0][1])
resarr = np.array([results[ti] for ti in all_gofs])
print(' ', ' '.join(all_gofs))
print('at 0.01:', (resarr < 0.01).mean(1))
print('at 0.05:', (resarr < 0.05).mean(1))
print('at 0.10:', (resarr < 0.1).mean(1))
def asquare(cdfvals, axis=0):
'''vectorized Anderson Darling A^2, Stephens 1974'''
    ndim = len(cdfvals.shape)
    nobs = cdfvals.shape[axis]
    slice_reverse = [slice(None)] * ndim #might make copy if not specific axis???
    islice = [None] * ndim
    islice[axis] = slice(None)
    slice_reverse[axis] = slice(None, None, -1)
    #index with tuples; list indices are deprecated in recent numpy
    islice = tuple(islice)
    slice_reverse = tuple(slice_reverse)
    asqu = -((2. * np.arange(1., nobs+1)[islice] - 1) *
        (np.log(cdfvals) + np.log(1-cdfvals[slice_reverse]))/nobs).sum(axis) \
        - nobs
    return asqu
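# Minimal sketch (random data, not part of the original module): asquare
# evaluates A^2 column-wise on sorted cdf values, here for 3 replications
# of uniform draws, so each replication gets its own statistic.
def _example_asquare():
    cdfvals = np.sort(np.random.uniform(size=(100, 3)), axis=0)
    return asquare(cdfvals, axis=0)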
#class OneSGOFFittedVec(object):
# '''for vectorized fitting'''
# currently I use the bootstrap as function instead of full class
#note: kwds loc and scale are a pain
# I would need to overwrite rvs, fit and cdf depending on fixed parameters
#def bootstrap(self, distr, args=(), kwds={}, nobs=200, nrep=1000,
def bootstrap(distr, args=(), nobs=200, nrep=100, value=None, batch_size=None):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
assumes vectorized fit_vec method,
builds and analyses (nobs, nrep) sample in one step
rename function to less generic
this works also with nrep=1
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
#it will be better to build a separate batch function that calls bootstrap
#keep batch if value is true, but batch iterate from outside if stat is returned
    if batch_size is not None:
if value is None:
raise ValueError('using batching requires a value')
n_batch = int(np.ceil(nrep/float(batch_size)))
count = 0
for irep in range(n_batch):
rvs = distr.rvs(args, **{'size':(batch_size, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
count += (stat >= value).sum()
return count / float(n_batch * batch_size)
else:
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':(nrep, nobs)})
params = distr.fit_vec(rvs, axis=1)
params = lmap(lambda x: np.expand_dims(x, 1), params)
cdfvals = np.sort(distr.cdf(rvs, params), axis=1)
stat = asquare(cdfvals, axis=1)
if value is None: #return all bootstrap results
stat_sorted = np.sort(stat)
return stat_sorted
else: #calculate and return specific p-value
return (stat >= value).mean()
def bootstrap2(value, distr, args=(), nobs=200, nrep=100):
'''Monte Carlo (or parametric bootstrap) p-values for gof
currently hardcoded for A^2 only
non vectorized, loops over all parametric bootstrap replications and calculates
and returns specific p-value,
rename function to less generic
'''
#signature similar to kstest ?
#delegate to fn ?
#rvs_kwds = {'size':(nobs, nrep)}
#rvs_kwds.update(kwds)
count = 0
for irep in range(nrep):
#rvs = distr.rvs(args, **kwds) #extension to distribution kwds ?
rvs = distr.rvs(args, **{'size':nobs})
params = distr.fit_vec(rvs)
cdfvals = np.sort(distr.cdf(rvs, params))
stat = asquare(cdfvals, axis=0)
count += (stat >= value)
return count * 1. / nrep
class NewNorm(object):
'''just a holder for modified distributions
'''
def fit_vec(self, x, axis=0):
return x.mean(axis), x.std(axis)
def cdf(self, x, args):
return distributions.norm.cdf(x, loc=args[0], scale=args[1])
def rvs(self, args, size):
loc=args[0]
scale=args[1]
return loc + scale * distributions.norm.rvs(size=size)
if __name__ == '__main__':
from scipy import stats
#rvs = np.random.randn(1000)
rvs = stats.t.rvs(3, size=200)
print('scipy kstest')
print(kstest(rvs, 'norm'))
goft = GOF(rvs, 'norm')
print(goft.get_test())
all_gofs = ['d', 'd_plus', 'd_minus', 'v', 'wsqu', 'usqu', 'a']
for ti in all_gofs:
print(ti, goft.get_test(ti, 'stephens70upp'))
print('\nIs it correctly sized?')
from collections import defaultdict
results = defaultdict(list)
nobs = 200
for i in range(100):
rvs = np.random.randn(nobs)
goft = GOF(rvs, 'norm')
for ti in all_gofs:
results[ti].append(goft.get_test(ti, 'stephens70upp')[0][1])
resarr = np.array([results[ti] for ti in all_gofs])
print(' ', ' '.join(all_gofs))
print('at 0.01:', (resarr < 0.01).mean(1))
print('at 0.05:', (resarr < 0.05).mean(1))
print('at 0.10:', (resarr < 0.1).mean(1))
gof_mc(lambda nobs: stats.t.rvs(3, size=nobs), 'norm', nobs=200)
nobs = 200
nrep = 100
bt = bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=nrep, value=None)
quantindex = np.floor(nrep * np.array([0.99, 0.95, 0.9])).astype(int)
print(bt[quantindex])
#the bootstrap results match Stephens pretty well for nobs=100, but not so well for
#large (1000) or small (20) nobs
'''
>>> np.array([15.0, 10.0, 5.0, 2.5, 1.0])/100. #Stephens
array([ 0.15 , 0.1 , 0.05 , 0.025, 0.01 ])
>>> nobs = 100
>>> [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=10000, value=c/ (1 + 4./nobs - 25./nobs**2)) for c in [0.576, 0.656, 0.787, 0.918, 1.092]]
[0.1545, 0.10009999999999999, 0.049000000000000002, 0.023, 0.0104]
>>>
'''
#test equality of loop, vectorized, batch-vectorized
np.random.seed(8765679)
resu1 = bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=100,
value=0.576/(1 + 4./nobs - 25./nobs**2))
np.random.seed(8765679)
tmp = [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=1) for _ in range(100)]
resu2 = (np.array(tmp) > 0.576/(1 + 4./nobs - 25./nobs**2)).mean()
np.random.seed(8765679)
tmp = [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=1,
value=0.576/ (1 + 4./nobs - 25./nobs**2),
batch_size=10) for _ in range(10)]
    resu3 = np.array(tmp).mean()
from numpy.testing import assert_almost_equal, assert_array_almost_equal
assert_array_almost_equal(resu1, resu2, 15)
assert_array_almost_equal(resu2, resu3, 15)
|
{
"content_hash": "6f58724e8d246a575f387bc4d377dab1",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 148,
"avg_line_length": 31.676136363636363,
"alnum_prop": 0.6109865470852018,
"repo_name": "ChadFulton/statsmodels",
"id": "d4c450adaf9e387abc9f8f7087c45254bfcf3d79",
"size": "22300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/sandbox/distributions/gof_new.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "3469"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "11749760"
},
{
"name": "R",
"bytes": "90986"
},
{
"name": "Rebol",
"bytes": "123"
},
{
"name": "Shell",
"bytes": "8181"
},
{
"name": "Smarty",
"bytes": "1014"
},
{
"name": "Stata",
"bytes": "65045"
}
],
"symlink_target": ""
}
|
import mimetypes
import uuid
from io import BytesIO
from StringIO import StringIO
from os import path
from urlparse import urlparse, urlunparse
from urllib import urlencode
from twisted.internet.interfaces import IProtocol
from twisted.internet.defer import Deferred
from twisted.python.components import proxyForInterface
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
from twisted.web.client import (
Agent,
FileBodyProducer,
HTTPConnectionPool,
RedirectAgent,
ContentDecoderAgent,
GzipDecoder
)
from twisted.python.components import registerAdapter
from treq._utils import default_reactor
from treq.auth import add_auth
from treq import multipart
class _BodyBufferingProtocol(proxyForInterface(IProtocol)):
def __init__(self, original, buffer, finished):
self.original = original
self.buffer = buffer
self.finished = finished
def dataReceived(self, data):
self.buffer.append(data)
self.original.dataReceived(data)
def connectionLost(self, reason):
self.original.connectionLost(reason)
self.finished.errback(reason)
class _BufferedResponse(proxyForInterface(IResponse)):
def __init__(self, original):
self.original = original
self._buffer = []
self._waiters = []
self._waiting = None
self._finished = False
self._reason = None
def _deliverWaiting(self, reason):
self._reason = reason
self._finished = True
for waiter in self._waiters:
for segment in self._buffer:
waiter.dataReceived(segment)
waiter.connectionLost(reason)
def deliverBody(self, protocol):
if self._waiting is None and not self._finished:
self._waiting = Deferred()
self._waiting.addBoth(self._deliverWaiting)
self.original.deliverBody(
_BodyBufferingProtocol(
protocol,
self._buffer,
self._waiting
)
)
elif self._finished:
for segment in self._buffer:
protocol.dataReceived(segment)
protocol.connectionLost(self._reason)
else:
self._waiters.append(protocol)
class HTTPClient(object):
def __init__(self, agent):
self._agent = agent
@classmethod
def with_config(cls, **kwargs):
reactor = default_reactor(kwargs.get('reactor'))
pool = kwargs.get('pool')
if not pool:
persistent = kwargs.get('persistent', True)
pool = HTTPConnectionPool(reactor, persistent=persistent)
agent = Agent(reactor, pool=pool)
if kwargs.get('allow_redirects', True):
agent = RedirectAgent(agent)
agent = ContentDecoderAgent(agent, [('gzip', GzipDecoder)])
auth = kwargs.get('auth')
if auth:
agent = add_auth(agent, auth)
return cls(agent)
def get(self, url, **kwargs):
return self.request('GET', url, **kwargs)
def put(self, url, data=None, **kwargs):
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
return self.request('PATCH', url, data=data, **kwargs)
def post(self, url, data=None, **kwargs):
return self.request('POST', url, data=data, **kwargs)
def head(self, url, **kwargs):
return self.request('HEAD', url, **kwargs)
def delete(self, url, **kwargs):
return self.request('DELETE', url, **kwargs)
def request(self, method, url, **kwargs):
method = method.upper()
# Join parameters provided in the URL
# and the ones passed as argument.
params = kwargs.get('params')
if params:
url = _combine_query_params(url, params)
# Convert headers dictionary to
# twisted raw headers format.
headers = kwargs.get('headers')
if headers:
if isinstance(headers, dict):
h = Headers({})
for k, v in headers.iteritems():
if isinstance(v, str):
h.addRawHeader(k, v)
else:
h.setRawHeaders(k, v)
headers = h
else:
headers = Headers({})
        # Here we choose the right producer
# based on the parameters passed in.
bodyProducer = None
data = kwargs.get('data')
files = kwargs.get('files')
if files:
            # If the files keyword is present we will issue a
            # multipart/form-data request as it is better suited for cases
            # with files and/or large objects.
files = list(_convert_files(files))
boundary = uuid.uuid4()
headers.setRawHeaders(
'content-type', [
'multipart/form-data; boundary=%s' % (boundary,)])
if data:
data = _convert_params(data)
else:
data = []
bodyProducer = multipart.MultiPartProducer(
data + files, boundary=boundary)
elif data:
# Otherwise stick to x-www-form-urlencoded format
# as it's generally faster for smaller requests.
if isinstance(data, (dict, list, tuple)):
headers.setRawHeaders(
'content-type', ['application/x-www-form-urlencoded'])
data = urlencode(data, doseq=True)
bodyProducer = IBodyProducer(data)
d = self._agent.request(
method, url, headers=headers,
bodyProducer=bodyProducer)
timeout = kwargs.get('timeout')
if timeout:
delayedCall = default_reactor(kwargs.get('reactor')).callLater(
timeout, d.cancel)
def gotResult(result):
if delayedCall.active():
delayedCall.cancel()
return result
d.addBoth(gotResult)
if not kwargs.get('unbuffered', False):
d.addCallback(_BufferedResponse)
return d
def _convert_params(params):
if hasattr(params, "iteritems"):
return list(sorted(params.iteritems()))
elif isinstance(params, (tuple, list)):
return list(params)
else:
raise ValueError("Unsupported format")
def _convert_files(files):
"""Files can be passed in a variety of formats:
* {'file': open("bla.f")}
* {'file': (name, open("bla.f"))}
* {'file': (name, content-type, open("bla.f"))}
* Anything that has iteritems method, e.g. MultiDict:
      MultiDict([(name, open()), (name, open())])
Our goal is to standardize it to unified form of:
* [(param, (file name, content type, producer))]
"""
if hasattr(files, "iteritems"):
files = files.iteritems()
for param, val in files:
file_name, content_type, fobj = (None, None, None)
if isinstance(val, tuple):
if len(val) == 2:
file_name, fobj = val
elif len(val) == 3:
file_name, content_type, fobj = val
else:
fobj = val
if hasattr(fobj, "name"):
file_name = path.basename(fobj.name)
if not content_type:
content_type = _guess_content_type(file_name)
yield (param, (file_name, content_type, IBodyProducer(fobj)))
def _combine_query_params(url, params):
parsed_url = urlparse(url)
qs = []
if parsed_url.query:
qs.extend([parsed_url.query, '&'])
qs.append(urlencode(params, doseq=True))
return urlunparse((parsed_url[0], parsed_url[1],
parsed_url[2], parsed_url[3],
''.join(qs), parsed_url[5]))
def _from_bytes(orig_bytes):
return FileBodyProducer(StringIO(orig_bytes))
def _from_file(orig_file):
return FileBodyProducer(orig_file)
def _guess_content_type(filename):
if filename:
guessed = mimetypes.guess_type(filename)[0]
else:
guessed = None
return guessed or 'application/octet-stream'
registerAdapter(_from_bytes, str, IBodyProducer)
registerAdapter(_from_file, file, IBodyProducer)
registerAdapter(_from_file, StringIO, IBodyProducer)
registerAdapter(_from_file, BytesIO, IBodyProducer)
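# Example usage (an illustrative sketch, not part of the module: assumes an
# HTTPClient-style instance named `client` built around the methods above and
# a running twisted reactor; the URL and file name are hypothetical):
#
#     d = client.post('http://example.com/upload',
#                     data={'field': 'value'},
#                     files={'report': open('report.txt')},
#                     timeout=30)
#     d.addCallback(lambda response: response.code)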
|
{
"content_hash": "ff550ad150fef83cfbd93861580d8549",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 75,
"avg_line_length": 29.735915492957748,
"alnum_prop": 0.5855535820011841,
"repo_name": "alex/treq",
"id": "47e5c65c777bc49d57949c93de1f2ae574c9c289",
"size": "8445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treq/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95972"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
from initdotpy import auto_import_contents
auto_import_contents()
|
{
"content_hash": "51275ffed17be5272ef86ac2de6757e9",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 33,
"alnum_prop": 0.8333333333333334,
"repo_name": "burrowsa/initdotpy",
"id": "049a2750d0796262dce0c68ccf84bde1cce79d7a",
"size": "66",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/examples/example13/package3/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17925"
}
],
"symlink_target": ""
}
|
import random
from django.conf import settings
class ReadReplicaRouter(object):
def db_for_read(self, model, **hints):
# Reads go to a randomly-chosen replica.
return random.choice(settings.READ_REPLICAS)
def db_for_write(self, model, **hints):
# Writes always go to default.
return 'default'
def allow_relation(self, obj1, obj2, **hints):
# Relations are allowed if both objects are in default/replica pool.
db_list = ('default', settings.READ_REPLICAS)
return obj1._state.db in db_list and obj2._state.db in db_list
def allow_migrate(self, db, app_label, model=None, **hints):
return True
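# Minimal settings sketch for wiring up this router (illustrative; the
# database aliases and module path are hypothetical):
#
#     DATABASES = {
#         'default': {...},
#         'replica1': {...},
#         'replica2': {...},
#     }
#     READ_REPLICAS = ['replica1', 'replica2']
#     DATABASE_ROUTERS = ['read_replica_router.ReadReplicaRouter']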
|
{
"content_hash": "63e12752cd17d58dbed847defbc64cdf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 35.526315789473685,
"alnum_prop": 0.6622222222222223,
"repo_name": "npinchot/djangocon_2015_talk",
"id": "6cb2d4fda19b685df22a96f0e1bb0a41205df82b",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "read_replica_router/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "724"
},
{
"name": "Python",
"bytes": "10320"
}
],
"symlink_target": ""
}
|
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
worker_sid = "WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = Client(account_sid, auth_token)
worker = client.taskrouter.workspaces(workspace_sid) \
.workers(worker_sid).fetch()
print(worker.friendly_name)
|
{
"content_hash": "db03a86e01d7206840a8555ed5fafa23",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.7954545454545454,
"repo_name": "teoreteetik/api-snippets",
"id": "7a007218516bfc455e2b6723c77b072de011e0bb",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/taskrouter/workers/instance/get/example-1/example-1.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from chatterbot.logic import TimeLogicAdapter
from chatterbot.conversation import Statement
class TimeAdapterTests(TestCase):
def setUp(self):
self.adapter = TimeLogicAdapter()
def test_positive_input(self):
statement = Statement("Do you know what time it is?")
response = self.adapter.process(statement)
self.assertEqual(response.confidence, 1)
self.assertIn("The current time is ", response.text)
def test_negative_input(self):
statement = Statement("What is an example of a pachyderm?")
response = self.adapter.process(statement)
self.assertEqual(response.confidence, 0)
self.assertIn("The current time is ", response.text)
|
{
"content_hash": "6715f2f885bb95f7238c830496caa766",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 31.208333333333332,
"alnum_prop": 0.7022696929238985,
"repo_name": "davizucon/ChatterBot",
"id": "58b2d77f58dd7ad5cc4901711386619e4724fee4",
"size": "749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/logic_adapter_tests/test_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "336866"
}
],
"symlink_target": ""
}
|
import six
import os
import re
import time
from datetime import datetime, timedelta
from tenable_io.api.models import Scan, ScanSettings, Template
from tenable_io.api.scans import ScansApi, ScanCreateRequest, ScanExportRequest, ScanImportRequest, ScanLaunchRequest
from tenable_io.exceptions import TenableIOException
import tenable_io.util as util
class ScanHelper(object):
STATUSES_STOPPED = [
Scan.STATUS_ABORTED,
Scan.STATUS_CANCELED,
Scan.STATUS_COMPLETED,
Scan.STATUS_IMPORTED,
Scan.STATUS_EMPTY,
]
def __init__(self, client):
self._client = client
def scans(self, name_regex=None, name=None, folder_id=None):
"""Get scans.
        :param name: A string to match scan names exactly, default to None. Ignored if the `name_regex` argument is passed.
:param name_regex: A regular expression to match scans' names with, default to None.
:param folder_id: Only scans in the folder identified by `folder_id`, default to None.
:return: A list of ScanRef.
"""
scans = self._client.scans_api.list(folder_id=folder_id).scans
if name_regex:
name_regex = re.compile(name_regex)
scans = [scan for scan in scans if name_regex.match(scan.name)]
elif name:
scans = [scan for scan in scans if name == scan.name]
return [ScanRef(self._client, scan.id) for scan in scans]
def id(self, id):
"""Get scan by ID.
:param id: Scan ID.
:return: ScanRef referenced by id if exists.
"""
self._client.scans_api.details(id)
# object_id is not returned by the API when the current user is not the owner of the scan.
# return ScanRef(self._client, self._client.scans_api.details(id).info.object_id)
return ScanRef(self._client, id)
def stop_all(self, folder=None, folder_id=None):
"""Stop all scans.
        :param folder: Instance of FolderRef. Stop all scans in the folder only. Default to None.
        :param folder_id: Stop all scans in the folder identified by folder_id only. Default to None.
:return: The current instance of ScanHelper.
"""
from tenable_io.helpers.folder import FolderRef
if folder_id is None and isinstance(folder, FolderRef):
folder_id = folder.id
scans = self.scans(folder_id=folder_id)
for scan in scans:
try:
# Send stop requests for all scans first before waiting for it to be fully stopped.
scan.stop(False)
except TenableIOException:
pass
# Wait for scans to stop after all the stop requests are made.
[scan.wait_until_stopped() for scan in scans]
return self
def create(self, name, text_targets, template):
"""Get scan by ID.
:param name: The name of the Scan to be created.
:param text_targets: A string of comma separated targets or a list of targets.
:param template: The name or title of the template, or an instance of Template.
:return: ScanRef referenced by id if exists.
"""
if isinstance(text_targets, list):
text_targets = ','.join(text_targets)
t = template
if not isinstance(t, Template):
t = self.template(name=template)
if not t:
t = self.template(title=template)
if not t:
raise TenableIOException(u'Template with name or title as "%s" not found.' % template)
scan_id = self._client.scans_api.create(
ScanCreateRequest(
t.uuid,
ScanSettings(
name,
text_targets,
)
)
)
return ScanRef(self._client, scan_id)
def template(self, name=None, title=None):
"""Get template by name or title. The `title` argument is ignored if `name` is passed.
:param name: The name of the template.
:param title: The title of the template.
:return: An instance of Template if exists, otherwise None.
"""
template = None
if name:
template_list = self._client.editor_api.list('scan')
for t in template_list.templates:
if t.name == name:
template = t
break
elif title:
template_list = self._client.editor_api.list('scan')
for t in template_list.templates:
if t.title == title:
template = t
break
return template
def import_scan(self, path):
"""Uploads and then imports scan report.
:param path: Path of the scan report.
        :return: ScanRef referencing the imported scan.
"""
uploaded_file_name = self._client.file_helper.upload(path)
imported_scan_id = self._client.scans_api.import_scan(ScanImportRequest(uploaded_file_name))
return self.id(imported_scan_id)
def activities(
self,
targets=None,
fqdns=None,
ipv4s=None,
mac_addresses=None,
netbios_names=None,
tenable_uuids=None,
date_range=7
):
"""Get scan activities against a list of targets. Note: For uncompleted scans, only the targets configured for
the scan are matched against on the SDK-side. Completed scans are queried and matched on the API server-side.
:param targets: A single string target or a list of string targets in IPv4 or FQDN format.
:param fqdns: A list of string values in FQDN format.
:param ipv4s: A list of string values in IPv4 format.
:param mac_addresses: A list of string values in MAC address format.
:param netbios_names: A list of netbios_name's.
:param tenable_uuids: A list of tenable_uuid's.
:param date_range: The number of days of data prior to and including today that should be considered.
        :return: ScanActivity list sorted by timestamp; active ScanActivity's are ordered first with None timestamp.
"""
if not isinstance(targets, list):
targets = [targets] if targets else []
if not fqdns:
fqdns = []
if not ipv4s:
ipv4s = []
if not mac_addresses:
mac_addresses = []
if not netbios_names:
netbios_names = []
if not tenable_uuids:
tenable_uuids = []
for target in targets:
if util.is_ipv4(target):
ipv4s.append(target)
            elif util.is_mac(target):
mac_addresses.append(target)
elif isinstance(target, six.string_types) and len(target) > 0:
fqdns.append(target)
asset_activities = self._asset_activities(fqdns, ipv4s, mac_addresses, netbios_names, tenable_uuids, date_range)
running_activities = self._running_activities(fqdns, ipv4s)
return running_activities + asset_activities
def _asset_activities(self, fqdns, ipv4s, mac_addresses, netbios_names, tenable_uuids, date_range):
"""Get scan activities against FQDNs and IPv4s targets from scan are not completed yet. Note: The method is to
inspect all active scan jobs from all scanners.
:param fqdns: List of string targets in FQDN-format.
:param ipv4s: List of string targets in IPv4-format.
:param date_range: The number of days of data prior to and including today that should be considered.
return: ScanActivity list.
"""
activities = []
filters = []
for fqdn in fqdns:
filters.append({
'quality': 'eq',
'filter': 'fqdn',
'value': fqdn
})
for ipv4 in ipv4s:
filters.append({
'quality': 'eq',
'filter': 'ipv4',
'value': ipv4
})
for mac_address in mac_addresses:
filters.append({
'quality': 'eq',
'filter': 'mac_address',
'value': mac_address
})
for netbios_name in netbios_names:
filters.append({
'quality': 'eq',
'filter': 'netbios_name',
'value': netbios_name
})
for tenable_uuid in tenable_uuids:
filters.append({
'quality': 'eq',
'filter': 'tenable_uuid',
'value': tenable_uuid
})
if len(filters):
# Get assets associated with targets
assets = self._client.workbenches_api.assets(
date_range=date_range,
filters=filters,
filter_search_type='or'
)
for asset in assets.assets:
activity_list = self._client.workbenches_api.asset_activity(asset.id)
activities.extend([a for a in activity_list.activity if None not in [a.scan_id, a.schedule_id]])
# TODO support for date_range is broken as of 2017/05/15, remove manual filtering when support is functional.
# Filter out activities that are outside of the time range.
start = datetime.now() - timedelta(days=date_range)
activities = [
ScanActivity(self._client, None, a.scan_id, None, a.schedule_id, a.timestamp)
for a in activities
if a.timestamp and start < datetime.strptime(a.timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
]
# Build scan_id lookup table with schedule_uuid as keys.
scans = self._client.scans_api.list()
scan_ids = {
s.schedule_uuid: s.id
for s in scans.scans
}
# Group activities by scan_id (a scenario where this can be possible is when a scan have multiple targets that
# matches the targets being queried).
activities_by_scan_history = {}
for a in activities:
if a.schedule_uuid in scan_ids:
a.scan_id = scan_ids[a.schedule_uuid]
if a.scan_id not in activities_by_scan_history:
activities_by_scan_history[a.scan_id] = {}
if a.history_uuid not in activities_by_scan_history[a.scan_id]:
activities_by_scan_history[a.scan_id][a.history_uuid] = []
activities_by_scan_history[a.scan_id][a.history_uuid].append(a)
# Look up history_id for each history_uuid
for scan_id, activities_by_history_uuid in activities_by_scan_history.items():
details = self._client.scans_api.details(scan_id)
for history in details.history:
if history.uuid in activities_by_history_uuid:
for a in activities_by_history_uuid[history.uuid]:
a.history_id = history.history_id
del activities_by_history_uuid[history.uuid]
if len(activities_by_history_uuid) < 1:
break
# Order by timestamp
return sorted(activities, key=lambda a: datetime.strptime(a.timestamp, '%Y-%m-%dT%H:%M:%S.%fZ'), reverse=True)
def _running_activities(self, fqdns, ipv4s):
"""Get scan activities against FQDNs and IPv4s targets from scan are not completed yet. Note: The method is to
inspect all active scan jobs from all scanners.
:param fqdns: List of string targets in FQDN-format.
:param ipv4s: List of string targets in IPv4-format.
return: ScanActivity list.
"""
activities = []
# Iterate through all scanners with at least 1 running scans.
for scanner in [s for s in self._client.scanners_api.list().scanners if s.scan_count > 0]:
scans = self._client.scanners_api.get_scans(scanner.id)
# Iterate through each running scan.
for s in scans.scans:
# Find the corresponding history.
history = None
for h in self._client.scans_api.details(scan_id=s.scan_id).history:
if s.id == h.uuid:
history = h
break
assert history, u'There should be history with the matching ID returned by the scanner.'
details = self._client.scans_api.details(scan_id=s.scan_id, history_id=history.history_id)
# Check if this scan has matching targets.
if details.info.targets:
scan_targets = set(details.info.targets.lower().split(u','))
if scan_targets.intersection(fqdns) or scan_targets.intersection(ipv4s):
activities.append(
ScanActivity(
self._client,
history.history_id,
history.uuid,
s.scan_id,
details.info.schedule_uuid
)
)
return sorted(activities, key=lambda o: o.scan_id, reverse=True)
class ScanActivity(object):
def __init__(
self,
client=None,
history_id=None,
history_uuid=None,
scan_id=None,
schedule_uuid=None,
timestamp=None,
):
self._client = client
self.history_id = history_id
self.history_uuid = history_uuid
self.scan_id = scan_id
self.schedule_uuid = schedule_uuid
self.timestamp = timestamp
def scan(self):
return None if self.scan_id is None else ScanRef(self._client, self.scan_id)
def details(self):
return None if self.scan_id is None else \
self._client.scans_api.details(scan_id=self.scan_id, history_id=self.history_id)
class ScanRef(object):
def __init__(self, client, id):
self._client = client
self.id = id
def copy(self):
"""Create a copy of the scan.
:return: An instance of ScanRef that references the newly copied scan.
"""
scan = self._client.scans_api.copy(self.id)
return ScanRef(self._client, scan.id)
def delete(self, force_stop=False):
"""Delete the scan.
:return: The same ScanRef instance.
"""
if force_stop and not self.stopped():
self.stop()
self._client.scans_api.delete(self.id)
return self
def details(self, history_id=None):
"""Get the scan detail.
:return: An instance of :class:`tenable_io.api.models.ScanDetails`.
"""
return self._client.scans_api.details(self.id, history_id=history_id)
def download(self, path, history_id=None, format=ScanExportRequest.FORMAT_PDF,
chapter=ScanExportRequest.CHAPTER_EXECUTIVE_SUMMARY, file_open_mode='wb'):
"""Download a scan report.
:param path: The file path to save the report to.
:param format: The report format. Default to :class:`tenable_io.api.scans.ScanExportRequest`.FORMAT_PDF.
:param chapter: The report contents. Default to \
:class:`tenable_io.api.scans.ScanExportRequest`.CHAPTER_EXECUTIVE_SUMMARY.
:param file_open_mode: The open mode to the file output. Default to "wb".
:param history_id: A specific scan history ID, None for the most recent scan history. default to None.
:return: The same ScanRef instance.
"""
self.wait_until_stopped(history_id=history_id)
if format in [ScanExportRequest.FORMAT_HTML, ScanExportRequest.FORMAT_PDF]:
export_request = ScanExportRequest(format=format, chapters=chapter)
else:
export_request = ScanExportRequest(format=format)
file_id = self._client.scans_api.export_request(
self.id,
export_request,
history_id
)
util.wait_until(
lambda: self._client.scans_api.export_status(self.id, file_id) == ScansApi.STATUS_EXPORT_READY)
iter_content = self._client.scans_api.export_download(self.id, file_id)
with open(path, file_open_mode) as fd:
for chunk in iter_content:
fd.write(chunk)
return self
def histories(self, since=None):
"""Get scan histories.
:param since: As instance of `datetime`. Default to None. \
If defined, only scan histories after this are returned.
:return: A list of :class:`tenable_io.api.models.ScanDetailsHistory`.
"""
histories = self.details().history
if since:
assert isinstance(since, datetime), '`since` parameter should be an instance of datetime.'
ts = time.mktime(since.timetuple())
histories = [h for h in histories if h.creation_date >= ts]
return histories
def last_history(self):
"""Get last (most recent) scan history if exists.
:return: An instance of :class:`tenable_io.api.models.ScanDetailsHistory` if exists, otherwise None.
"""
histories = self.histories()
return histories[-1] if len(histories) else None
def launch(self, wait=True, alt_targets=None):
"""Launch the scan.
:param wait: If True, the method blocks until the scan's status is not \
            :class:`tenable_io.api.models.Scan`.STATUS_PENDING. Default is True.
:param alt_targets: String of comma separated alternative targets or list of alternative target strings.
:return: The same ScanRef instance.
"""
if isinstance(alt_targets, six.string_types):
alt_targets = [alt_targets]
self._client.scans_api.launch(
self.id,
ScanLaunchRequest(alt_targets=alt_targets)
)
if wait:
            util.wait_until(lambda context: self.status(_context=context) != Scan.STATUS_PENDING, context={})
return self
def name(self, history_id=None):
"""Get the name of the scan.
:param history_id: The scan history to get name for, None for most recent. Default to None.
:return: The name.
"""
return self.details(history_id=history_id).info.name
def folder(self, history_id=None):
"""Get the folder the scan is in.
:param history_id: The scan history to get folder for, None for most recent. Default to None.
:return: An instance of FolderRef.
"""
from tenable_io.helpers.folder import FolderRef
return FolderRef(self._client, self.details(history_id=history_id).info.folder_id)
def move_to(self, folder):
"""Move the scan to a folder.
:param folder: An instance of FolderRef identifying the folder to move the scan to.
:return: The same ScanRef instance.
"""
from tenable_io.helpers.folder import FolderRef
assert isinstance(folder, FolderRef)
self._client.scans_api.folder(self.id, folder.id)
return self
def trash(self):
"""Move the scan into the trash folder.
:return: The same ScanRef instance.
"""
trash_folder = self._client.folder_helper.trash_folder()
self.move_to(trash_folder)
return self
def pause(self, wait=True):
"""Pause the scan.
:param wait: If True, the method blocks until the scan's status is not \
            :class:`tenable_io.api.models.Scan`.STATUS_PAUSING. Default is True.
:return: The same ScanRef instance.
"""
self._client.scans_api.pause(self.id)
if wait:
util.wait_until(lambda context: self.status(_context=context) != Scan.STATUS_PAUSING, context={})
return self
def resume(self, wait=True):
"""Resume the scan.
:param wait: If True, the method blocks until the scan's status is not \
            :class:`tenable_io.api.models.Scan`.STATUS_RESUMING. Default is True.
:return: The same ScanRef instance.
"""
self._client.scans_api.resume(self.id)
if wait:
util.wait_until(lambda context: self.status(_context=context) != Scan.STATUS_RESUMING, context={})
return self
def status(self, history_id=None, _context=None):
"""Get the scan's status.
:param history_id: The scan history to get status for, None for most recent. Default to None.
        :return: The scan status as a string.
"""
# _context allows caller to keep the context for subsequent calls, this is a temporary workaround to the cheaper
# status API requires a history_id, which is not always available during the pending phases when a scan is
# starting.
if _context is not None and history_id is None:
history_id = _context.get('history_id')
if history_id is not None:
status = self._client.scans_api.history(self.id, history_id=history_id).status
else:
details = self.details(history_id=history_id)
if _context and len(details.history) > 0:
_context['history_id'] = details.history[0].history_id
status = details.info.status
return status
def stop(self, wait=True):
"""Stop the scan.
        :param wait: If True, the method blocks until the scan's status is stopped. Default is True.
:return: The same ScanRef instance.
"""
self._client.scans_api.stop(self.id)
if wait:
self.wait_until_stopped()
return self
def stopped(self, history_id=None, _context=None):
"""Check if the scan is stopped.
:param history_id: The scan history to check, None for most recent. Default to None.
:return: True if stopped, False otherwise.
"""
return self.status(history_id=history_id, _context=_context) in ScanHelper.STATUSES_STOPPED
def wait_or_cancel_after(self, seconds):
"""Blocks until the scan is stopped, or cancel if it isn't stopped within the specified seconds.
:param seconds: The maximum amount of seconds the method should block before canceling the scan.
:return: The same ScanRef instance.
"""
start_time = time.time()
util.wait_until(lambda: time.time() - start_time > seconds or self.stopped())
if not self.stopped():
self.stop()
return self
def wait_until_stopped(self, history_id=None):
"""Blocks until the scan is stopped.
:param history_id: The scan history to wait for, None for most recent. Default to None.
:return: The same ScanRef instance.
"""
util.wait_until(lambda context: self.stopped(history_id=history_id, _context=context), context={})
return self
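# Example usage (an illustrative sketch; assumes a configured TenableIOClient
# instance exposing this helper as `client.scan_helper` and that a template
# titled 'Basic Network Scan' exists on the account):
#
#     scan = client.scan_helper.create(
#         'example scan', '192.168.1.1,192.168.1.2', 'Basic Network Scan')
#     scan.launch().download('example.pdf')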
|
{
"content_hash": "1827a21405f1431dc2eabea161b72543",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 120,
"avg_line_length": 38.81186440677966,
"alnum_prop": 0.5943490982138958,
"repo_name": "codeparticle/Tenable.io-SDK-for-Python",
"id": "946513bd7cb8bb16203b59a25b927b52bcef5fda",
"size": "22899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tenable_io/helpers/scan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "253425"
}
],
"symlink_target": ""
}
|
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
import sys
from msg_configer import MSGConfiger
from msg_db_connector import MSGDBConnector
import psycopg2
from sek.logger import SEKLogger
DEBUG = 1
class MSGDBUtil(object):
"""
Utility methods.
    This is the class responsible for actions against databases such as
executing SQL statements.
Usage:
dbUtil = MSGDBUtil()
Public API:
executeSQL(cursor: DB cursor, sql: String, exitOnFail: Boolean):Boolean
"""
def __init__(self):
"""
Constructor.
"""
self.logger = SEKLogger(__name__, 'DEBUG')
self.configer = MSGConfiger()
def getLastSequenceID(self, conn, tableName, columnName):
"""
Get last sequence ID value for the given sequence and for the
given connection.
:param conn: DB connection
:param tableName: String for name of the table that the sequence matches
:param columnName: String for name of the column to which the
sequence is applied
:returns: Integer of last sequence value or None if not found.
"""
if DEBUG:
print "table name = %s" % tableName
print "column name = %s" % columnName
sql = """SELECT currval(pg_get_serial_sequence('"{}"','{}'))""".format(
tableName, columnName)
cur = conn.cursor()
self.executeSQL(cur, sql)
try:
row = cur.fetchone()
except psycopg2.ProgrammingError, e:
msg = "Failed to retrieve the last sequence value."
msg += " Exception is %s." % e
self.logger.log(msg, 'error')
sys.exit(-1)
lastSequenceValue = row[0]
if lastSequenceValue is None:
print"Critical error. Last sequence value could not be retrieved."
sys.exit(-1)
return lastSequenceValue
def executeSQL(self, cursor, sql, exitOnFail = True):
"""
Execute SQL given a cursor and a SQL statement.
The cursor is passed here to allow control of committing outside of
this class.
exitOnFail can be toggled to handle cases such as continuing with an
insert even when duplicate keys are encountered.
The result rows of a query are accessible through the cursor that is
passed in. For example:
rows = cursor.fetchall()
:param cursor: DB cursor.
:param sql: String of a SQL statement.
:returns: Boolean True for success, execution is aborted if there is
an error.
"""
success = True
try:
cursor.execute(sql)
except Exception as detail:
success = False
msg = "SQL execute failed using {}.".format(sql)
msg += " The error is: {}.".format(detail)
self.logger.log(msg, 'error')
if exitOnFail:
sys.exit(-1)
return success
def eraseTestMeco(self):
"""
Erase the testing database. The name of the testing database is
determined from the configuration file and must be set correctly.
All sequences are reset to start with the value of one (1).
"""
self.dbConnect = MSGDBConnector(True)
self.conn = self.dbConnect.connectDB()
dbCursor = self.conn.cursor()
databaseName = self.getDBName(dbCursor)[0]
if (not (self.configer.configOptionValue("Database",
"testing_db_name") ==
databaseName)):
print "Testing DB name doesn't match %s." % \
self.configer.configOptionValue(
"Database", "testing_db_name")
exit(-1)
print "Erasing testing database %s." % databaseName
sql = ("""delete from "MeterData";""",
"""ALTER SEQUENCE interval_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE intervalreaddata_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE meterdata_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE reading_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE register_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE registerdata_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE registerread_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE tier_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE event_data_id_seq RESTART WITH 1;""",
"""ALTER SEQUENCE event_id_seq RESTART WITH 1;"""
)
for statement in sql:
print "sql = %s" % statement
self.executeSQL(dbCursor, statement)
self.conn.commit()
self.dbConnect.closeDB(self.conn)
def getDBName(self, cursor):
"""
:returns: Name of the current database.
"""
self.executeSQL(cursor, """select current_database();""")
row = cursor.fetchone()
return row
def tableColumns(self, cursor, table):
"""
Access column names as a tuple with the names being at index 0.
:param: cursor: A DB cursor
:param: table: Name of table to retrieve columns from.
:returns: List of tuples with column names in the first position.
"""
sql = """SELECT column_name FROM information_schema.columns WHERE
table_name='%s';""" % table
self.executeSQL(cursor, sql)
return cursor.fetchall() # Each column is an n-tuple.
def columns(self, cursor = None, table = None):
"""
Return column names for a given table.
:param cursor:
:param table:
:return: List of columns.
"""
if not cursor:
raise Exception('Cursor not defined.')
if not table:
raise Exception('Table not defined.')
cols = []
for col in self.tableColumns(cursor, table):
cols.append(col[0])
return cols
def columnsString(self, cursor = None, table = None):
if not cursor:
raise Exception('Cursor not defined.')
if not table:
raise Exception('Table not defined.')
return ','.join(item for item in self.columns(cursor, table))
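# Example usage (an illustrative sketch; assumes an open psycopg2 connection
# `conn`, e.g. from MSGDBConnector().connectDB(), and a table named
# "MeterData"):
#
#     dbUtil = MSGDBUtil()
#     cursor = conn.cursor()
#     if dbUtil.executeSQL(cursor, 'SELECT count(*) FROM "MeterData";',
#                          exitOnFail = False):
#         print cursor.fetchone()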
|
{
"content_hash": "d72feaa7465b63c83f59ceebe94b66cf",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 80,
"avg_line_length": 31.386473429951693,
"alnum_prop": 0.578112975219332,
"repo_name": "Hawaii-Smart-Energy-Project/Maui-Smart-Grid",
"id": "cd75da5f73432d67e9cf44189417b70995b906a3",
"size": "6550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/msg_db_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "32493"
},
{
"name": "Python",
"bytes": "380156"
},
{
"name": "Shell",
"bytes": "6853"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015 Alexey Nikitin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5.QtQuick import QQuickPaintedItem, QQuickItem
from PyQt5.QtGui import QColor, QPainter, QPainterPath, QPen
from PyQt5.QtCore import pyqtProperty, QRectF, pyqtSignal, Qt
class BezierCurve(QQuickPaintedItem):
"""Класс представляет кривую безье, задаваемою двумя точками, толщиной, цветом и двумя флагами на наличие стартовой
и концевой стрелочек.
"""
def __init__(self, parent: QQuickItem=None):
super(BezierCurve, self).__init__(parent)
self._color = QColor()
self._startX = 0
self._startY = 0
self._endX = 0
self._endY = 0
self._curveWidth = 1
self._startArrow = False
self._endArrow = False
def updateRectSize(self):
parent = self.parent()
if parent is not None:
self.setWidth(parent.width())
self.setHeight(parent.height())
colorChanged = pyqtSignal()
@pyqtProperty(QColor, notify=colorChanged)
def color(self):
return self._color
@color.setter
def color(self, color):
if self._color != color:
self._color = QColor(color)
self.colorChanged.emit()
self.update()
startXChanged = pyqtSignal()
@pyqtProperty(float, notify=startXChanged)
def startX(self):
return self._startX
@startX.setter
def startX(self, startX):
if self._startX != startX:
self._startX = startX
self.startXChanged.emit()
self.updateRectSize()
self.update()
startYChanged = pyqtSignal()
    @pyqtProperty(float, notify=startYChanged)
def startY(self):
return self._startY
@startY.setter
def startY(self, startY):
if self._startY != startY:
self._startY = startY
self.startYChanged.emit()
self.updateRectSize()
self.update()
endXChanged = pyqtSignal()
@pyqtProperty(float, notify=endXChanged)
def endX(self):
return self._endX
@endX.setter
def endX(self, endX):
if self._endX != endX:
self._endX = endX
self.endXChanged.emit()
self.updateRectSize()
self.update()
endYChanged = pyqtSignal()
@pyqtProperty(float, notify=endYChanged)
def endY(self):
return self._endY
@endY.setter
def endY(self, endY):
if self._endY != endY:
self._endY = endY
self.endYChanged.emit()
self.updateRectSize()
self.update()
curveWidthChanged = pyqtSignal()
@pyqtProperty(int, notify=curveWidthChanged)
def curveWidth(self):
return self._curveWidth
@curveWidth.setter
def curveWidth(self, curveWidth):
if self._curveWidth != curveWidth:
self._curveWidth = curveWidth
self.curveWidthChanged.emit()
self.update()
startArrowChanged = pyqtSignal()
@pyqtProperty(bool, notify=startArrowChanged)
def startArrow(self):
return self._startArrow
@startArrow.setter
def startArrow(self, startArrow):
if self._startArrow != startArrow:
self._startArrow = startArrow
self.startArrowChanged.emit()
self.update()
endArrowChanged = pyqtSignal()
@pyqtProperty(bool, notify=endArrowChanged)
def endArrow(self):
return self._endArrow
@endArrow.setter
def endArrow(self, endArrow):
if self._endArrow != endArrow:
self._endArrow = endArrow
self.endArrowChanged.emit()
self.update()
@staticmethod
def add_arrow_left(x, y, painter_path: QPainterPath):
painter_path.moveTo(x, y)
painter_path.lineTo(x + 5, y + 5)
painter_path.moveTo(x, y)
painter_path.lineTo(x + 5, y - 5)
@staticmethod
def add_arrow_right(x, y, painter_path: QPainterPath):
painter_path.moveTo(x, y)
painter_path.lineTo(x - 5, y + 5)
painter_path.moveTo(x, y)
painter_path.lineTo(x - 5, y - 5)
def add_arrow(self, x, y, direction, painter_path: QPainterPath):
if direction == 'l':
self.add_arrow_left(x, y, painter_path)
elif direction == 'r':
self.add_arrow_right(x, y, painter_path)
def paint(self, painter: QPainter):
painter_path = QPainterPath()
painter_path.moveTo(self._startX, self._startY)
x1 = (7 * self._endX + self._startX) / 8
y1 = self._startY
x2 = (self._endX + 7 * self._startX) / 8
y2 = self._endY
painter_path.cubicTo(x1, y1, x2, y2, self._endX, self._endY)
if self._startArrow:
            self.add_arrow(self._startX, self._startY, 'l' if self._startX <= self._endX else 'r', painter_path)
if self._endArrow:
            self.add_arrow(self._endX, self._endY, 'r' if self._startX <= self._endX else 'l', painter_path)
pen = QPen(self._color, self._curveWidth, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
painter.setPen(pen)
painter.setRenderHints(QPainter.Antialiasing, True)
painter.drawPath(painter_path)
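# Example registration for use from QML (an illustrative sketch; the module
# name and version are hypothetical):
#
#     from PyQt5.QtQml import qmlRegisterType
#     qmlRegisterType(BezierCurve, 'Curves', 1, 0, 'BezierCurve')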
|
{
"content_hash": "c2244de513befe0e40611589eab72607",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 119,
"avg_line_length": 31.492537313432837,
"alnum_prop": 0.6287519747235387,
"repo_name": "nikialeksey/VisualAI",
"id": "01b330ba94e8413afa0aeaef74c402fdf34c275a",
"size": "6443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Curves/BezierCurve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21043"
},
{
"name": "QML",
"bytes": "72924"
}
],
"symlink_target": ""
}
|
import socket
def get_py():
target_host = "153.121.43.113"
target_port = 80
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((target_host,target_port))
client.send("GET / \r/ Host/1.1\r\n\r\n")
response = client.recv(8096)
print (response)
get_py()
|
{
"content_hash": "e3e4a4ec488f9a721d26b21d311a9214",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.5868263473053892,
"repo_name": "nwiizo/workspace_2017",
"id": "37e2988ba8375fb5eaaead127c373545ac8504bc",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "network/seccamp/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class IhoConfig(AppConfig):
name = 'iho'
|
{
"content_hash": "de5e8cbb4981ae74fd4232985cdd300b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 16.2,
"alnum_prop": 0.7283950617283951,
"repo_name": "iho/42cc",
"id": "28f81d9d54dd918c45a2070bd83ca21c26351c50",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iho/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "4130"
},
{
"name": "JavaScript",
"bytes": "2339"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "21213"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import os
import sys
from collections import namedtuple
from os.path import splitext, relpath
from intelhex import IntelHex
# Implementation of mbed configuration mechanism
from tools.utils import json_file_to_dict, intelhex_offset
from tools.arm_pack_manager import Cache
from tools.targets import CUMULATIVE_ATTRIBUTES, TARGET_MAP, \
generate_py_target, get_resolution_order
# Base class for all configuration exceptions
class ConfigException(Exception):
"""Config system only exception. Makes it easier to distinguish config
errors"""
pass
class ConfigParameter(object):
"""This class keeps information about a single configuration parameter"""
def __init__(self, name, data, unit_name, unit_kind):
"""Construct a ConfigParameter
Positional arguments:
name - the name of the configuration parameter
data - the data associated with the configuration parameter
unit_name - the unit (target/library/application) that defines this
parameter
        unit_kind - the kind of the unit ("target", "library" or "application")
"""
self.name = self.get_full_name(name, unit_name, unit_kind,
allow_prefix=False)
self.defined_by = self.get_display_name(unit_name, unit_kind)
self.set_value(data.get("value", None), unit_name, unit_kind)
self.help_text = data.get("help", None)
self.required = data.get("required", False)
self.macro_name = data.get("macro_name", "MBED_CONF_%s" %
self.sanitize(self.name.upper()))
self.config_errors = []
@staticmethod
def get_full_name(name, unit_name, unit_kind, label=None,
allow_prefix=True):
"""Return the full (prefixed) name of a parameter. If the parameter
already has a prefix, check if it is valid
Positional arguments:
name - the simple (unqualified) name of the parameter
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
allow_prefix - True to allow the original name to have a prefix, False
otherwise
"""
if name.find('.') == -1: # the name is not prefixed
if unit_kind == "target":
prefix = "target."
elif unit_kind == "application":
prefix = "app."
else:
prefix = unit_name + '.'
return prefix + name
# The name has a prefix, so check if it is valid
if not allow_prefix:
raise ConfigException("Invalid parameter name '%s' in '%s'" %
(name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
temp = name.split(".")
# Check if the parameter syntax is correct (must be
# unit_name.parameter_name)
if len(temp) != 2:
raise ConfigException("Invalid parameter name '%s' in '%s'" %
(name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
prefix = temp[0]
# Check if the given parameter prefix matches the expected prefix
if (unit_kind == "library" and prefix != unit_name) or \
(unit_kind == "target" and prefix != "target"):
raise ConfigException(
"Invalid prefix '%s' for parameter name '%s' in '%s'" %
(prefix, name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
return name
@staticmethod
def get_display_name(unit_name, unit_kind, label=None):
"""Return the name displayed for a unit when interrogating the origin
and the last set place of a parameter
Positional arguments:
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
"""
if unit_kind == "target":
return "target:" + unit_name
elif unit_kind == "application":
return "application%s" % ("[%s]" % label if label else "")
else: # library
return "library:%s%s" % (unit_name, "[%s]" % label if label else "")
@staticmethod
def sanitize(name):
""" "Sanitize" a name so that it is a valid C macro name. Currently it
simply replaces '.' and '-' with '_'.
Positional arguments:
name - the name to make into a valid C macro
"""
return name.replace('.', '_').replace('-', '_')
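    # Example: sanitize("app.network-interface") yields "app_network_interface".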
def set_value(self, value, unit_name, unit_kind, label=None):
""" Sets a value for this parameter, remember the place where it was
set. If the value is a Boolean, it is converted to 1 (for True) or
to 0 (for False).
Positional arguments:
value - the value of the parameter
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
(optional)
"""
self.value = int(value) if isinstance(value, bool) else value
self.set_by = self.get_display_name(unit_name, unit_kind, label)
def __str__(self):
"""Return the string representation of this configuration parameter
Arguments: None
"""
if self.value is not None:
return '%s = %s (macro name: "%s")' % \
(self.name, self.value, self.macro_name)
else:
return '%s has no value' % self.name
def get_verbose_description(self):
"""Return a verbose description of this configuration parameter as a
string
Arguments: None
"""
desc = "Name: %s%s\n" % \
(self.name, " (required parameter)" if self.required else "")
if self.help_text:
desc += " Description: %s\n" % self.help_text
desc += " Defined by: %s\n" % self.defined_by
if not self.value:
return desc + " No value set"
desc += " Macro name: %s\n" % self.macro_name
desc += " Value: %s (set by %s)" % (self.value, self.set_by)
return desc
class ConfigMacro(object):
""" A representation of a configuration macro. It handles both macros
without a value (MACRO) and with a value (MACRO=VALUE)
"""
def __init__(self, name, unit_name, unit_kind):
"""Construct a ConfigMacro object
Positional arguments:
name - the macro's name
unit_name - the location where the macro was defined
unit_kind - the type of macro this is
"""
self.name = name
self.defined_by = ConfigParameter.get_display_name(unit_name, unit_kind)
if name.find("=") != -1:
tmp = name.split("=")
if len(tmp) != 2:
raise ValueError("Invalid macro definition '%s' in '%s'" %
(name, self.defined_by))
self.macro_name = tmp[0]
self.macro_value = tmp[1]
else:
self.macro_name = name
self.macro_value = None
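# Example: ConfigMacro("FEATURE_X", ...) defines a valueless macro, while
# ConfigMacro("BUFFER_SIZE=128", ...) splits into macro_name "BUFFER_SIZE"
# and macro_value "128".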
class ConfigCumulativeOverride(object):
"""Representation of overrides for cumulative attributes"""
def __init__(self, name, additions=None, removals=None, strict=False):
"""Construct a ConfigCumulativeOverride object
Positional arguments:
name - the name of the config file this came from ?
Keyword arguments:
additions - macros to add to the overrides
removals - macros to remove from the overrides
strict - Boolean indicating that attempting to remove from an override
that does not exist should error
"""
self.name = name
if additions:
self.additions = set(additions)
else:
self.additions = set()
if removals:
self.removals = set(removals)
else:
self.removals = set()
self.strict = strict
def remove_cumulative_overrides(self, overrides):
"""Extend the list of override removals.
Positional arguments:
overrides - a list of names that, when the override is evaluated, will
be removed
"""
for override in overrides:
if override in self.additions:
raise ConfigException(
"Configuration conflict. The %s %s both added and removed."
% (self.name[:-1], override))
self.removals |= set(overrides)
def add_cumulative_overrides(self, overrides):
"""Extend the list of override additions.
Positional arguments:
        overrides - a list of names that, when the override is evaluated, will
be added to the list
"""
for override in overrides:
if override in self.removals or \
(self.strict and override not in self.additions):
raise ConfigException(
"Configuration conflict. The %s %s both added and removed."
% (self.name[:-1], override))
self.additions |= set(overrides)
def strict_cumulative_overrides(self, overrides):
"""Remove all overrides that are not the specified ones
Positional arguments:
overrides - a list of names that will replace the entire attribute when
this override is evaluated.
"""
self.remove_cumulative_overrides(self.additions - set(overrides))
self.add_cumulative_overrides(overrides)
self.strict = True
def update_target(self, target):
"""Update the attributes of a target based on this override"""
setattr(target, self.name,
list((set(getattr(target, self.name, []))
| self.additions) - self.removals))
def _process_config_parameters(data, params, unit_name, unit_kind):
"""Process a "config_parameters" section in either a target, a library,
or the application.
Positional arguments:
data - a dictionary with the configuration parameters
params - storage for the discovered configuration parameters
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
"""
for name, val in data.items():
full_name = ConfigParameter.get_full_name(name, unit_name, unit_kind)
# If the parameter was already defined, raise an error
if full_name in params:
raise ConfigException(
"Parameter name '%s' defined in both '%s' and '%s'" %
(name, ConfigParameter.get_display_name(unit_name, unit_kind),
params[full_name].defined_by))
# Otherwise add it to the list of known parameters
# If "val" is not a dictionary, this is a shortcut definition,
# otherwise it is a full definition
params[full_name] = ConfigParameter(name, val if isinstance(val, dict)
else {"value": val}, unit_name,
unit_kind)
return params
def _process_macros(mlist, macros, unit_name, unit_kind):
"""Process a macro definition and check for incompatible duplicate
definitions.
Positional arguments:
mlist - list of macro names to process
macros - dictionary with currently discovered macros
unit_name - the unit (library/application) that defines this macro
unit_kind - the kind of the unit ("library" or "application")
"""
for mname in mlist:
macro = ConfigMacro(mname, unit_name, unit_kind)
if (macro.macro_name in macros) and \
(macros[macro.macro_name].name != mname):
# Found an incompatible definition of the macro in another module,
# so raise an error
full_unit_name = ConfigParameter.get_display_name(unit_name,
unit_kind)
raise ConfigException(
("Macro '%s' defined in both '%s' and '%s'"
% (macro.macro_name, macros[macro.macro_name].defined_by,
full_unit_name)) +
" with incompatible values")
macros[macro.macro_name] = macro
Region = namedtuple("Region", "name start size active filename")
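# A Region describes one memory region of a bootloader-style image: its name,
# start address, size in bytes, an "active" flag (True for the region that
# holds the application itself, as produced by Config.regions below) and an
# optional source image filename.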
class Config(object):
"""'Config' implements the mbed configuration mechanism"""
# Libraries and applications have different names for their configuration
# files
__mbed_app_config_name = "mbed_app.json"
__mbed_lib_config_name = "mbed_lib.json"
# Allowed keys in configuration dictionaries
# (targets can have any kind of keys, so this validation is not applicable
# to them)
__allowed_keys = {
"library": set(["name", "config", "target_overrides", "macros",
"__config_path"]),
"application": set(["config", "target_overrides",
"macros", "__config_path"])
}
__unused_overrides = set(["target.bootloader_img", "target.restrict_size"])
# Allowed features in configurations
__allowed_features = [
"UVISOR", "BLE", "CLIENT", "IPV4", "LWIP", "COMMON_PAL", "STORAGE", "NANOSTACK",
# Nanostack configurations
"LOWPAN_BORDER_ROUTER", "LOWPAN_HOST", "LOWPAN_ROUTER", "NANOSTACK_FULL", "THREAD_BORDER_ROUTER", "THREAD_END_DEVICE", "THREAD_ROUTER", "ETHERNET_HOST"
]
def __init__(self, tgt, top_level_dirs=None, app_config=None):
"""Construct a mbed configuration
Positional arguments:
target - the name of the mbed target used for this configuration
instance
        Keyword arguments:
top_level_dirs - a list of top level source directories (where
mbed_app_config.json could be found)
app_config - location of a chosen mbed_app.json file
NOTE: Construction of a Config object will look for the application
configuration file in top_level_dirs. If found once, it'll parse it.
top_level_dirs may be None (in this case, the constructor will not
search for a configuration file).
"""
self.app_config_location = app_config
if self.app_config_location is None:
for directory in top_level_dirs or []:
full_path = os.path.join(directory, self.__mbed_app_config_name)
if os.path.isfile(full_path):
if self.app_config_location is not None:
raise ConfigException("Duplicate '%s' file in '%s' and '%s'"
% (self.__mbed_app_config_name,
self.app_config_location, full_path))
else:
self.app_config_location = full_path
try:
self.app_config_data = json_file_to_dict(self.app_config_location) \
if self.app_config_location else {}
except ValueError as exc:
sys.stderr.write(str(exc) + "\n")
self.app_config_data = {}
# Check the keys in the application configuration data
unknown_keys = set(self.app_config_data.keys()) - \
self.__allowed_keys["application"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" %
(",".join(unknown_keys),
self.__mbed_app_config_name))
# Update the list of targets with the ones defined in the application
# config, if applicable
self.lib_config_data = {}
# Make sure that each config is processed only once
self.processed_configs = {}
if isinstance(tgt, basestring):
if tgt in TARGET_MAP:
self.target = TARGET_MAP[tgt]
else:
self.target = generate_py_target(
self.app_config_data.get("custom_targets", {}), tgt)
else:
self.target = tgt
self.target = deepcopy(self.target)
self.target_labels = self.target.labels
self.cumulative_overrides = {key: ConfigCumulativeOverride(key)
for key in CUMULATIVE_ATTRIBUTES}
self._process_config_and_overrides(self.app_config_data, {}, "app",
"application")
self.config_errors = None
def add_config_files(self, flist):
"""Add configuration files
Positional arguments:
flist - a list of files to add to this configuration
"""
for config_file in flist:
if not config_file.endswith(self.__mbed_lib_config_name):
continue
full_path = os.path.normpath(os.path.abspath(config_file))
# Check that we didn't already process this file
if self.processed_configs.has_key(full_path):
continue
self.processed_configs[full_path] = True
# Read the library configuration and add a "__full_config_path"
# attribute to it
try:
cfg = json_file_to_dict(config_file)
except ValueError as exc:
sys.stderr.write(str(exc) + "\n")
continue
cfg["__config_path"] = full_path
if "name" not in cfg:
raise ConfigException(
"Library configured at %s has no name field." % full_path)
# If there's already a configuration for a module with the same
# name, exit with error
if self.lib_config_data.has_key(cfg["name"]):
raise ConfigException(
"Library name '%s' is not unique (defined in '%s' and '%s')"
% (cfg["name"], full_path,
self.lib_config_data[cfg["name"]]["__config_path"]))
self.lib_config_data[cfg["name"]] = cfg
@property
def has_regions(self):
"""Does this config have regions defined?"""
if 'target_overrides' in self.app_config_data:
target_overrides = self.app_config_data['target_overrides'].get(
self.target.name, {})
return ('target.bootloader_img' in target_overrides or
'target.restrict_size' in target_overrides)
else:
return False
@property
def regions(self):
"""Generate a list of regions from the config"""
if not self.target.bootloader_supported:
raise ConfigException("Bootloader not supported on this target.")
cmsis_part = Cache(False, False).index[self.target.device_name]
start = 0
target_overrides = self.app_config_data['target_overrides'].get(
self.target.name, {})
try:
rom_size = int(cmsis_part['memory']['IROM1']['size'], 0)
rom_start = int(cmsis_part['memory']['IROM1']['start'], 0)
except KeyError:
raise ConfigException("Not enough information in CMSIS packs to "
"build a bootloader project")
if 'target.bootloader_img' in target_overrides:
filename = target_overrides['target.bootloader_img']
part = intelhex_offset(filename, offset=rom_start)
if part.minaddr() != rom_start:
raise ConfigException("bootloader executable does not "
"start at 0x%x" % rom_start)
part_size = (part.maxaddr() - part.minaddr()) + 1
yield Region("bootloader", rom_start + start, part_size, False,
filename)
start += part_size
if 'target.restrict_size' in target_overrides:
new_size = int(target_overrides['target.restrict_size'], 0)
yield Region("application", rom_start + start, new_size, True, None)
start += new_size
yield Region("post_application", rom_start +start, rom_size - start,
False, None)
else:
yield Region("application", rom_start + start, rom_size - start,
True, None)
if start > rom_size:
raise ConfigException("Not enough memory on device to fit all "
"application regions")
@property
def report(self):
return {'app_config': self.app_config_location,
'library_configs': map(relpath, self.processed_configs.keys())}
def _process_config_and_overrides(self, data, params, unit_name, unit_kind):
"""Process "config_parameters" and "target_config_overrides" into a
given dictionary
Positional arguments:
        data - the configuration data of the library/application
params - storage for the discovered configuration parameters
unit_name - the unit (library/application) that defines this parameter
unit_kind - the kind of the unit ("library" or "application")
"""
self.config_errors = []
_process_config_parameters(data.get("config", {}), params, unit_name,
unit_kind)
for label, overrides in data.get("target_overrides", {}).items():
# If the label is defined by the target or it has the special value
# "*", process the overrides
if (label == '*') or (label in self.target_labels):
# Check for invalid cumulative overrides in libraries
if (unit_kind == 'library' and
any(attr.startswith('target.extra_labels') for attr
in overrides.iterkeys())):
raise ConfigException(
"Target override 'target.extra_labels' in " +
ConfigParameter.get_display_name(unit_name, unit_kind,
label) +
" is only allowed at the application level")
# Parse out cumulative overrides
for attr, cumulatives in self.cumulative_overrides.iteritems():
if 'target.'+attr in overrides:
cumulatives.strict_cumulative_overrides(
overrides['target.'+attr])
del overrides['target.'+attr]
if 'target.'+attr+'_add' in overrides:
cumulatives.add_cumulative_overrides(
overrides['target.'+attr+'_add'])
del overrides['target.'+attr+'_add']
if 'target.'+attr+'_remove' in overrides:
cumulatives.remove_cumulative_overrides(
overrides['target.'+attr+'_remove'])
del overrides['target.'+attr+'_remove']
# Consider the others as overrides
for name, val in overrides.items():
# Get the full name of the parameter
full_name = ConfigParameter.get_full_name(name, unit_name,
unit_kind, label)
if full_name in params:
params[full_name].set_value(val, unit_name, unit_kind,
label)
elif name in self.__unused_overrides:
pass
else:
self.config_errors.append(
ConfigException(
"Attempt to override undefined parameter" +
(" '%s' in '%s'"
% (full_name,
ConfigParameter.get_display_name(unit_name,
unit_kind,
label)))))
for cumulatives in self.cumulative_overrides.itervalues():
cumulatives.update_target(self.target)
return params
def get_target_config_data(self):
"""Read and interpret configuration data defined by targets.
We consider the resolution order for our target and sort it by level
reversed, so that we first look at the top level target (the parent),
then its direct children, then the children of those children and so on,
until we reach self.target
TODO: this might not work so well in some multiple inheritance scenarios
At each step, look at two keys of the target data:
- config_parameters: used to define new configuration parameters
- config_overrides: used to override already defined configuration
parameters
Arguments: None
"""
params, json_data = {}, self.target.json_data
resolution_order = [e[0] for e
in sorted(
self.target.resolution_order,
key=lambda e: e[1], reverse=True)]
for tname in resolution_order:
# Read the target data directly from its description
target_data = json_data[tname]
# Process definitions first
_process_config_parameters(target_data.get("config", {}), params,
tname, "target")
# Then process overrides
for name, val in target_data.get("overrides", {}).items():
full_name = ConfigParameter.get_full_name(name, tname, "target")
# If the parameter name is not defined or if there isn't a path
# from this target to the target where the parameter was defined
            # in the target inheritance tree, raise an error. We need to use
# 'defined_by[7:]' to remove the "target:" prefix from
# defined_by
rel_names = [tgt for tgt, _ in
get_resolution_order(self.target.json_data, tname,
[])]
if full_name in self.__unused_overrides:
continue
if (full_name not in params) or \
(params[full_name].defined_by[7:] not in rel_names):
raise ConfigException(
"Attempt to override undefined parameter '%s' in '%s'"
% (name,
ConfigParameter.get_display_name(tname, "target")))
# Otherwise update the value of the parameter
params[full_name].set_value(val, tname, "target")
return params
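    # Illustration (hypothetical targets): with resolution_order entries such
    # as [("Target", 2), ("FamilyTarget", 1), ("MyBoard", 0)], sorting by
    # level in reverse yields ["Target", "FamilyTarget", "MyBoard"], so a
    # parent's parameters are defined before its children override them.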
def get_lib_config_data(self):
""" Read and interpret configuration data defined by libraries. It is
assumed that "add_config_files" above was already called and the library
configuration data exists in self.lib_config_data
Arguments: None
"""
all_params, macros = {}, {}
for lib_name, lib_data in self.lib_config_data.items():
unknown_keys = set(lib_data.keys()) - self.__allowed_keys["library"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" %
(",".join(unknown_keys), lib_name))
all_params.update(self._process_config_and_overrides(lib_data, {},
lib_name,
"library"))
_process_macros(lib_data.get("macros", []), macros, lib_name,
"library")
return all_params, macros
def get_app_config_data(self, params, macros):
""" Read and interpret the configuration data defined by the target. The
target can override any configuration parameter, as well as define its
own configuration data.
        Positional arguments:
params - the dictionary with configuration parameters found so far (in
the target and in libraries)
macros - the list of macros defined in the configuration
"""
app_cfg = self.app_config_data
# The application can have a "config_parameters" and a
# "target_config_overrides" section just like a library
self._process_config_and_overrides(app_cfg, params, "app",
"application")
# The application can also defined macros
_process_macros(app_cfg.get("macros", []), macros, "app",
"application")
def get_config_data(self):
""" Return the configuration data in two parts: (params, macros)
        params - a dictionary mapping a name to a ConfigParameter
macros - the list of macros defined with "macros" in libraries and in
the application (as ConfigMacro instances)
Arguments: None
"""
all_params = self.get_target_config_data()
lib_params, macros = self.get_lib_config_data()
all_params.update(lib_params)
self.get_app_config_data(all_params, macros)
return all_params, macros
@staticmethod
def _check_required_parameters(params):
"""Check that there are no required parameters without a value
Positional arguments:
params - the list of parameters to check
        NOTE: This function does not return a value. Instead, it throws a
ConfigException when any of the required parameters are missing values
"""
for param in params.values():
if param.required and (param.value is None):
raise ConfigException("Required parameter '" + param.name +
"' defined by '" + param.defined_by +
"' doesn't have a value")
@staticmethod
def parameters_to_macros(params):
""" Encode the configuration parameters as C macro definitions.
Positional arguments:
params - a dictionary mapping a name to a ConfigParameter
Return: a list of strings that encode the configuration parameters as
C pre-processor macros
"""
return ['%s=%s' % (m.macro_name, m.value) for m in params.values()
if m.value is not None]
@staticmethod
def config_macros_to_macros(macros):
""" Return the macro definitions generated for a dictionary of
ConfigMacros (as returned by get_config_data).
Positional arguments:
        macros - a dictionary mapping a name to a ConfigMacro instance
Return: a list of strings that are the C pre-processor macros
"""
return [m.name for m in macros.values()]
@staticmethod
def config_to_macros(config):
"""Convert the configuration data to a list of C macros
Positional arguments:
config - configuration data as (ConfigParam instances, ConfigMacro
instances) tuple (as returned by get_config_data())
"""
params, macros = config[0], config[1]
Config._check_required_parameters(params)
return Config.config_macros_to_macros(macros) + \
Config.parameters_to_macros(params)
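    # The resulting list feeds directly into compiler invocations; e.g. with
    # hypothetical entries ["MBED_CONF_APP_BAUD=9600", "MY_FEATURE_MACRO"], a
    # toolchain would pass -DMBED_CONF_APP_BAUD=9600 -DMY_FEATURE_MACRO.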
def get_config_data_macros(self):
""" Convert a Config object to a list of C macros
Arguments: None
"""
return self.config_to_macros(self.get_config_data())
def get_features(self):
""" Extract any features from the configuration data
Arguments: None
"""
params, _ = self.get_config_data()
self._check_required_parameters(params)
self.cumulative_overrides['features']\
.update_target(self.target)
for feature in self.target.features:
if feature not in self.__allowed_features:
raise ConfigException(
"Feature '%s' is not a supported features" % feature)
return self.target.features
def validate_config(self):
""" Validate configuration settings. This either returns True or
raises an exception
Arguments: None
"""
if self.config_errors:
raise self.config_errors[0]
return True
def load_resources(self, resources):
""" Load configuration data from a Resources instance and expand it
based on defined features.
Positional arguments:
resources - the resources object to load from and expand
"""
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Add/update the configuration with any .json files found while
# scanning
self.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(self.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources.add(resources.features[feature])
prev_features = features
self.validate_config()
return resources
@staticmethod
def config_to_header(config, fname=None):
""" Convert the configuration data to the content of a C header file,
meant to be included to a C/C++ file. The content is returned as a
string.
Positional arguments:
config - configuration data as (ConfigParam instances, ConfigMacro
instances) tuple (as returned by get_config_data())
Keyword arguments:
        fname - also write the content to the file called "fname".
WARNING: if 'fname' names an existing file, it will be
overwritten!
"""
params, macros = config[0], config[1]
Config._check_required_parameters(params)
header_data = "// Automatically generated configuration file.\n"
header_data += "// DO NOT EDIT, content will be overwritten.\n\n"
header_data += "#ifndef __MBED_CONFIG_DATA__\n"
header_data += "#define __MBED_CONFIG_DATA__\n\n"
# Compute maximum length of macro names for proper alignment
max_param_macro_name_len = (max([len(m.macro_name) for m
in params.values()
if m.value is not None])
if params else 0)
max_direct_macro_name_len = (max([len(m.macro_name) for m
in macros.values()])
if macros else 0)
max_macro_name_len = max(max_param_macro_name_len,
max_direct_macro_name_len)
# Compute maximum length of macro values for proper alignment
max_param_macro_val_len = (max([len(str(m.value)) for m
in params.values()
if m.value is not None])
if params else 0)
max_direct_macro_val_len = max([len(m.macro_value or "") for m
in macros.values()]) if macros else 0
max_macro_val_len = max(max_param_macro_val_len,
max_direct_macro_val_len)
# Generate config parameters first
if params:
header_data += "// Configuration parameters\n"
for macro in params.values():
if macro.value is not None:
header_data += ("#define {0:<{1}} {2!s:<{3}} " +
"// set by {4}\n")\
.format(macro.macro_name, max_macro_name_len,
macro.value, max_macro_val_len, macro.set_by)
# Then macros
if macros:
header_data += "// Macros\n"
for macro in macros.values():
if macro.macro_value:
header_data += ("#define {0:<{1}} {2!s:<{3}}" +
" // defined by {4}\n")\
.format(macro.macro_name, max_macro_name_len,
macro.macro_value, max_macro_val_len,
macro.defined_by)
else:
header_data += ("#define {0:<{1}}" +
" // defined by {2}\n")\
.format(macro.macro_name,
max_macro_name_len + max_macro_val_len + 1,
macro.defined_by)
header_data += "\n#endif\n"
# If fname is given, write "header_data" to it
if fname:
with open(fname, "w+") as file_desc:
file_desc.write(header_data)
return header_data
def get_config_data_header(self, fname=None):
""" Convert a Config instance to the content of a C header file, meant
to be included to a C/C++ file. The content is returned as a string.
Keyword arguments:
fname - also write the content to the file called "fname".
WARNING: if 'fname' names an existing file, it will be
overwritten!
"""
return self.config_to_header(self.get_config_data(), fname)
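# A minimal, standalone sketch of the alignment scheme used by
# config_to_header: pad macro names and values to the longest entry so the
# generated "#define" lines and their provenance comments line up. The sample
# entries below are hypothetical.
if __name__ == "__main__":
    _entries = [("MBED_CONF_APP_FOO", "1", "app"),
                ("MBED_CONF_LIB_BAR_BAUD", "9600", "lib[bar]")]
    _name_w = max(len(n) for n, _, _ in _entries)
    _val_w = max(len(str(v)) for _, v, _ in _entries)
    for _name, _value, _set_by in _entries:
        print("#define {0:<{1}} {2!s:<{3}} // set by {4}".format(
            _name, _name_w, _value, _val_w, _set_by))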
|
{
"content_hash": "ecbabc7f873553d36f1391a54b0e79ea",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 159,
"avg_line_length": 43.70528683914511,
"alnum_prop": 0.5583466309775056,
"repo_name": "theotherjimmy/mbed",
"id": "7a1150b40f40c03b790fba314eb76ee50156da1d",
"size": "38854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6096956"
},
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "C",
"bytes": "239688856"
},
{
"name": "C++",
"bytes": "9151366"
},
{
"name": "CMake",
"bytes": "4762"
},
{
"name": "HTML",
"bytes": "1692819"
},
{
"name": "Makefile",
"bytes": "99226"
},
{
"name": "Objective-C",
"bytes": "370970"
},
{
"name": "Perl",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "24522"
},
{
"name": "Shell",
"bytes": "16819"
},
{
"name": "XSLT",
"bytes": "5596"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import os
import signal
import socket
from zktraffic.base.sniffer import Sniffer, SnifferConfig
from scapy.sendrecv import sniff
import mock
class TestSniffer(TestCase):
def setUp(self):
self.zkt = Sniffer(SnifferConfig())
@mock.patch('os.kill', spec=os.kill)
@mock.patch('zktraffic.base.sniffer.sniff', spec=sniff)
def test_run_socket_error(self, mock_sniff, mock_kill):
mock_sniff.side_effect = socket.error
self.zkt.run()
mock_sniff.assert_called_once_with(
filter=self.zkt.config.filter,
store=0,
prn=self.zkt.handle_packet,
iface=self.zkt.config.iface,
stop_filter=self.zkt.wants_stop
)
mock_kill.assert_called_once_with(os.getpid(), signal.SIGINT)
@mock.patch('os.kill', spec=os.kill)
@mock.patch('zktraffic.base.sniffer.sniff', spec=sniff)
def test_run(self, mock_sniff, mock_kill):
self.zkt.run()
mock_sniff.assert_called_once_with(
filter=self.zkt.config.filter,
store=0,
prn=self.zkt.handle_packet,
iface=self.zkt.config.iface,
stop_filter=self.zkt.wants_stop
)
mock_kill.assert_called_once_with(os.getpid(), signal.SIGINT)
def test_exclude(self):
filter_text = 'port 2181'
excluded_ip = '8.8.8.8'
assert self.zkt.config.filter == filter_text
self.zkt.config.excluded_ips = [excluded_ip]
assert self.zkt.config.filter == filter_text
self.zkt.config.update_filter()
assert self.zkt.config.filter == '%s and host not %s' % (filter_text, excluded_ip)
excluded_ip_two = '8.8.4.4'
excluded_ip_three = '172.24.6.11'
self.zkt.config.excluded_ips = [excluded_ip, excluded_ip_two, excluded_ip_three]
self.zkt.config.update_filter()
assert self.zkt.config.filter == '%s and host not %s and host not %s and host not %s' % (
filter_text, excluded_ip, excluded_ip_two, excluded_ip_three)
def test_include(self):
filter_text = 'port 2181'
included_ip = '8.8.8.8'
assert self.zkt.config.filter == filter_text
self.zkt.config.included_ips = [included_ip]
assert self.zkt.config.filter == filter_text
self.zkt.config.update_filter()
assert self.zkt.config.filter == '%s and (host %s)' % (filter_text, included_ip)
included_ip_two = '8.8.4.4'
included_ip_three = '172.24.6.11'
self.zkt.config.included_ips = [included_ip, included_ip_two, included_ip_three]
self.zkt.config.update_filter()
assert self.zkt.config.filter == '%s and (host %s or host %s or host %s)' % (
filter_text, included_ip, included_ip_two, included_ip_three)
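# A standalone sketch of the filter composition asserted above. The helper is
# illustrative only and is not part of zktraffic's API; SnifferConfig builds
# the equivalent string in update_filter().
def _compose_filter(base, excluded_ips=(), included_ips=()):
    parts = [base]
    parts += ['host not %s' % ip for ip in excluded_ips]
    if included_ips:
        parts.append('(%s)' % ' or '.join('host %s' % ip for ip in included_ips))
    return ' and '.join(parts)
assert _compose_filter('port 2181', excluded_ips=['8.8.8.8']) == \
    'port 2181 and host not 8.8.8.8'
assert _compose_filter('port 2181', included_ips=['8.8.8.8']) == \
    'port 2181 and (host 8.8.8.8)'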
|
{
"content_hash": "3468df857e7090272c37fff3358653fb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 93,
"avg_line_length": 34.103896103896105,
"alnum_prop": 0.6706016755521707,
"repo_name": "rgs1/zktraffic",
"id": "bee8af22426e41ceec59bb22493ff5c315a39c3b",
"size": "3527",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zktraffic/tests/test_sniffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "204224"
}
],
"symlink_target": ""
}
|
"""
NodeMap application.
"""
import argparse
import functools
import itertools
import json
import random
import os
import sys
from collections import defaultdict
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QCoreApplication, pyqtSignal, QObject, QPoint
from PyQt5.QtGui import QIcon, QFont, QColor, QPainter, QBrush, QPixmap
from PyQt5.QtWidgets import (
QApplication, QWidget, QDesktopWidget, QMainWindow, QAction, qApp,
QDialog, QToolTip, QPushButton, QMessageBox, QLabel,
QHBoxLayout, QVBoxLayout, QGridLayout,
QLineEdit, QTextEdit, QInputDialog, QFileDialog, QColorDialog
)
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
APPLICATION_NAME = 'NodeMap'
VERSION = 'v0.0.0'
AUTHOR = 'Artem Panov'
def _abs_path(rel_path):
"""
Get full path of a file given relative path to the script's directory
(used to load assets such as icons).
"""
return os.path.join(BASE_DIR, rel_path)
app = None
class MainApp(QApplication):
icon_logo = None
# For drag'n'drop operations.
NODE_MIMETYPE = 'application/x-qt-windows-mime;value="NodeWidget"'
# Document file extension.
FILENAME_EXT = 'json'
# Standard color choices for nodes
STD_COLORS = (
('White', (255, 255, 255)),
('Gray', (127, 127, 127)),
('Red', (255, 0, 0)),
('Green', (0, 255, 0)),
('Blue', (0, 0, 255)),
('Yellow (R+G)', (255, 255, 0)),
('Magenta (R+B)', (255, 0, 255)),
('Cyan (G+B)', (0, 255, 255)),
)
# Maps node IDs to node data dictionary.
nodes = {}
# Using `collections.defaultdict` for edges to avoid checks.
# Default value will be empty set (no edges for this node).
edges = defaultdict(set, {})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.icon_logo = QIcon(_abs_path('pixmaps/icon_32x32.xpm'))
@functools.lru_cache(maxsize=256)
def get_rgb_icon(self, size, rgb):
""" Generate QIcon object filled with specified color. """
        pixmap = QPixmap(size[0], size[1])
        pixmap.fill(QColor(*rgb))
        return QIcon(pixmap)
class AboutWindow(QDialog):
""" Typical 'About' modal dialog. Nothing special. """
table_data = (
('Author', AUTHOR),
('Version', VERSION),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.initUI()
def initUI(self):
self.setWindowTitle('About')
self.setWindowIcon(app.icon_logo)
# Render table with values
grid = QGridLayout()
grid.setSpacing(4)
grid.setVerticalSpacing(4)
row = 1
label = QLabel() # TODO: self - to use or not to use?
label.setPixmap(QPixmap(_abs_path('pixmaps/icon_32x32.xpm')))
grid.addWidget(label, row, 0)
grid.addWidget(QLabel('<b>%s</b>' % APPLICATION_NAME), row, 1)
row += 1
# Draw the table.
        for k, v in self.table_data:
textbox = QLineEdit()
textbox.setText(v)
textbox.setReadOnly(True)
grid.addWidget(QLabel(k), row, 0)
grid.addWidget(textbox, row, 1)
row += 1
self.setLayout(grid)
self.show()
self.setMinimumSize(320, 0)
# This window can't be resized.
self.setFixedSize(self.size())
class NodeWidget(QtWidgets.QWidget):
"""
Custom widget that renders graph nodes as circles, provides context menu
and edit/remove actions, implements drag'n'drop, highlights selected nodes.
(note: overlayed node captions are rendered separately by the parent window)
"""
SIZE = (32, 32)
color = None
color_dimmed = None
node_id = None
def __init__(self, *args, **kwargs):
self.node_id = kwargs.pop('node_id')
node_data = kwargs.pop('node_data')
super().__init__(*args, **kwargs)
self.set_node_color(QColor(*node_data['color']))
self.initUI()
def set_node_color(self, color):
self.color = color
# Dimmed color is used to mark selected nodes.
self.color_dimmed = QColor(*[x // 2 for x in color.getRgb()[0:3]])
def initUI(self):
self.resize(*self.SIZE)
#self.setMinimumSize(32, 32)
data = app.nodes[self.node_id]
self.move(data['x'], data['y'])
# Context menu
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested[QtCore.QPoint].connect(
self.contextMenuRequested
)
self.show()
def move(self, *args, **kwargs):
ret = super().move(*args, **kwargs)
# Update node data with new position
app.nodes[self.node_id]['x'] = self.pos().x()
app.nodes[self.node_id]['y'] = self.pos().y()
self.parentWidget().mark_as_unsaved()
return ret
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
"""
Draw the node as circle (styling depends on node's selection status).
"""
# Prepare brush.
brush = QtGui.QBrush()
brush.setStyle(Qt.SolidPattern)
if self.is_selected():
# Fill selected circle with dimmed color
brush.setColor(self.color_dimmed)
else:
brush.setColor(self.parentWidget().BACKGROUND_COLOR)
qp.setBrush(brush)
# Prepare pen.
pen = QtGui.QPen()
pen.setColor(self.color)
        pen.setWidth(2)
qp.setPen(pen)
size = self.size()
w = size.width()
h = size.height()
center = QPoint(w // 2, h // 2)
radius = min(w, h) // 2 - 2
qp.drawEllipse(center, radius, radius)
def mouseMoveEvent(self, e):
"""
We are setting custom mimetype for draggable nodes here to distinguish
them from any other junk user may try to drop into the main window.
Otherwise the app is likely to crash on reckless mouse movements.
"""
if e.buttons() != Qt.LeftButton:
return
mimeData = QtCore.QMimeData()
mimeData.setData(
app.NODE_MIMETYPE,
QtCore.QByteArray(bytes('data string', 'utf-8')),
)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(e.pos() - self.rect().topLeft())
dropAction = drag.exec_(Qt.MoveAction)
def mousePressEvent(self, e):
"""
Mark the node as selected on mouse click (and deselect any other
selected nodes).
"""
super().mousePressEvent(e)
if e.button() in (Qt.LeftButton, Qt.RightButton):
modifiers = app.keyboardModifiers()
if modifiers != QtCore.Qt.ControlModifier:
self.parentWidget().clear_selection()
if self.is_selected():
self.parentWidget().remove_from_selection(self.node_id)
else:
self.parentWidget().add_to_selection(self.node_id)
self.update()
e.accept()
def is_selected(self):
return self.node_id in self.parentWidget().selected_nodes
def contextMenuRequested(self, point):
"""
Context menu triggered when user right-clicks on the node.
"""
menu = QtWidgets.QMenu()
action_rename = menu.addAction('Rename...')
action_rename.triggered.connect(self.rename)
submenu = QtWidgets.QMenu(menu)
submenu.setTitle('Change color')
for color_name, rgb in app.STD_COLORS:
submenu_action = submenu.addAction(color_name)
submenu_action.triggered.connect(
functools.partial(self.change_color, QColor(*rgb))
)
submenu_action.setIcon(app.get_rgb_icon((32, 32), rgb))
submenu.addSeparator()
submenu_action = submenu.addAction('Custom...')
submenu_action.triggered.connect(self.change_color)
menu.addMenu(submenu)
action_delete = menu.addAction('Delete')
action_delete.triggered.connect(self.delete)
menu.exec_(self.mapToGlobal(point))
def rename(self):
node_data = app.nodes[self.node_id]
text, ok = QInputDialog.getText(
self, 'Rename',
'Enter new name for node (#%s):' % self.node_id,
text=node_data['text']
)
if ok:
app.nodes[self.node_id]['text'] = text
self.parentWidget().mark_as_unsaved()
self.parentWidget().update()
def change_color(self, color=None):
node_data = app.nodes[self.node_id]
if color is None:
color = QColorDialog.getColor(self.color, self, "Choose color")
if not color.isValid(): # if the user cancels the dialog
return
if color == self.color:
return
app.nodes[self.node_id]['color'] = [
color.red(), color.green(), color.blue()
]
self.set_node_color(color)
# This color will be used when creating the next node.
self.parentWidget().last_color = color
self.parentWidget().mark_as_unsaved()
self.parentWidget().update()
def delete(self):
node_data = app.nodes[self.node_id]
reply = QMessageBox.question(self,
'Confirmation',
'Are you sure you want to delete node "{}" (id={})?'.format(
node_data['text'], self.node_id
),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No
)
if reply == QMessageBox.Yes:
del app.nodes[self.node_id]
            app.edges.pop(self.node_id, None)  # tolerate nodes without an edge entry
for v in app.edges.values():
v.discard(self.node_id)
del self.parentWidget().nodes[self.node_id]
self.close()
self.destroy()
self.parentWidget().mark_as_unsaved()
self.parentWidget().update()
class MainWindow(QMainWindow):
"""
Main application window. Serves as parent window to all graph node widgets.
Provides context menu to create new nodes on the right-click in the empty
space. Renders overlay captions below the nodes.
Can be used as target for drag'n'drop (node movements).
Handles main menu, status bar, toolbar and other generic stuff.
"""
NODE_LABEL_MAX_WIDTH = 200 # max pixels
NODE_LABEL_MAX_LENGTH = 32 # max characters (otherwise truncated)
BACKGROUND_COLOR = QColor(8, 8, 8)
# Opened file name (or None)
filename = None
# Default directory for open/save dialogs
browse_dir = os.getenv('HOME')
unsaved_changes = False
# This color will be updated on node color change
# and used for new nodes by default.
last_color = QColor(127, 127, 127)
actions = {}
selected_nodes = set()
nodes = {} # maps node IDs to actual widget instances
def __init__(self):
super().__init__()
self.initUI()
def mousePressEvent(self, e):
"""
Clear node selection when user clicks somewhere in the empty space
of the main window.
"""
super().mousePressEvent(e)
if e.button() in (Qt.LeftButton, Qt.RightButton):
# Ignore this event if Ctrl key is pressed (multiple selection).
modifiers = app.keyboardModifiers()
if modifiers != QtCore.Qt.ControlModifier:
self.clear_selection()
self.update()
def _update_statusbar_on_selection(self):
if not self.selected_nodes:
msg = ''
else:
msg = 'Selected nodes ({} total): {}.'.format(
len(self.selected_nodes),
', '.join(['#%d' % x for x in self.selected_nodes])
)
self.statusBar().showMessage(msg)
def add_to_selection(self, node_id):
self.selected_nodes.add(node_id)
self._update_statusbar_on_selection()
self.update()
def remove_from_selection(self, node_id):
self.selected_nodes.discard(node_id)
self._update_statusbar_on_selection()
self.update()
def clear_selection(self):
self.selected_nodes.clear()
self._update_statusbar_on_selection()
def center(self):
""" Center window on the screen. """
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def about(self):
""" Show 'About' dialog. """
dialog = AboutWindow(self).exec()
def confirm_unsaved_changes(self):
if not self.unsaved_changes:
return True
filename = self.filename or '[No Name]'
reply = QMessageBox.question(self,
'Question', 'Save changes to "%s"?' % filename,
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,
QMessageBox.No
)
if reply == QMessageBox.Yes:
return self.save()
elif reply == QMessageBox.No:
return True
return False
def closeEvent(self, event):
""" Exit (with confirmation dialog). """
if self.confirm_unsaved_changes():
event.accept()
else:
event.ignore()
def update_window_title(self):
if self.filename:
file_info = '{} ({})'.format(
os.path.basename(self.filename),
self.filename
)
else:
file_info = '[No Name]'
self.setWindowTitle('{}{} - {}'.format(
'* ' if self.unsaved_changes else '', file_info,
APPLICATION_NAME))
def mark_as_unsaved(self):
self.unsaved_changes = True
self.update_window_title()
def initUI(self):
# Window title
self.update_window_title()
self.setWindowIcon(app.icon_logo)
# Set a font used to render all tooltips.
QToolTip.setFont(QFont('SansSerif', 10))
# Window size and position
self.resize(640, 480)
self.center()
# Background color
palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Background, self.BACKGROUND_COLOR)
self.setPalette(palette)
# Actions
action_icon = QIcon.fromTheme('application-exit')
action = QAction(action_icon, '&Exit', self)
action.setShortcut('Ctrl+Q')
action.setStatusTip('Exit application.')
action.triggered.connect(self.close)
self.actions['exit'] = action
action_icon = QIcon.fromTheme('list-add')
action = QAction(action_icon, '&Connect nodes', self)
action.triggered.connect(self.connect_nodes)
action.setShortcut('Ctrl+J')
action.setStatusTip('Connect selected nodes.')
self.actions['connect_nodes'] = action
action_icon = QIcon.fromTheme('list-remove')
action = QAction(action_icon, '&Disconnect nodes', self)
action.triggered.connect(self.disconnect_nodes)
action.setShortcut('Ctrl+D')
action.setStatusTip('Disconnect selected nodes.')
self.actions['disconnect_nodes'] = action
action = QAction(app.icon_logo, '&About %s...' % APPLICATION_NAME, self)
action.triggered.connect(self.about)
self.actions['about'] = action
action_icon = QIcon.fromTheme('document-open')
action = QAction(action_icon, '&Open...', self)
action.triggered.connect(self.open)
self.actions['open'] = action
action = QAction(
QIcon.fromTheme('document-new'),
'New', self
)
action.triggered.connect(self.new)
self.actions['new'] = action
action = QAction(
QIcon.fromTheme('document-save'),
'&Save', self
)
action.triggered.connect(self.save)
action.setShortcut('F2')
self.actions['save'] = action
action = QAction(
QIcon.fromTheme('document-save-as'),
'Save as...', self
)
action.triggered.connect(self.save_as)
self.actions['save_as'] = action
# Menu
menubar = self.menuBar()
menu = menubar.addMenu('&File')
menu.addAction(self.actions['new'])
menu.addAction(self.actions['open'])
menu.addAction(self.actions['save'])
menu.addAction(self.actions['save_as'])
menu.addSeparator()
menu.addAction(self.actions['exit'])
menu = menubar.addMenu('&Edit')
menu.addAction(self.actions['connect_nodes'])
menu.addAction(self.actions['disconnect_nodes'])
menu = menubar.addMenu('&About')
menu.addAction(self.actions['about'])
# Toolbar
self.toolbar = self.addToolBar('Toolbar')
self.toolbar.addAction(self.actions['new'])
self.toolbar.addAction(self.actions['open'])
self.toolbar.addAction(self.actions['save'])
self.toolbar.addAction(self.actions['save_as'])
self.toolbar.addAction(self.actions['connect_nodes'])
self.toolbar.addAction(self.actions['disconnect_nodes'])
# Status bar
bar = QtWidgets.QStatusBar()
bar.setStyleSheet(
"background-color: rgb(224, 224, 224);"
"color: rgb(0, 0, 0);"
)
self.setStatusBar(bar)
self.statusBar().showMessage('Ready.')
# Context menu
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested[QtCore.QPoint].connect(self.contextMenuRequested)
# Drag and drop
self.setAcceptDrops(True)
# Initialize nodes
self.initialize_nodes()
self.show()
def initialize_nodes(self):
for node_id, node_data in app.nodes.items():
widget = NodeWidget(self, node_id=node_id, node_data=node_data)
self.nodes[node_id] = widget
def dragEnterEvent(self, e):
"""
        If we ignore wrong content types here, we will have trouble with them
        in the `dropEvent` handler. So be it for now (some arcane wizardry here).
"""
# TODO: Do it properly.
# TODO: Redraw widget while dragging.
e.accept()
def dropEvent(self, e):
# Ignore objects with unsupported content-type.
data = e.mimeData().data(app.NODE_MIMETYPE).data().decode('utf-8')
if not data:
e.ignore()
return
# Actually move the node (place center on the widget under cursor)
size = e.source().size()
e.source().move(
e.pos().x() - size.width() // 2,
e.pos().y() - size.height() // 2
)
e.setDropAction(Qt.MoveAction)
self.update()
# Show new coordinates in status bar.
self.statusBar().showMessage('Moved node {} to ({}, {}).'.format(
e.source().node_id, e.pos().x(), e.pos().y()
))
e.accept()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.draw_edges(event, qp)
self.draw_node_labels(event, qp)
qp.end()
def draw_edges(self, event, qp):
pen = QtGui.QPen(Qt.gray, 2, Qt.SolidLine)
qp.setPen(pen)
for src_id, trg_ids in app.edges.items():
src_rect = self.nodes[src_id].geometry()
src_x = src_rect.x() + src_rect.width() // 2
src_y = src_rect.y() + src_rect.height() // 2
for trg_id in trg_ids:
trg_rect = self.nodes[trg_id].geometry()
trg_x = trg_rect.x() + trg_rect.width() // 2
trg_y = trg_rect.y() + trg_rect.height() // 2
qp.drawLine(src_x, src_y, trg_x, trg_y)
def draw_node_labels(self, event, qp):
'''
Draw overlay on parent window with all node labels
(text labels are rendered separately from node widgets themselves).
'''
font = qp.font()
font.setPointSize(10)
for node_id, node_widget in self.nodes.items():
# Selected nodes' labels are rendered bold. Choosing the font here.
if node_widget.is_selected():
font.setBold(True)
qp.setPen(Qt.white)
else:
font.setBold(False)
qp.setPen(Qt.gray)
qp.setFont(font)
# Limit max label size (truncate and append "..." if necessary)
label_text = app.nodes[node_id]['text']
if len(label_text) > self.NODE_LABEL_MAX_LENGTH-3:
label_text = "%s..." % label_text[:self.NODE_LABEL_MAX_LENGTH-4]
# Determine actual text width via "font metrics".
# Limit max widget width accordingly.
metrics = qp.fontMetrics()
label_width = metrics.width(label_text)
widget_width = min(label_width, self.NODE_LABEL_MAX_WIDTH)
# Place the label below the node widget, symmetrically.
node_rect = node_widget.geometry()
x = node_rect.x()
y = node_rect.y()
rect = QtCore.QRect(
node_rect.x() + (node_rect.width() - widget_width) // 2,
node_rect.bottom() + 4,
widget_width, 16
)
# If label is truncated then it should be aligned left, not center
# (otherwise we will only see the middle part of the label).
if label_width < widget_width:
alignment = Qt.AlignCenter
else:
alignment = Qt.AlignLeft
# Render
qp.drawText(rect, alignment, label_text)
def contextMenuRequested(self, point):
"""
Context menu triggered when user right-clicks somewhere in the empty
space of the main window.
"""
menu = QtWidgets.QMenu()
action1 = menu.addAction('Add node...')
action1.triggered.connect(lambda: self.add_node(point))
menu.exec_(self.mapToGlobal(point))
def add_node(self, point):
# Poor man's autoincrement.
node_id = max(app.nodes.keys() or [0]) + 1
app.nodes[node_id] = {
'text': 'Node %s' % node_id,
'x': point.x(),
'y': point.y(),
'color': list(self.last_color.getRgb()[0:3])
}
app.edges[node_id] = set()
widget = NodeWidget(self, node_id=node_id, node_data=app.nodes[node_id])
self.nodes[node_id] = widget
self.mark_as_unsaved()
self.update()
def connect_nodes(self):
""" Connect all selected nodes. """
for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):
if src_id != trg_id:
app.edges[src_id].add(trg_id)
self.mark_as_unsaved()
self.update()
def disconnect_nodes(self):
""" Disconnect all selected nodes. """
for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):
if src_id != trg_id:
# `discard` ignores non-existing elements (unlike `remove`)
app.edges[src_id].discard(trg_id)
self.mark_as_unsaved()
self.update()
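    # Example (hypothetical selection): itertools.product({1, 2}, repeat=2)
    # yields (1, 1), (1, 2), (2, 1), (2, 2); skipping the src == trg pairs
    # leaves the directed edges 1 -> 2 and 2 -> 1, so connecting a selection
    # links every selected pair in both directions.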
def new(self):
if not self.confirm_unsaved_changes():
return
for node_widget in self.nodes.values():
node_widget.close()
node_widget.destroy()
self.nodes = {}
app.nodes = {}
app.edges = defaultdict(set, {})
self.filename = None
self.unsaved_changes = False
self.update_window_title()
self.update()
def open(self, filename):
if not self.confirm_unsaved_changes():
return
if not filename:
filename = QFileDialog.getOpenFileName(
self, 'Open file', self.browse_dir,
"*.%s" % app.FILENAME_EXT)[0]
if not filename:
return
with open(filename, 'r') as f:
data = json.load(f)
if data['version'] != VERSION:
reply = QMessageBox.question(self, 'Question',
'"%s" is created with another application version. '
'Attempt to open it anyway?' % filename,
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes
)
if reply != QMessageBox.Yes:
return
def _get_node_data(value):
return {
'x': value['x'],
'y': value['y'],
'text': value.get('text', ''),
'color': value.get('color', [127, 127, 127]),
}
app.nodes = {
int(k): _get_node_data(v)
for k, v in data['nodes'].items()
}
app.edges = defaultdict(
set, {int(k): set(v) for k, v in data['edges'].items()}
)
for node_widget in self.nodes.values():
node_widget.close()
node_widget.destroy()
self.nodes = {}
self.initialize_nodes()
self.filename = filename
self.browse_dir = os.path.dirname(filename)
self.unsaved_changes = False
self.update_window_title()
self.update()
print('Opened file: %s' % filename)
def save(self):
if not self.filename:
return self.save_as()
data = {
'nodes': app.nodes,
            'edges': {k: list(v) for k, v in app.edges.items()},
'version': VERSION,
}
with open(self.filename, 'w') as f:
json.dump(data, f)
self.unsaved_changes = False
self.update_window_title()
print('Saved file: %s' % self.filename)
return True
def save_as(self):
filename = QFileDialog.getSaveFileName(
self, 'Save as', self.browse_dir, "*.%s" % app.FILENAME_EXT)[0]
if not filename:
return False
if not filename.lower().endswith('.%s' % app.FILENAME_EXT):
if not filename.endswith('.'):
filename += '.'
filename += app.FILENAME_EXT
self.filename = filename
self.browse_dir = os.path.dirname(filename)
return self.save()
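# Document format sketch (hypothetical values): save() writes plain JSON such
# as
#   {"version": "v0.0.0",
#    "nodes": {"1": {"text": "Node 1", "x": 40, "y": 60,
#                    "color": [127, 127, 127]},
#              "2": {"text": "Node 2", "x": 120, "y": 60,
#                    "color": [255, 0, 0]}},
#    "edges": {"1": [2], "2": [1]}}
# JSON stringifies the integer node ids, so open() converts the keys back to
# ints and rebuilds the edge sets.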
if __name__ == '__main__':
def _file_path(value):
if not os.path.isfile(value):
msg = "Could not open %s" % value
raise argparse.ArgumentTypeError(msg)
return value
parser = argparse.ArgumentParser(
description='NodeMap application.'
)
parser.add_argument('file', type=_file_path, nargs='?',
help='Open document.')
args = parser.parse_args()
app = MainApp(sys.argv)
w = MainWindow()
if args.file:
w.open(args.file)
sys.exit(app.exec_())
|
{
"content_hash": "00ec5c50f68fc48dd316aac93f3b9246",
"timestamp": "",
"source": "github",
"line_count": 870,
"max_line_length": 89,
"avg_line_length": 31.27471264367816,
"alnum_prop": 0.5579771399169392,
"repo_name": "amt386/nodemap",
"id": "b4c4abd5230f5ef199681395f311991c1c73d5cf",
"size": "27228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodemap.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27228"
}
],
"symlink_target": ""
}
|
from ._base import SQLBaseStore
from synapse.util.caches.descriptors import cachedInlineCallbacks
from twisted.internet import defer
import logging
import simplejson as json
logger = logging.getLogger(__name__)
class PushRuleStore(SQLBaseStore):
@cachedInlineCallbacks()
def get_push_rules_for_user(self, user_name):
rows = yield self._simple_select_list(
table=PushRuleTable.table_name,
keyvalues={
"user_name": user_name,
},
retcols=PushRuleTable.fields,
desc="get_push_rules_enabled_for_user",
)
rows.sort(
key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
)
defer.returnValue(rows)
@cachedInlineCallbacks()
def get_push_rules_enabled_for_user(self, user_name):
results = yield self._simple_select_list(
table=PushRuleEnableTable.table_name,
keyvalues={
'user_name': user_name
},
retcols=PushRuleEnableTable.fields,
desc="get_push_rules_enabled_for_user",
)
defer.returnValue({
r['rule_id']: False if r['enabled'] == 0 else True for r in results
})
@defer.inlineCallbacks
def add_push_rule(self, before, after, **kwargs):
vals = kwargs
if 'conditions' in vals:
vals['conditions'] = json.dumps(vals['conditions'])
if 'actions' in vals:
vals['actions'] = json.dumps(vals['actions'])
# we could check the rest of the keys are valid column names
# but sqlite will do that anyway so I think it's just pointless.
vals.pop("id", None)
if before or after:
ret = yield self.runInteraction(
"_add_push_rule_relative_txn",
self._add_push_rule_relative_txn,
before=before,
after=after,
**vals
)
defer.returnValue(ret)
else:
ret = yield self.runInteraction(
"_add_push_rule_highest_priority_txn",
self._add_push_rule_highest_priority_txn,
**vals
)
defer.returnValue(ret)
def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
after = kwargs.pop("after", None)
relative_to_rule = kwargs.pop("before", after)
res = self._simple_select_one_txn(
txn,
table=PushRuleTable.table_name,
keyvalues={
"user_name": user_name,
"rule_id": relative_to_rule,
},
retcols=["priority_class", "priority"],
allow_none=True,
)
if not res:
raise RuleNotFoundException(
"before/after rule not found: %s" % (relative_to_rule,)
)
priority_class = res["priority_class"]
base_rule_priority = res["priority"]
if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
raise InconsistentRuleException(
"Given priority class does not match class of relative rule"
)
new_rule = kwargs
new_rule.pop("before", None)
new_rule.pop("after", None)
new_rule['priority_class'] = priority_class
new_rule['user_name'] = user_name
new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
# check if the priority before/after is free
new_rule_priority = base_rule_priority
if after:
new_rule_priority -= 1
else:
new_rule_priority += 1
new_rule['priority'] = new_rule_priority
sql = (
"SELECT COUNT(*) FROM " + PushRuleTable.table_name +
" WHERE user_name = ? AND priority_class = ? AND priority = ?"
)
txn.execute(sql, (user_name, priority_class, new_rule_priority))
res = txn.fetchall()
num_conflicting = res[0][0]
# if there are conflicting rules, bump everything
if num_conflicting:
sql = "UPDATE "+PushRuleTable.table_name+" SET priority = priority "
if after:
sql += "-1"
else:
sql += "+1"
sql += " WHERE user_name = ? AND priority_class = ? AND priority "
if after:
sql += "<= ?"
else:
sql += ">= ?"
txn.execute(sql, (user_name, priority_class, new_rule_priority))
txn.call_after(
self.get_push_rules_for_user.invalidate, (user_name,)
)
txn.call_after(
self.get_push_rules_enabled_for_user.invalidate, (user_name,)
)
self._simple_insert_txn(
txn,
table=PushRuleTable.table_name,
values=new_rule,
)
def _add_push_rule_highest_priority_txn(self, txn, user_name,
priority_class, **kwargs):
# find the highest priority rule in that class
sql = (
"SELECT COUNT(*), MAX(priority) FROM " + PushRuleTable.table_name +
" WHERE user_name = ? and priority_class = ?"
)
txn.execute(sql, (user_name, priority_class))
res = txn.fetchall()
(how_many, highest_prio) = res[0]
new_prio = 0
if how_many > 0:
new_prio = highest_prio + 1
# and insert the new rule
new_rule = kwargs
new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
new_rule['user_name'] = user_name
new_rule['priority_class'] = priority_class
new_rule['priority'] = new_prio
txn.call_after(
self.get_push_rules_for_user.invalidate, (user_name,)
)
txn.call_after(
self.get_push_rules_enabled_for_user.invalidate, (user_name,)
)
self._simple_insert_txn(
txn,
table=PushRuleTable.table_name,
values=new_rule,
)
@defer.inlineCallbacks
def delete_push_rule(self, user_name, rule_id):
"""
Delete a push rule. Args specify the row to be deleted and can be
any of the columns in the push_rule table, but below are the
standard ones
Args:
user_name (str): The matrix ID of the push rule owner
rule_id (str): The rule_id of the rule to be deleted
"""
yield self._simple_delete_one(
PushRuleTable.table_name,
{'user_name': user_name, 'rule_id': rule_id},
desc="delete_push_rule",
)
self.get_push_rules_for_user.invalidate((user_name,))
self.get_push_rules_enabled_for_user.invalidate((user_name,))
@defer.inlineCallbacks
def set_push_rule_enabled(self, user_name, rule_id, enabled):
ret = yield self.runInteraction(
"_set_push_rule_enabled_txn",
self._set_push_rule_enabled_txn,
user_name, rule_id, enabled
)
defer.returnValue(ret)
def _set_push_rule_enabled_txn(self, txn, user_name, rule_id, enabled):
new_id = self._push_rules_enable_id_gen.get_next_txn(txn)
self._simple_upsert_txn(
txn,
PushRuleEnableTable.table_name,
{'user_name': user_name, 'rule_id': rule_id},
{'enabled': 1 if enabled else 0},
{'id': new_id},
)
txn.call_after(
self.get_push_rules_for_user.invalidate, (user_name,)
)
txn.call_after(
self.get_push_rules_enabled_for_user.invalidate, (user_name,)
)
class RuleNotFoundException(Exception):
pass
class InconsistentRuleException(Exception):
pass
class PushRuleTable(object):
table_name = "push_rules"
fields = [
"id",
"user_name",
"rule_id",
"priority_class",
"priority",
"conditions",
"actions",
]
class PushRuleEnableTable(object):
table_name = "push_rules_enable"
fields = [
"user_name",
"rule_id",
"enabled"
]
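# Purely illustrative sketch (not part of synapse): the ordering applied by
# get_push_rules_for_user sorts by priority_class, then priority, both
# descending, shown here with hypothetical rows.
if __name__ == "__main__":
    _rows = [
        {"rule_id": "a", "priority_class": 1, "priority": 0},
        {"rule_id": "b", "priority_class": 5, "priority": 2},
        {"rule_id": "c", "priority_class": 5, "priority": 7},
    ]
    _rows.sort(
        key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
    )
    assert [r["rule_id"] for r in _rows] == ["c", "b", "a"]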
|
{
"content_hash": "317d912ccc4cec35e945effe8601e62e",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 85,
"avg_line_length": 31.03041825095057,
"alnum_prop": 0.54331577012621,
"repo_name": "iot-factory/synapse",
"id": "5305b7e1227a2ff8e5a16e1033ee7a16af8f3166",
"size": "8764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/storage/push_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31842"
},
{
"name": "Python",
"bytes": "1879672"
},
{
"name": "Shell",
"bytes": "4548"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from django.db import models
class FacebookUser(models.Model):
facebook_id = models.CharField(max_length=100, unique=True)
contrib_user = models.OneToOneField(User)
contrib_password = models.CharField(max_length=100)
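# Hypothetical usage sketch (not part of this module): resolve the bridged
# Django user for an authenticated Facebook id, e.g.
#   fb_user = FacebookUser.objects.get(facebook_id=str(uid))
#   request.user = fb_user.contrib_user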
|
{
"content_hash": "8e856dbad27489ad7545be44ebfabb63",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 36.375,
"alnum_prop": 0.7285223367697594,
"repo_name": "teebes/django-facebookconnect",
"id": "1f5a549d5764cc50c7e686c8fdb601ca6c4f52cd",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5587"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten  # 'merge' removed: unused and gone from Keras 2
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Lambda, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
import keras.backend as K
import matplotlib.pyplot as plt
import sys
import os
import numpy as np
backend_name = K.backend()
is_tf = False
if 'tensorflow' in backend_name.lower():
is_tf = True
class DCGAN():
def __init__(self):
self.img_rows = 64
self.img_cols = 64
self.channels = 3
        self.save_img_folder = 'dcgan/images/'
        self.cmap = 'gray'  # colormap for single-channel plots; plot() reads this
optimizer = Adam(lr=1e-3, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
optimizer_dis = SGD(lr=1e-3, momentum=0.9, nesterov=True)
self.latent_dim = 100
### generator params
self.initial_filters = 512
self.start_dim = int(self.img_cols / 16)
self.nb_upconv = 4
self.bn_axis = -1
self.initial_reshape_shape = (self.start_dim, self.start_dim, self.initial_filters)
self.bn_mode = 2
### discriminator params
self.list_f = [64,128,256]
self.num_kernels = 100
self.dim_per_kernel = 5
self.use_mbd = True
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer_dis,
metrics=['accuracy'])
# Build and compile the generator
self.generator = self.build_generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
# The generator takes noise as input and generated imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The valid takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator) takes
# noise as input => generates images => determines validity
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_generator(self):
noise_shape = (self.latent_dim,)
model = Sequential()
model.add(Dense(self.initial_filters * self.start_dim * self.start_dim, input_shape=noise_shape))
model.add(Reshape(self.initial_reshape_shape))
model.add(BatchNormalization(axis=self.bn_axis))
for i in range(self.nb_upconv):
model.add(UpSampling2D(size=(2, 2)))
nb_filters = int(self.initial_filters / (2 ** (i + 1)))
model.add(Conv2D(nb_filters, (3, 3), padding="same"))
            model.add(BatchNormalization(axis=self.bn_axis))  # channels-last: normalize the channel axis
model.add(Activation("relu"))
model.add(Conv2D(nb_filters, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, (3, 3), name="gen_convolution2d_final", padding="same", activation='tanh'))
model.summary()
noise = Input(shape=noise_shape)
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
img_shape = (self.img_rows, self.img_cols, self.channels)
img = Input(shape=img_shape)
# First conv
x = Conv2D(32, (3, 3), strides=(2, 2), name="disc_convolution2d_1", padding="same")(img)
x = BatchNormalization(axis=self.bn_axis)(x)
x = LeakyReLU(0.2)(x)
# Next convs
for i, f in enumerate(self.list_f):
name = "disc_convolution2d_%s" % (i + 2)
x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
x = BatchNormalization(axis=self.bn_axis)(x)
x = LeakyReLU(0.2)(x)
x = Flatten()(x)
def minb_disc(x):
diffs = K.expand_dims(x, 3) - K.expand_dims(K.permute_dimensions(x, [1, 2, 0]), 0)
abs_diffs = K.sum(K.abs(diffs), 2)
x = K.sum(K.exp(-abs_diffs), 2)
return x
def lambda_output(input_shape):
return input_shape[:2]
        M = Dense(self.num_kernels * self.dim_per_kernel, use_bias=False, activation=None)
MBD = Lambda(minb_disc, output_shape=lambda_output)
if self.use_mbd:
x_mbd = M(x)
x_mbd = Reshape((self.num_kernels, self.dim_per_kernel))(x_mbd)
x_mbd = MBD(x_mbd)
x = concatenate([x, x_mbd])
x = Dense(1, activation='sigmoid', name="disc_dense_2")(x)
        discriminator_model = Model(inputs=[img], outputs=[x], name='discriminator')
discriminator_model.summary()
return discriminator_model
def train(self, epochs, batch_size=128, save_interval=50):
# Load the dataset
X_train = self.load_data()
half_batch = int(batch_size / 2)
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], half_batch)
imgs = X_train[idx]
# Sample noise and generate a half batch of new images
            noise = np.random.normal(0, 1, (half_batch, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Train the discriminator (real classified as ones and generated as zeros)
d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the generator (wants discriminator to mistake images as real)
g_loss = self.combined.train_on_batch(noise, np.ones((batch_size, 1)))
# Plot the progress
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
self.save_imgs(epoch,imgs)
def save_imgs(self, epoch,imgs):
if not(os.path.exists(self.save_img_folder)):
os.makedirs(self.save_img_folder)
r, c = 5, 5
z = np.random.normal(size=(25, self.latent_dim))
gen_imgs = self.generator.predict(z)
gen_imgs = 0.5 * gen_imgs + 0.5
# z_imgs = self.encoder.predict(imgs)
# gen_enc_imgs = self.generator.predict(z_imgs)
# gen_enc_imgs = 0.5 * gen_enc_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
self.plot(axs[i,j],gen_imgs[cnt, :,:,:].squeeze())
cnt += 1
print('----- Saving generated -----')
if isinstance(epoch, str):
fig.savefig(self.save_img_folder + "celeba_{}.png".format(epoch))
else:
fig.savefig(self.save_img_folder + "celeba_%d.png" % epoch)
plt.close()
# fig, axs = plt.subplots(r, c)
# cnt = 0
# for i in range(r):
# for j in range(c):
# self.plot(axs[i,j],gen_enc_imgs[cnt, :,:,:].squeeze())
# cnt += 1
# print('----- Saving encoded -----')
# if isinstance(epoch, str):
# fig.savefig(self.save_img_folder + "celeba_{}_enc.png".format(epoch))
# else :
# fig.savefig(self.save_img_folder + "celeba_%d_enc.png" % epoch)
# plt.close()
fig, axs = plt.subplots(r, c)
cnt = 0
imgs = imgs * 0.5 + 0.5
for i in range(r):
for j in range(c):
self.plot(axs[i,j],imgs[cnt, :,:,:].squeeze())
cnt += 1
print('----- Saving real -----')
if isinstance(epoch, str):
fig.savefig(self.save_img_folder + "celeba_{}_real.png".format(epoch))
else :
fig.savefig(self.save_img_folder + "celeba_%d_real.png" % epoch)
plt.close()
def load_data(self):
        self.dataPath = r'D:\Code\data\sceleba.npy'  # raw string avoids backslash escapes
print('----- Loading CelebA -------')
X_train = np.load(self.dataPath)
X_train = X_train.transpose([0,2,3,1])
X_train = (X_train.astype(np.float32) - 0.5) / 0.5
print('CelebA shape:', X_train.shape, X_train.min(), X_train.max())
print('------- CelebA loaded -------')
return X_train
def plot(self, fig, img):
if self.channels == 1:
fig.imshow(img,cmap=self.cmap)
fig.axis('off')
else:
fig.imshow(img)
fig.axis('off')
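# Standalone NumPy sketch of the minibatch-discrimination statistic computed
# by `minb_disc` above (input shape: batch x num_kernels x dim_per_kernel).
# Purely illustrative; the model itself uses the Keras backend version.
def _minibatch_features(x):
    diffs = x[:, :, :, None] - x.transpose(1, 2, 0)[None, :, :, :]
    abs_diffs = np.abs(diffs).sum(axis=2)  # L1 distance to every other sample
    return np.exp(-abs_diffs).sum(axis=2)  # per-kernel similarity to the batch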
if __name__ == '__main__':
dcgan = DCGAN()
dcgan.train(epochs=50001, batch_size=64, save_interval=100)
|
{
"content_hash": "f6fb1ed4865ed934c805ad6f72779ae6",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 115,
"avg_line_length": 33.602787456445995,
"alnum_prop": 0.5461426793861468,
"repo_name": "sofiane87/lasagne-GAN",
"id": "6f7a6cdae9b2a2d44e44de98f07a3323ee11ee73",
"size": "9644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dcgan/dcgan_celeba3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "271429"
}
],
"symlink_target": ""
}
|
"""Query an exported model. Py2 only. Install tensorflow-serving-api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from oauth2client.client import GoogleCredentials
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.serving import serving_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("server", None, "Address to Tensorflow Serving server.")
flags.DEFINE_string("servable_name", None, "Name of served model.")
flags.DEFINE_string("problem", None, "Problem name.")
flags.DEFINE_string("data_dir", None, "Data directory, for vocab files.")
flags.DEFINE_string("t2t_usr_dir", None, "Usr dir for registrations.")
flags.DEFINE_string("inputs_once", None, "Query once with this input.")
flags.DEFINE_integer("timeout_secs", 10, "Timeout for query.")
# For Cloud ML Engine predictions.
flags.DEFINE_string("cloud_mlengine_model_name", None,
"Name of model deployed on Cloud ML Engine.")
flags.DEFINE_string(
"cloud_mlengine_model_version", None,
"Version of the model to use. If None, requests will be "
"sent to the default version.")
def validate_flags():
"""Validates flags are set to acceptable values."""
if FLAGS.cloud_mlengine_model_name:
assert not FLAGS.server
assert not FLAGS.servable_name
else:
assert FLAGS.server
assert FLAGS.servable_name
def make_request_fn():
"""Returns a request function."""
if FLAGS.cloud_mlengine_model_name:
request_fn = serving_utils.make_cloud_mlengine_request_fn(
credentials=GoogleCredentials.get_application_default(),
model_name=FLAGS.cloud_mlengine_model_name,
version=FLAGS.cloud_mlengine_model_version)
else:
request_fn = serving_utils.make_grpc_request_fn(
servable_name=FLAGS.servable_name,
server=FLAGS.server,
timeout_secs=FLAGS.timeout_secs)
return request_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
validate_flags()
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
problem = registry.problem(FLAGS.problem)
hparams = tf.contrib.training.HParams(
data_dir=os.path.expanduser(FLAGS.data_dir))
problem.get_hparams(hparams)
request_fn = make_request_fn()
while True:
inputs = FLAGS.inputs_once if FLAGS.inputs_once else input(">> ")
outputs = serving_utils.predict([inputs], problem, request_fn)
outputs, = outputs
output, score = outputs
print_str = """
Input:
{inputs}
Output (Score {score:.3f}):
{output}
"""
print(print_str.format(inputs=inputs, output=output, score=score))
if FLAGS.inputs_once:
break
if __name__ == "__main__":
flags.mark_flags_as_required(["problem", "data_dir"])
tf.app.run()
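# Example invocation (hypothetical server and model names):
#   python tensor2tensor/serving/query.py \
#     --server=localhost:9000 --servable_name=my_model \
#     --problem=translate_ende_wmt32k --data_dir=~/t2t_data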
|
{
"content_hash": "be27f7dbcfe16fc39b7ed519f450e52b",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 83,
"avg_line_length": 32.59340659340659,
"alnum_prop": 0.7147673634524613,
"repo_name": "vthorsteinsson/tensor2tensor",
"id": "7d89d3c7e87390afca17bf2a0c53c75fdb6b6d61",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/serving/query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2423366"
},
{
"name": "Python",
"bytes": "3566836"
},
{
"name": "Shell",
"bytes": "7888"
}
],
"symlink_target": ""
}
|
import distribute_setup
import svm_specializer
distribute_setup.use_setuptools()
from setuptools import setup
setup(name="svm_specializer",
version=svm_specializer.__version__,
description="This is a SEJITS (selective embedded just-in-time specializer) for Support Vector Machines, built on the ASP framework.",
long_description="""
See http://www.armandofox.com/geek/home/sejits/ for more about SEJITS, including links to
publications. See http://github.com/egonina/svm/wiki for more about the SVM specializer.
""",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
author=u"Katya Gonina",
url="http://github.com/egonina/svm/wiki/",
author_email="egonina@eecs.berkeley.edu",
license = "BSD",
packages=["svm_specializer"],
install_requires=[
"asp",
"scikit_learn"
],
)
|
{
"content_hash": "81c1dd7e302c107cd2ec56b7f7dd945f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 140,
"avg_line_length": 34.55263157894737,
"alnum_prop": 0.6344249809596344,
"repo_name": "egonina/svm",
"id": "6f78b941fa9e4af9afa0240d9c255c2a296a0cad",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "46345"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
with open("README.rst") as f:
readme = f.read()
setup(
name="flmakesetup",
version="0.0.8",
author="takahrio iwatani",
author_email="taka.05022002@gmail.com",
description="easy create setup.py",
long_description=readme,
url="https://github.com/float1251/flmakesetup",
py_modules=["flmakesetup"],
packages=find_packages(),
include_package_data=True,
entry_points={
"console_scripts":[
"flmakesetup = flmakesetup:main"
]
},
install_requires=["Jinja2>=2.7.3"],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
)
|
{
"content_hash": "e75a395900b7e6dc0071da059aafea89",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 51,
"avg_line_length": 26.06896551724138,
"alnum_prop": 0.6097883597883598,
"repo_name": "float1251/flmakesetup",
"id": "2ad278f52da07ef1afef2b1af75575d4a3d9e4c6",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3998"
}
],
"symlink_target": ""
}
|
import pytz
from datetime import datetime
from django.test import TestCase
from myuw.dao.calendar import get_events
TRUMBA_PREFIX = 'http://www.trumba.com/calendar/5_current'
class TestCalendar(TestCase):
def setUp(self):
self.now = datetime(2013, 4, 15, 0, 0, 0, tzinfo=pytz.utc)
def test_far_future(self):
cal = {'far_future': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 0)
def test_past_events(self):
cal = {'past': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 0)
def test_future(self):
cal = {'future_1': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 1)
self.assertEqual(len(event_response['events']), 0)
def test_future_two(self):
cal = {'future_1': None,
'future_2': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 2)
self.assertEqual(len(event_response['events']), 0)
self.assertEqual(event_response['future_active_cals'][0]['count'], 1)
self.assertEqual(event_response['future_active_cals'][1]['count'], 2)
def test_current(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['future_active_cals']), 0)
self.assertEqual(len(event_response['events']), 5)
def test_event_url(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
url = "%s?%s" % (TRUMBA_PREFIX,
'trumbaEmbed=eventid%3D1107241160%26view%3Devent')
self.assertEqual(event_response['events'][0]['event_url'], url)
def test_date_sort(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(event_response['events'][0]['summary'],
'Multi Day Event')
self.assertEqual(event_response['events'][4]['summary'],
'Organic Chemistry Seminar: Prof. Matthew Becker4')
def test_active_cals(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(len(event_response['active_cals']), 1)
self.assertEqual(event_response['active_cals'][0]['url'],
TRUMBA_PREFIX)
self.assertEqual(event_response['active_cals'][0]['title'],
"Department of Five Events")
def test_all_day_1(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertTrue(event_response['events'][3]['is_all_day'])
def test_no_location(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertEqual(event_response['events'][3]['event_location'], "")
def test_all_day_2(self):
cal = {'5_current': None}
event_response = get_events(cal, self.now)
self.assertTrue(event_response['events'][3]['is_all_day'])
self.assertFalse(event_response['events'][2]['is_all_day'])
self.assertIn('2013-04-18', event_response['events'][3]['end'])
|
{
"content_hash": "b3e644d9a955ba0abae50d19d51688be",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 40.06741573033708,
"alnum_prop": 0.606842400448682,
"repo_name": "uw-it-aca/myuw",
"id": "2d51987221d368bafc9a9ce2ef1d314bb1e9bd21",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "myuw/test/dao/test_calendar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1353"
},
{
"name": "Dockerfile",
"bytes": "1182"
},
{
"name": "HTML",
"bytes": "87842"
},
{
"name": "JavaScript",
"bytes": "362025"
},
{
"name": "Python",
"bytes": "1057335"
},
{
"name": "SCSS",
"bytes": "5763"
},
{
"name": "Shell",
"bytes": "838"
},
{
"name": "Vue",
"bytes": "522119"
}
],
"symlink_target": ""
}
|
""" Cisco_IOS_XR_infra_rmf_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-rmf package operational data.
This module contains definitions
for the following management objects\:
redundancy\: Redundancy show information
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Redundancy(object):
"""
Redundancy show information
.. attribute:: nodes
Location show information
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Nodes>`
.. attribute:: summary
Redundancy Summary of Nodes
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Summary>`
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = Redundancy.Nodes()
self.nodes.parent = self
self.summary = Redundancy.Summary()
self.summary.parent = self
class Nodes(object):
"""
Location show information
.. attribute:: node
Redundancy Node Information
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Nodes.Node>`
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
Redundancy Node Information
.. attribute:: node_id <key>
Node Location
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: active_reboot_reason
Active node reload
**type**\: str
.. attribute:: err_log
Error Log
**type**\: str
.. attribute:: log
Reload and boot logs
**type**\: str
.. attribute:: redundancy
Row information
**type**\: :py:class:`Redundancy <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Nodes.Node.Redundancy>`
.. attribute:: standby_reboot_reason
Standby node reload
**type**\: str
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_id = None
self.active_reboot_reason = None
self.err_log = None
self.log = None
self.redundancy = Redundancy.Nodes.Node.Redundancy()
self.redundancy.parent = self
self.standby_reboot_reason = None
class Redundancy(object):
"""
Row information
.. attribute:: active
Active node name R/S/I
**type**\: str
.. attribute:: groupinfo
groupinfo
**type**\: list of :py:class:`Groupinfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Nodes.Node.Redundancy.Groupinfo>`
.. attribute:: ha_state
High Availability state Ready/Not Ready
**type**\: str
.. attribute:: nsr_state
NSR state Configured/Not Configured
**type**\: str
.. attribute:: standby
Standby node name R/S/I
**type**\: str
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.active = None
self.groupinfo = YList()
self.groupinfo.parent = self
self.groupinfo.name = 'groupinfo'
self.ha_state = None
self.nsr_state = None
self.standby = None
class Groupinfo(object):
"""
groupinfo
.. attribute:: active
Active
**type**\: str
.. attribute:: ha_state
HAState
**type**\: str
.. attribute:: nsr_state
NSRState
**type**\: str
.. attribute:: standby
Standby
**type**\: str
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.active = None
self.ha_state = None
self.nsr_state = None
self.standby = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-rmf-oper:groupinfo'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None:
return True
if self.ha_state is not None:
return True
if self.nsr_state is not None:
return True
if self.standby is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Nodes.Node.Redundancy.Groupinfo']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-rmf-oper:redundancy'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None:
return True
if self.groupinfo is not None:
for child_ref in self.groupinfo:
if child_ref._has_data():
return True
if self.ha_state is not None:
return True
if self.nsr_state is not None:
return True
if self.standby is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Nodes.Node.Redundancy']['meta_info']
@property
def _common_path(self):
if self.node_id is None:
raise YPYModelError('Key property node_id is None')
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy/Cisco-IOS-XR-infra-rmf-oper:nodes/Cisco-IOS-XR-infra-rmf-oper:node[Cisco-IOS-XR-infra-rmf-oper:node-id = ' + str(self.node_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node_id is not None:
return True
if self.active_reboot_reason is not None:
return True
if self.err_log is not None:
return True
if self.log is not None:
return True
if self.redundancy is not None and self.redundancy._has_data():
return True
if self.standby_reboot_reason is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy/Cisco-IOS-XR-infra-rmf-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Nodes']['meta_info']
class Summary(object):
"""
Redundancy Summary of Nodes
.. attribute:: err_log
Error Log
**type**\: str
.. attribute:: red_pair
Redundancy Pair
**type**\: list of :py:class:`RedPair <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Summary.RedPair>`
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.err_log = None
self.red_pair = YList()
self.red_pair.parent = self
self.red_pair.name = 'red_pair'
class RedPair(object):
"""
Redundancy Pair
.. attribute:: active
Active node name R/S/I
**type**\: str
.. attribute:: groupinfo
groupinfo
**type**\: list of :py:class:`Groupinfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rmf_oper.Redundancy.Summary.RedPair.Groupinfo>`
.. attribute:: ha_state
High Availability state Ready/Not Ready
**type**\: str
.. attribute:: nsr_state
NSR state Configured/Not Configured
**type**\: str
.. attribute:: standby
Standby node name R/S/I
**type**\: str
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.active = None
self.groupinfo = YList()
self.groupinfo.parent = self
self.groupinfo.name = 'groupinfo'
self.ha_state = None
self.nsr_state = None
self.standby = None
class Groupinfo(object):
"""
groupinfo
.. attribute:: active
Active
**type**\: str
.. attribute:: ha_state
HAState
**type**\: str
.. attribute:: nsr_state
NSRState
**type**\: str
.. attribute:: standby
Standby
**type**\: str
"""
_prefix = 'infra-rmf-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.active = None
self.ha_state = None
self.nsr_state = None
self.standby = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy/Cisco-IOS-XR-infra-rmf-oper:summary/Cisco-IOS-XR-infra-rmf-oper:red-pair/Cisco-IOS-XR-infra-rmf-oper:groupinfo'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None:
return True
if self.ha_state is not None:
return True
if self.nsr_state is not None:
return True
if self.standby is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Summary.RedPair.Groupinfo']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy/Cisco-IOS-XR-infra-rmf-oper:summary/Cisco-IOS-XR-infra-rmf-oper:red-pair'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None:
return True
if self.groupinfo is not None:
for child_ref in self.groupinfo:
if child_ref._has_data():
return True
if self.ha_state is not None:
return True
if self.nsr_state is not None:
return True
if self.standby is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Summary.RedPair']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy/Cisco-IOS-XR-infra-rmf-oper:summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.err_log is not None:
return True
if self.red_pair is not None:
for child_ref in self.red_pair:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy.Summary']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-rmf-oper:redundancy'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.nodes is not None and self.nodes._has_data():
return True
if self.summary is not None and self.summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_rmf_oper as meta
return meta._meta_table['Redundancy']['meta_info']
|
{
"content_hash": "f0659cb2fabd05e724bbce22e79fcf1f",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 196,
"avg_line_length": 30.457337883959045,
"alnum_prop": 0.4498543254146123,
"repo_name": "abhikeshav/ydk-py",
"id": "8602e1f4d1c8aad7924312dffbc67445927ac5d8",
"size": "17848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_infra_rmf_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117934"
}
],
"symlink_target": ""
}
|
import os
import demistomock as demisto
RETURN_ERROR_TARGET = 'DocumentationAutomation.return_error'
def test_get_yaml_obj(mocker):
from DocumentationAutomation import get_yaml_obj
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# sanity
file_path = os.path.join('test_data', 'ANYRUN_yml.txt')
mocker.patch.object(demisto, 'getFilePath',
return_value={'path': file_path})
data = get_yaml_obj('12345')
# error count should not change
assert return_error_mock.call_count == 0
assert data['commonfields']['id'] == 'ANYRUN'
# invalid yml
mocker.patch.object(demisto, 'getFilePath',
return_value={'path': os.path.join('test_data', 'not_yml_file.txt')})
get_yaml_obj('234')
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert err_msg == 'Failed to open integration file'
# no such file
mocker.patch.object(demisto, 'getFilePath', side_effect=ValueError('no such file'))
get_yaml_obj('234')
assert return_error_mock.call_count == 2
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert err_msg == 'Failed to open integration file'
def test_extract_command():
from DocumentationAutomation import extract_command
# no args
cmd, args = extract_command('!no-args-command')
assert cmd == '!no-args-command'
assert args == {}
# sanity
cmd, args = extract_command('!command ip=8.8.8.8')
expected = {'ip': '8.8.8.8'}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
# edge cases
cmd, args = extract_command('!command SomeParam=8.8.8.8 dash-arg="args" special_chars="1qazxsw2 EW3- *3d" '
'backTick=`hello "hello" \'hello\'` triple_quotes="""this is a multi quotes"""')
expected = {
'SomeParam': '8.8.8.8',
'dash-arg': 'args',
'special_chars': '1qazxsw2 EW3- *3d',
'backTick': 'hello "hello" \'hello\'',
'triple_quotes': 'this is a multi quotes'
}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
cmd, args = extract_command('!command SomeParam="""hello\nthis is multiline"""')
expected = {
'SomeParam': 'hello\nthis is multiline',
}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
def test_generate_commands_section():
from DocumentationAutomation import generate_commands_section
yml_data = {
'script': {
'commands': [
{'deprecated': True,
'name': 'deprecated-cmd'},
{'deprecated': False,
'name': 'non-deprecated-cmd'}
]
}
}
section, errors = generate_commands_section(yml_data, {})
expected_section = [
'## Commands', '---',
'You can execute these commands from the Demisto CLI, as part of an automation, or in a playbook.',
'After you successfully execute a command, a DBot message appears in the War Room with the command details.',
'1. non-deprecated-cmd', '### 1. non-deprecated-cmd', '---', ' ', '##### Required Permissions',
'**FILL IN REQUIRED PERMISSIONS HERE**', '##### Base Command', '', '`non-deprecated-cmd`', '##### Input', '',
'There are no input arguments for this command.', '', '##### Context Output', '',
'There is no context output for this command.', '', '##### Command Example', '``` ```', '',
'##### Human Readable Output', '', '']
assert section == expected_section
def test_add_lines():
from DocumentationAutomation import add_lines
outputs = [
add_lines('this is some free text.'),
add_lines('1.this is numbered text.'),
add_lines('this is multi line\nwithout numbers'),
add_lines('1.this is multi line\n2.with numbers'),
add_lines('12.this is multi line\n1234.with large numbers'),
]
expected_values = [
['this is some free text.'],
['1.this is numbered text.'],
['this is multi line\nwithout numbers'],
['1.this is multi line', '2.with numbers'],
['12.this is multi line', '1234.with large numbers']
]
for expected, out in zip(expected_values, outputs):
assert out == expected
|
{
"content_hash": "9b122386076751573f3d4902224417ad",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 117,
"avg_line_length": 35.097744360902254,
"alnum_prop": 0.5974721508140531,
"repo_name": "VirusTotal/content",
"id": "3cd0b1b6151010d85a8b0157c32e9b0a650d26b7",
"size": "4668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/DeprecatedContent/Scripts/DocumentationAutomation/DocumentationAutomation_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("advicer", "0011_auto_20150725_1213")]
operations = [
migrations.AlterField(
model_name="advice",
name="issues",
field=models.ManyToManyField(
to="advicer.Issue", verbose_name="Issues", blank=True
),
preserve_default=True,
)
]
|
{
"content_hash": "e8c6c4cf7215b4304e60b1f4714f517f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 25.41176470588235,
"alnum_prop": 0.5694444444444444,
"repo_name": "rwakulszowa/poradnia",
"id": "cf590ba31c44ff61e40dfba99edd2d1b435e34ae",
"size": "432",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "poradnia/advicer/migrations/0012_auto_20151124_1541.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "261351"
},
{
"name": "HTML",
"bytes": "154406"
},
{
"name": "JavaScript",
"bytes": "1083760"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "481049"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
}
|
"""BenchlingAPI."""
from .__version__ import __authors__
from .__version__ import __homepage__
from .__version__ import __repo__
from .__version__ import __title__
from .__version__ import __version__
from benchlingapi.models import schema # must import schema before session
from benchlingapi.session import Session
|
{
"content_hash": "4a4424a653aba9e82c6494332478756c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 39.75,
"alnum_prop": 0.7138364779874213,
"repo_name": "klavinslab/benchling-api",
"id": "0fef73b020a0dcc1e72b0a9fbd3111f88fd36f7e",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchlingapi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "786"
},
{
"name": "CSS",
"bytes": "2390"
},
{
"name": "HTML",
"bytes": "1814"
},
{
"name": "Makefile",
"bytes": "2149"
},
{
"name": "Python",
"bytes": "109660"
}
],
"symlink_target": ""
}
|
import os
import setuptools
import textwrap
from ceilometer.openstack.common import setup as common_setup
requires = common_setup.parse_requirements(['tools/pip-requires'])
depend_links = common_setup.parse_dependency_links(['tools/pip-requires'])
project = 'ceilometer'
version = common_setup.get_version(project, '2013.1.3')
url_base = 'http://tarballs.openstack.org/ceilometer/ceilometer-%s.tar.gz'
def directories(target_dir):
return [dirpath
for dirpath, dirnames, filenames in os.walk(target_dir)]
setuptools.setup(
name='ceilometer',
version=version,
description='cloud computing metering',
author='OpenStack',
author_email='ceilometer@lists.launchpad.net',
url='https://launchpad.net/ceilometer',
download_url=url_base % version,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Setuptools Plugin',
'Environment :: OpenStack',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Monitoring',
],
packages=setuptools.find_packages(exclude=['bin',
'tests',
'tests.*',
'*.tests',
'nova_tests']),
cmdclass=common_setup.get_cmdclass(),
package_data={
"ceilometer":
directories("ceilometer/api/static")
+ directories("ceilometer/api/templates"),
},
include_package_data=True,
test_suite='nose.collector',
scripts=['bin/ceilometer-agent-compute',
'bin/ceilometer-agent-central',
'bin/ceilometer-api',
'bin/ceilometer-collector',
'bin/ceilometer-dbsync'],
py_modules=[],
install_requires=requires,
dependency_links=depend_links,
zip_safe=False,
entry_points=textwrap.dedent("""
[ceilometer.collector]
instance = ceilometer.compute.notifications:Instance
instance_flavor = ceilometer.compute.notifications:InstanceFlavor
instance_delete = ceilometer.compute.notifications:InstanceDelete
memory = ceilometer.compute.notifications:Memory
vcpus = ceilometer.compute.notifications:VCpus
disk_root_size = ceilometer.compute.notifications:RootDiskSize
disk_ephemeral_size = ceilometer.compute.notifications:EphemeralDiskSize
volume = ceilometer.volume.notifications:Volume
volume_size = ceilometer.volume.notifications:VolumeSize
image_crud = ceilometer.image.notifications:ImageCRUD
image = ceilometer.image.notifications:Image
image_size = ceilometer.image.notifications:ImageSize
image_download = ceilometer.image.notifications:ImageDownload
image_serve = ceilometer.image.notifications:ImageServe
network = ceilometer.network.notifications:Network
subnet = ceilometer.network.notifications:Subnet
port = ceilometer.network.notifications:Port
router = ceilometer.network.notifications:Router
floatingip = ceilometer.network.notifications:FloatingIP
[ceilometer.poll.compute]
diskio = ceilometer.compute.pollsters:DiskIOPollster
cpu = ceilometer.compute.pollsters:CPUPollster
net = ceilometer.compute.pollsters:NetPollster
instance = ceilometer.compute.pollsters:InstancePollster
[ceilometer.poll.central]
network_floatingip = ceilometer.network.floatingip:FloatingIPPollster
image = ceilometer.image.glance:ImagePollster
objectstore = ceilometer.objectstore.swift:SwiftPollster
kwapi = ceilometer.energy.kwapi:KwapiPollster
[ceilometer.storage]
log = ceilometer.storage.impl_log:LogStorage
mongodb = ceilometer.storage.impl_mongodb:MongoDBStorage
mysql = ceilometer.storage.impl_sqlalchemy:SQLAlchemyStorage
postgresql = ceilometer.storage.impl_sqlalchemy:SQLAlchemyStorage
sqlite = ceilometer.storage.impl_sqlalchemy:SQLAlchemyStorage
test = ceilometer.storage.impl_test:TestDBStorage
hbase = ceilometer.storage.impl_hbase:HBaseStorage
[ceilometer.compute.virt]
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
[ceilometer.transformer]
accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
[ceilometer.publisher]
meter_publisher = ceilometer.publisher.meter_publish:MeterPublisher
[paste.filter_factory]
swift=ceilometer.objectstore.swift_middleware:filter_factory
"""),
)
|
{
"content_hash": "2175e5b92a8334f5960f553035cf2f43",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 36.56923076923077,
"alnum_prop": 0.6989903239377366,
"repo_name": "shootstar/ctest",
"id": "dad7f219f3da04c6923e0afc3b25cc5a542da637",
"size": "5453",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "368517"
},
{
"name": "Python",
"bytes": "1012329"
}
],
"symlink_target": ""
}
|
import mysql.connector
import time
from insert_channel_table import get_channel_url_id_dict, get_channel_id_from_url
class YoutubeVideoCrawlerPipeline(object):
def __init__(self):
self.conn = mysql.connector.connect(user='taloscar', password='taloscar', database='taloscar', host='10.161.23.57')
self.cursor = self.conn.cursor()
def open_spider(self, spider):
pass
def close_spider(self, spider):
self.cursor.close()
self.conn.close()
def process_item(self, item, spider):
video_url = item['video_url']
channel_url = item['channel_url']
channel_url_with_id = get_channel_url_id_dict()
channel_id = get_channel_id_from_url(channel_url, channel_url_with_id)
if channel_id:
update_time = int(round(time.time() * 1000))
print update_time
try:
                self.cursor.execute(
                    'select * from youtube_video where channel_id = %s '
                    'and video_url = %s',
                    (channel_id, video_url))
row = self.cursor.fetchone()
if row:
print 'record existed'
else:
sql = 'insert into youtube_video (channel_id, video_url, update_time) values (%s, %s, %s)'
self.cursor.execute(sql, (channel_id, video_url, update_time))
self.conn.commit()
print 'record inserted'
except Exception as e:
print 'exception when mysql operation'
print e
return item
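    # Item-shape sketch consumed by process_item above (URLs illustrative):
    #   {'video_url': 'https://www.youtube.com/watch?v=XXXX',
    #    'channel_url': 'https://www.youtube.com/channel/XXXX'}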
|
{
"content_hash": "0365b055a076af248dbd137a3074f052",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 123,
"avg_line_length": 35.8,
"alnum_prop": 0.5543140906269398,
"repo_name": "eric-huo/youtube_video_crawler",
"id": "162d099da0e377bd4ae1fb0310b44ad957d9cf04",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtube_video_crawler/youtube_video_crawler/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13482"
}
],
"symlink_target": ""
}
|
from celery import task
from celery.exceptions import SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
from go_http import HttpApiSender
from go_http.send import HttpApiSender as sendHttpApiSender
import csv
from .models import Message, Subscription
from django.conf import settings
from django.db import IntegrityError, transaction, connection
from django.db.models import Max
from django.core.exceptions import ObjectDoesNotExist
import logging
standard_logger = logging.getLogger(__name__)
celery_logger = get_task_logger(__name__)
@task()
def ingest_csv(csv_data, message_set):
""" Expecting data in the following format:
message_id,en,safe,af,safe,zu,safe,xh,safe,ve,safe,tn,safe,ts,safe,ss,safe,st,safe,nso,safe,nr,safe
"""
records = csv.DictReader(csv_data)
for line in records:
for key in line:
# Ignore non-content keys and empty keys
if key not in ["message_id", "safe"] and line[key] != "":
try:
with transaction.atomic():
message = Message()
message.message_set = message_set
message.sequence_number = line["message_id"]
message.lang = key
message.content = line[key]
message.save()
except (IntegrityError, ValueError) as e:
message = None
# crappy CSV data
standard_logger.error(e)
@task()
def ensure_one_subscription():
"""
Fixes issues caused by upstream failures
that lead to users having multiple active subscriptions
Runs daily
"""
cursor = connection.cursor()
cursor.execute("UPDATE subscription_subscription SET active = False \
WHERE id NOT IN \
(SELECT MAX(id) as id FROM \
subscription_subscription GROUP BY to_addr)")
affected = cursor.rowcount
vumi_fire_metric.delay(
metric="subscription.duplicates", value=affected, agg="last")
return affected
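# Effect sketch (ids and address are illustrative): if to_addr '+27820001111'
# has subscription rows with ids 3 and 7, the UPDATE above leaves only id 7
# active.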
@task()
def vumi_fire_metric(metric, value, agg, sender=None):
try:
if sender is None:
sender = HttpApiSender(
account_key=settings.VUMI_GO_ACCOUNT_KEY,
conversation_key=settings.VUMI_GO_CONVERSATION_KEY,
conversation_token=settings.VUMI_GO_ACCOUNT_TOKEN
)
sender.fire_metric(metric, value, agg=agg)
return sender
except SoftTimeLimitExceeded:
standard_logger.error(
            'Soft time limit exceeded processing metric fire to Vumi \
HTTP API via Celery',
exc_info=True)
@task()
def process_message_queue(schedule, sender=None):
# Get all active and incomplete subscriptions for schedule
subscriptions = Subscription.objects.filter(
schedule=schedule, active=True, completed=False,
process_status=0).all()
# Make a reusable session to Vumi
if sender is None:
sender = sendHttpApiSender(
account_key=settings.VUMI_GO_ACCOUNT_KEY,
conversation_key=settings.VUMI_GO_CONVERSATION_KEY,
conversation_token=settings.VUMI_GO_ACCOUNT_TOKEN
)
# sender = LoggingSender('go_http.test')
# Fire off message processor for each
for subscription in subscriptions:
        subscription.process_status = 1  # In Process
subscription.save()
processes_message.delay(subscription.id, sender)
return subscriptions.count()
@task()
def processes_message(subscription_id, sender):
try:
# Get next message
try:
subscription = Subscription.objects.get(id=subscription_id)
message = Message.objects.get(
message_set=subscription.message_set, lang=subscription.lang,
sequence_number=subscription.next_sequence_number)
# Send messages
messages = message.content.split(
settings.SUBSCRIPTION_MULTIPART_BOUNDARY)
if len(messages) == 1:
if message.content == settings.SUBSCRIPTION_NOOP_KEYWORD:
response = []
else:
response = sender.send_text(
subscription.to_addr, message.content)
else:
response = []
for text in messages:
response.append(
sender.send_text(subscription.to_addr, text.strip()))
# Post process moving to next message, next set or finished
# Get set max
set_max = Message.objects.filter(
message_set=subscription.message_set
).aggregate(Max('sequence_number'))["sequence_number__max"]
# Compare user position to max
if subscription.next_sequence_number == set_max:
# Mark current as completed
subscription.completed = True
subscription.active = False
subscription.process_status = 2 # Completed
subscription.save()
# fire completed metric
vumi_fire_metric.delay(
metric="sum.%s_completed" %
(subscription.message_set.short_name, ),
value=1, agg="sum", sender=sender)
# If next set defined create new subscription
message_set = subscription.message_set
if message_set.next_set:
# clone existing minus PK as recommended in
# https://docs.djangoproject.com/en/1.6/topics/db/queries/#copying-model-instances
subscription.pk = None
subscription.process_status = 0 # Ready
subscription.active = True
subscription.completed = False
subscription.next_sequence_number = 1
subscription_new = subscription
subscription_new.message_set = message_set.next_set
subscription_new.schedule = (
message_set.next_set.default_schedule)
subscription_new.save()
else:
vumi_fire_metric.delay(
metric="sum.finished_messages",
value=1, agg="sum", sender=sender)
else:
                # More in this set so iterate by one
subscription.next_sequence_number = \
subscription.next_sequence_number + 1
subscription.process_status = 0 # Ready
subscription.save()
return response
except ObjectDoesNotExist:
subscription.process_status = -1 # Errored
subscription.save()
celery_logger.error('Missing subscription message', exc_info=True)
except SoftTimeLimitExceeded:
celery_logger.error(
            'Soft time limit exceeded processing message to Vumi \
HTTP API via Celery',
exc_info=True)
|
{
"content_hash": "a7829990896a679de334e63423181a07",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 103,
"avg_line_length": 40.822857142857146,
"alnum_prop": 0.5816069428891377,
"repo_name": "westerncapelabs/django-messaging-subscription",
"id": "9e599446c6c00b783655f1450c87a264d7e30b61",
"size": "7144",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "subscription/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1227"
},
{
"name": "Python",
"bytes": "58431"
}
],
"symlink_target": ""
}
|
"""
Utility functions and classes for classifiers.
"""
from __future__ import print_function, division
import math
#from nltk.util import Deprecated
import nltk.classify.util # for accuracy & log_likelihood
from nltk.util import LazyMap
######################################################################
#{ Helper Functions
######################################################################
# alternative name possibility: 'map_featurefunc()'?
# alternative name possibility: 'detect_features()'?
# alternative name possibility: 'map_featuredetect()'?
# or.. just have users use LazyMap directly?
def apply_features(feature_func, toks, labeled=None):
"""
Use the ``LazyMap`` class to construct a lazy list-like
object that is analogous to ``map(feature_func, toks)``. In
particular, if ``labeled=False``, then the returned list-like
object's values are equal to::
[feature_func(tok) for tok in toks]
If ``labeled=True``, then the returned list-like object's values
are equal to::
[(feature_func(tok), label) for (tok, label) in toks]
The primary purpose of this function is to avoid the memory
overhead involved in storing all the featuresets for every token
in a corpus. Instead, these featuresets are constructed lazily,
as-needed. The reduction in memory overhead can be especially
significant when the underlying list of tokens is itself lazy (as
is the case with many corpus readers).
:param feature_func: The function that will be applied to each
token. It should return a featureset -- i.e., a dict
mapping feature names to feature values.
:param toks: The list of tokens to which ``feature_func`` should be
        applied. If ``labeled=False``, then the list elements will be
        passed directly to ``feature_func()``. If ``labeled=True``,
then the list elements should be tuples ``(tok,label)``, and
``tok`` will be passed to ``feature_func()``.
:param labeled: If true, then ``toks`` contains labeled tokens --
i.e., tuples of the form ``(tok, label)``. (Default:
auto-detect based on types.)
"""
if labeled is None:
labeled = toks and isinstance(toks[0], (tuple, list))
if labeled:
def lazy_func(labeled_token):
return (feature_func(labeled_token[0]), labeled_token[1])
return LazyMap(lazy_func, toks)
else:
return LazyMap(feature_func, toks)
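# Minimal usage sketch (the feature function and tokens are illustrative):
#     >>> def fd(tok): return {'len': len(tok)}
#     >>> fs = apply_features(fd, [('dog', 'N'), ('barks', 'V')])
#     >>> fs[0]
#     ({'len': 3}, 'N')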
def attested_labels(tokens):
"""
:return: A list of all labels that are attested in the given list
of tokens.
:rtype: list of (immutable)
:param tokens: The list of classified tokens from which to extract
labels. A classified token has the form ``(token, label)``.
:type tokens: list
"""
return tuple(set(label for (tok, label) in tokens))
def log_likelihood(classifier, gold):
results = classifier.prob_classify_many([fs for (fs, l) in gold])
ll = [pdist.prob(l) for ((fs, l), pdist) in zip(gold, results)]
return math.log(sum(ll) / len(ll))
def accuracy(classifier, gold):
results = classifier.classify_many([fs for (fs, l) in gold])
correct = [l == r for ((fs, l), r) in zip(gold, results)]
if correct:
return sum(correct) / len(correct)
else:
return 0
class CutoffChecker(object):
"""
A helper class that implements cutoff checks based on number of
iterations and log likelihood.
Accuracy cutoffs are also implemented, but they're almost never
a good idea to use.
"""
def __init__(self, cutoffs):
self.cutoffs = cutoffs.copy()
if 'min_ll' in cutoffs:
cutoffs['min_ll'] = -abs(cutoffs['min_ll'])
if 'min_lldelta' in cutoffs:
cutoffs['min_lldelta'] = abs(cutoffs['min_lldelta'])
self.ll = None
self.acc = None
self.iter = 1
def check(self, classifier, train_toks):
cutoffs = self.cutoffs
self.iter += 1
if 'max_iter' in cutoffs and self.iter >= cutoffs['max_iter']:
return True # iteration cutoff.
new_ll = nltk.classify.util.log_likelihood(classifier, train_toks)
if math.isnan(new_ll):
return True
if 'min_ll' in cutoffs or 'min_lldelta' in cutoffs:
if 'min_ll' in cutoffs and new_ll >= cutoffs['min_ll']:
return True # log likelihood cutoff
if ('min_lldelta' in cutoffs and self.ll and
((new_ll - self.ll) <= abs(cutoffs['min_lldelta']))):
return True # log likelihood delta cutoff
self.ll = new_ll
if 'max_acc' in cutoffs or 'min_accdelta' in cutoffs:
            new_acc = nltk.classify.util.accuracy(
                classifier, train_toks)
            if 'max_acc' in cutoffs and new_acc >= cutoffs['max_acc']:
                return True # accuracy cutoff
            if ('min_accdelta' in cutoffs and self.acc and
                ((new_acc - self.acc) <= abs(cutoffs['min_accdelta']))):
                return True # accuracy delta cutoff
self.acc = new_acc
return False # no cutoff reached.
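# Typical driver-loop sketch for CutoffChecker (``improve`` stands in for
# one training iteration and is not part of this module):
#     checker = CutoffChecker({'max_iter': 10, 'min_lldelta': 1e-4})
#     while not checker.check(classifier, train_toks):
#         classifier = improve(classifier, train_toks)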
######################################################################
#{ Demos
######################################################################
def names_demo_features(name):
features = {}
features['alwayson'] = True
features['startswith'] = name[0].lower()
features['endswith'] = name[-1].lower()
for letter in 'abcdefghijklmnopqrstuvwxyz':
features['count(%s)' % letter] = name.lower().count(letter)
features['has(%s)' % letter] = letter in name.lower()
return features
def binary_names_demo_features(name):
features = {}
features['alwayson'] = True
features['startswith(vowel)'] = name[0].lower() in 'aeiouy'
features['endswith(vowel)'] = name[-1].lower() in 'aeiouy'
for letter in 'abcdefghijklmnopqrstuvwxyz':
features['count(%s)' % letter] = name.lower().count(letter)
features['has(%s)' % letter] = letter in name.lower()
features['startswith(%s)' % letter] = (letter == name[0].lower())
features['endswith(%s)' % letter] = (letter == name[-1].lower())
return features
def names_demo(trainer, features=names_demo_features):
from nltk.corpus import names
import random
# Construct a list of classified names, using the names corpus.
namelist = ([(name, 'male') for name in names.words('male.txt')] +
[(name, 'female') for name in names.words('female.txt')])
# Randomly split the names into a test & train set.
random.seed(123456)
random.shuffle(namelist)
train = namelist[:5000]
test = namelist[5000:5500]
# Train up a classifier.
print('Training classifier...')
classifier = trainer( [(features(n), g) for (n, g) in train] )
# Run the classifier on the test data.
print('Testing classifier...')
acc = accuracy(classifier, [(features(n), g) for (n, g) in test])
print('Accuracy: %6.4f' % acc)
# For classifiers that can find probabilities, show the log
# likelihood and some sample probability distributions.
try:
test_featuresets = [features(n) for (n, g) in test]
pdists = classifier.prob_classify_many(test_featuresets)
ll = [pdist.logprob(gold)
for ((name, gold), pdist) in zip(test, pdists)]
print('Avg. log likelihood: %6.4f' % (sum(ll) / len(test)))
print()
print('Unseen Names P(Male) P(Female)\n'+'-'*40)
for ((name, gender), pdist) in list(zip(test, pdists))[:5]:
if gender == 'male':
fmt = ' %-15s *%6.4f %6.4f'
else:
fmt = ' %-15s %6.4f *%6.4f'
print(fmt % (name, pdist.prob('male'), pdist.prob('female')))
except NotImplementedError:
pass
# Return the classifier
return classifier
def partial_names_demo(trainer, features=names_demo_features):
from nltk.corpus import names
import random
male_names = names.words('male.txt')
female_names = names.words('female.txt')
random.seed(654321)
random.shuffle(male_names)
random.shuffle(female_names)
# Create a list of male names to be used as positive-labeled examples for training
positive = map(features, male_names[:2000])
# Create a list of male and female names to be used as unlabeled examples
unlabeled = map(features, male_names[2000:2500] + female_names[:500])
# Create a test set with correctly-labeled male and female names
test = [(name, True) for name in male_names[2500:2750]] \
+ [(name, False) for name in female_names[500:750]]
random.shuffle(test)
# Train up a classifier.
print('Training classifier...')
classifier = trainer(positive, unlabeled)
# Run the classifier on the test data.
print('Testing classifier...')
acc = accuracy(classifier, [(features(n), m) for (n, m) in test])
print('Accuracy: %6.4f' % acc)
# For classifiers that can find probabilities, show the log
# likelihood and some sample probability distributions.
try:
test_featuresets = [features(n) for (n, m) in test]
pdists = classifier.prob_classify_many(test_featuresets)
ll = [pdist.logprob(gold)
for ((name, gold), pdist) in zip(test, pdists)]
print('Avg. log likelihood: %6.4f' % (sum(ll) / len(test)))
print()
print('Unseen Names P(Male) P(Female)\n'+'-'*40)
        for ((name, is_male), pdist) in list(zip(test, pdists))[:5]:
            if is_male:
fmt = ' %-15s *%6.4f %6.4f'
else:
fmt = ' %-15s %6.4f *%6.4f'
print(fmt % (name, pdist.prob(True), pdist.prob(False)))
except NotImplementedError:
pass
# Return the classifier
return classifier
_inst_cache = {}
def wsd_demo(trainer, word, features, n=1000):
from nltk.corpus import senseval
import random
# Get the instances.
print('Reading data...')
global _inst_cache
if word not in _inst_cache:
_inst_cache[word] = [(i, i.senses[0]) for i in senseval.instances(word)]
instances = _inst_cache[word][:]
if n > len(instances):
n = len(instances)
senses = list(set(l for (i, l) in instances))
print(' Senses: ' + ' '.join(senses))
# Randomly split the names into a test & train set.
print('Splitting into test & train...')
random.seed(123456)
random.shuffle(instances)
train = instances[:int(.8*n)]
test = instances[int(.8*n):n]
# Train up a classifier.
print('Training classifier...')
classifier = trainer([(features(i), l) for (i, l) in train])
# Run the classifier on the test data.
print('Testing classifier...')
acc = accuracy(classifier, [(features(i), l) for (i, l) in test])
print('Accuracy: %6.4f' % acc)
# For classifiers that can find probabilities, show the log
# likelihood and some sample probability distributions.
try:
test_featuresets = [features(i) for (i, n) in test]
pdists = classifier.prob_classify_many(test_featuresets)
ll = [pdist.logprob(gold)
for ((name, gold), pdist) in zip(test, pdists)]
print('Avg. log likelihood: %6.4f' % (sum(ll) / len(test)))
except NotImplementedError:
pass
# Return the classifier
return classifier
|
{
"content_hash": "b2750f68a3c025a9ae1449ad1f02d28b",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 86,
"avg_line_length": 37.76315789473684,
"alnum_prop": 0.6040940766550522,
"repo_name": "sfrenza/test-for-bot",
"id": "47419483ec6da8a1ef5a5304775535290ef1151e",
"size": "11755",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/nltk/classify/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1248"
},
{
"name": "C",
"bytes": "491365"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "11360695"
},
{
"name": "Tcl",
"bytes": "1237789"
}
],
"symlink_target": ""
}
|
"""
The photo module contains the :class:`Photo` class, which is used to track
image objects (JPG, DNG, etc.).
.. moduleauthor:: Jaisen Mathai <jaisen@jmathai.com>
"""
import imghdr
import os
import pyexiv2
import re
import subprocess
import time
from elodie import constants
from media import Media
from elodie import geolocation
class Photo(Media):
"""A photo object.
:param str source: The fully qualified path to the photo file
"""
__name__ = 'Photo'
#: Valid extensions for photo files.
extensions = ('jpg', 'jpeg', 'nef', 'dng', 'gif')
def __init__(self, source=None):
super(Photo, self).__init__(source)
# We only want to parse EXIF once so we store it here
self.exif = None
def get_duration(self):
"""Get the duration of a photo in seconds. Uses ffmpeg/ffprobe.
:returns: str or None for a non-photo file
"""
if(not self.is_valid()):
return None
source = self.source
result = subprocess.Popen(
['ffprobe', source],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
for key in result.stdout.readlines():
if 'Duration' in key:
return re.search(
                    r'(\d{2}:\d{2}.\d{2})',
key
).group(1).replace('.', ':')
return None
def get_coordinate(self, type='latitude'):
"""Get latitude or longitude of photo from EXIF
:param str type: Type of coordinate to get. Either "latitude" or
"longitude".
:returns: float or None if not present in EXIF or a non-photo file
"""
if(not self.is_valid()):
return None
key = self.exif_map[type]
exif = self.get_exif()
if(key not in exif):
return None
try:
# this is a hack to get the proper direction by negating the
# values for S and W
coords = exif[key].value
return geolocation.dms_to_decimal(
*coords,
direction=exif[self.exif_map[self.d_coordinates[type]]].value
)
except KeyError:
return None
def get_date_taken(self):
"""Get the date which the photo was taken.
The date value returned is defined by the min() of mtime and ctime.
:returns: time object or None for non-photo files or 0 timestamp
"""
if(not self.is_valid()):
return None
source = self.source
seconds_since_epoch = min(os.path.getmtime(source), os.path.getctime(source)) # noqa
# We need to parse a string from EXIF into a timestamp.
# EXIF DateTimeOriginal and EXIF DateTime are both stored
# in %Y:%m:%d %H:%M:%S format
# we use date.strptime -> .timetuple -> time.mktime to do
# the conversion in the local timezone
# EXIF DateTime is already stored as a timestamp
# Sourced from https://github.com/photo/frontend/blob/master/src/libraries/models/Photo.php#L500 # noqa
exif = self.get_exif()
for key in self.exif_map['date_taken']:
try:
if(key in exif):
                    if(re.match(r'\d{4}(-|:)\d{2}(-|:)\d{2}', str(exif[key].value)) is not None):  # noqa
seconds_since_epoch = time.mktime(exif[key].value.timetuple()) # noqa
break
except BaseException as e:
if(constants.debug is True):
print e
pass
if(seconds_since_epoch == 0):
return None
return time.gmtime(seconds_since_epoch)
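    # Conversion sketch (illustrative value): pyexiv2 returns datetime
    # objects, so
    #   time.mktime(datetime(2015, 6, 1, 12, 30).timetuple())
    # yields local-time seconds since epoch, which time.gmtime() above
    # turns back into a time.struct_time.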
def is_valid(self):
"""Check the file extension against valid file extensions.
The list of valid file extensions come from self.extensions. This
also checks whether the file is an image.
:returns: bool
"""
source = self.source
# gh-4 This checks if the source file is an image.
# It doesn't validate against the list of supported types.
if(imghdr.what(source) is None):
return False
return os.path.splitext(source)[1][1:].lower() in self.extensions
def set_date_taken(self, time):
"""Set the date/time a photo was taken.
:param datetime time: datetime object of when the photo was taken
:returns: bool
"""
if(time is None):
return False
source = self.source
exif_metadata = pyexiv2.ImageMetadata(source)
exif_metadata.read()
# Writing exif with pyexiv2 differs if the key already exists so we
# handle both cases here.
for key in ['Exif.Photo.DateTimeOriginal', 'Exif.Image.DateTime']:
if(key in exif_metadata):
exif_metadata[key].value = time
else:
exif_metadata[key] = pyexiv2.ExifTag(key, time)
exif_metadata.write()
return True
def set_location(self, latitude, longitude):
"""Set latitude and longitude for a photo.
:param float latitude: Latitude of the file
:param float longitude: Longitude of the file
:returns: bool
"""
if(latitude is None or longitude is None):
return False
source = self.source
exif_metadata = pyexiv2.ImageMetadata(source)
exif_metadata.read()
exif_metadata['Exif.GPSInfo.GPSLatitude'] = geolocation.decimal_to_dms(latitude, False) # noqa
exif_metadata['Exif.GPSInfo.GPSLatitudeRef'] = pyexiv2.ExifTag('Exif.GPSInfo.GPSLatitudeRef', 'N' if latitude >= 0 else 'S') # noqa
exif_metadata['Exif.GPSInfo.GPSLongitude'] = geolocation.decimal_to_dms(longitude, False) # noqa
exif_metadata['Exif.GPSInfo.GPSLongitudeRef'] = pyexiv2.ExifTag('Exif.GPSInfo.GPSLongitudeRef', 'E' if longitude >= 0 else 'W') # noqa
exif_metadata.write()
return True
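    # Usage sketch (coordinates are illustrative):
    #   photo = Photo('/path/to/image.jpg')
    #   photo.set_location(37.7749, -122.4194)  # writes Exif.GPSInfo.* tags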
def set_title(self, title):
"""Set title for a photo.
:param str title: Title of the photo.
:returns: bool
"""
if(title is None):
return False
source = self.source
exif_metadata = pyexiv2.ImageMetadata(source)
exif_metadata.read()
exif_metadata['Xmp.dc.title'] = title
exif_metadata.write()
return True
|
{
"content_hash": "5007b38ad3cf15dfbf6614337fcf6930",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 143,
"avg_line_length": 31.655172413793103,
"alnum_prop": 0.5796763149704326,
"repo_name": "zserg/elodie",
"id": "bd6ddb29ee6dd315a60a3f619926a2aa67449b1d",
"size": "6426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elodie/media/photo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16750"
},
{
"name": "HTML",
"bytes": "8996"
},
{
"name": "JavaScript",
"bytes": "17406"
},
{
"name": "Python",
"bytes": "106443"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
from weakref import WeakValueDictionary
import wx
import wx.lib.newevent
from atom.api import Int, Typed
from enaml.widgets.action import ProxyAction
from .wx_toolkit_object import WxToolkitObject
#: An event emitted when a wxAction has been triggered by the user. The
#: payload of the event will have an 'IsChecked' attribute.
wxActionTriggeredEvent, EVT_ACTION_TRIGGERED = wx.lib.newevent.NewEvent()
#: An event emitted by a wxAction when it has been toggled by the user.
#: The payload of the event will have an 'IsChecked' attribute.
wxActionToggledEvent, EVT_ACTION_TOGGLED = wx.lib.newevent.NewEvent()
#: An event emitted by a wxAction when its state has been changed.
wxActionChangedEvent, EVT_ACTION_CHANGED = wx.lib.newevent.NewEvent()
class wxAction(wx.EvtHandler):
""" A wx.EvtHandler which behaves similar to a QAction.
"""
#: Class storage which maps action id -> action instance.
_action_map = WeakValueDictionary()
@classmethod
def FindById(cls, action_id):
""" Find a wxAction instance using the given action id.
Parameters
----------
action_id : int
The id for the action.
Returns
-------
result : wxAction or None
            The wxAction instance for the given id, or None if no
action exists for that id.
"""
return cls._action_map.get(action_id)
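    # Lookup sketch: every wxAction registers itself on construction, so
    #   act = wxAction()
    #   assert wxAction.FindById(act.GetId()) is act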
def __init__(self, parent=None):
""" Initialize a wxAction.
Parameters
----------
parent : object or None
The parent for this wxAction. The parent is not directly
used by the action, but is provided as a convenience for
other parts of the framework.
"""
super(wxAction, self).__init__()
self._parent = parent
self._text = u''
self._tool_tip = u''
self._status_tip = u''
self._checkable = False
self._checked = False
self._enabled = True
self._visible = True
self._group_enabled = True
self._group_visible = True
self._separator = False
self._batch = False
self._id = wx.NewId()
self._action_map[self._id] = self
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _EmitChanged(self):
""" Emits the EVT_ACTION_CHANGED event if not in batch mode.
"""
if not self._batch:
event = wxActionChangedEvent()
event.SetEventObject(self)
wx.PostEvent(self, event)
def _SetGroupEnabled(self, enabled):
""" A private method called by an owner action group.
Parameters
----------
enabled : bool
Whether or not the owner group is enabled.
"""
if self._group_enabled != enabled:
old = self.IsEnabled()
self._group_enabled = enabled
new = self.IsEnabled()
if old != new:
self._EmitChanged()
def _SetGroupVisible(self, visible):
""" A private method called by an owner action group.
Parameters
----------
visible : bool
            Whether or not the owner group is visible.
"""
if self._group_visible != visible:
old = self.IsVisible()
self._group_visible = visible
new = self.IsVisible()
if old != new:
self._EmitChanged()
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def GetParent(self):
""" Get the parent of the action.
Returns
-------
result : object or None
The parent of this action or None.
"""
return self._parent
def SetParent(self, parent):
""" Set the parent of the action.
Parameters
----------
parent : object or None
The object to use as the parent of this action.
"""
self._parent = parent
def Trigger(self):
""" A method called by the action owner when the user triggers
the action.
This handler will emit the custom EVT_ACTION_TRIGGERED event.
User code should not typically call this method directly.
"""
        # The event is posted to the event queue; posting preserves the
        # relative order of trigger/toggle event firing.
event = wxActionTriggeredEvent(IsChecked=self._checked)
event.SetEventObject(self)
wx.PostEvent(self, event)
def BeginBatch(self):
""" Enter batch update mode for the action.
"""
self._batch = True
def EndBatch(self, emit=True):
""" Exit batch update mode for the action.
Parameters
----------
emit : bool, optional
If True, emit a changed event after leaving batch mode. The
default is True.
"""
self._batch = False
if emit:
self._EmitChanged()
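    # Batched-update sketch (setter calls are illustrative): wrap several
    # changes so observers receive a single EVT_ACTION_CHANGED.
    #   action.BeginBatch()
    #   action.SetText(u'Save')
    #   action.SetEnabled(False)
    #   action.EndBatch()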
def GetId(self):
""" Get the unique wx id for this action.
Returns
-------
result : int
The wx id number for this action.
"""
return self._id
def GetText(self):
""" Get the text for the action.
Returns
-------
result : unicode
The unicode text for the action.
"""
return self._text
def SetText(self, text):
""" Set the text for the action.
Parameters
----------
text : unicode
The unicode text for the action.
"""
if self._text != text:
self._text = text
self._EmitChanged()
def GetToolTip(self):
""" Get the tool tip for the action.
Returns
-------
result : unicode
The unicode tool tip for the action.
"""
return self._tool_tip
def SetToolTip(self, tool_tip):
""" Set the tool tip for the action.
Parameters
----------
tool_tip : unicode
The unicode tool tip for the action.
"""
if self._tool_tip != tool_tip:
self._tool_tip = tool_tip
self._EmitChanged()
def GetStatusTip(self):
""" Get the status tip for the action.
Returns
-------
result : unicode
The unicode status tip for the action.
"""
return self._status_tip
def SetStatusTip(self, status_tip):
""" Set the status tip for the action.
Parameters
----------
status_tip : unicode
The unicode status tip for the action.
"""
if self._status_tip != status_tip:
self._status_tip = status_tip
self._EmitChanged()
def IsCheckable(self):
""" Get whether or not the action is checkable.
Returns
-------
result : bool
Whether or not the action is checkable.
"""
return self._checkable
def SetCheckable(self, checkable):
""" Set whether or not the action is checkable.
Parameters
----------
checkable : bool
Whether or not the action is checkable.
"""
if self._checkable != checkable:
self._checkable = checkable
self._EmitChanged()
def IsChecked(self):
""" Get whether or not the action is checked.
Returns
-------
result : bool
Whether or not the action is checked.
"""
return self._checked
def SetChecked(self, checked):
""" Set whether or not the action is checked.
Parameters
----------
checked : bool
Whether or not the action is checked.
"""
if self._checked != checked:
self._checked = checked
self._EmitChanged()
event = wxActionToggledEvent(IsChecked=checked)
event.SetEventObject(self)
wx.PostEvent(self, event)
def IsEnabled(self):
""" Get whether or not the action is enabled.
Returns
-------
result : bool
Whether or not the action is enabled.
"""
if self._group_enabled:
return self._enabled
return False
def SetEnabled(self, enabled):
""" Set whether or not the action is enabled.
Parameters
----------
enabled : bool
Whether or not the action is enabled.
"""
if self._enabled != enabled:
self._enabled = enabled
if self._group_enabled:
self._EmitChanged()
def IsVisible(self):
""" Get whether or not the action is visible.
Returns
-------
result : bool
Whether or not the action is visible.
"""
if self._group_visible:
return self._visible
return False
def SetVisible(self, visible):
""" Set whether or not the action is visible.
Parameters
----------
visible : bool
Whether or not the action is visible.
"""
if self._visible != visible:
self._visible = visible
if self._group_visible:
self._EmitChanged()
def IsSeparator(self):
""" Get whether or not the action is a separator.
Returns
-------
result : bool
Whether or not the action is a separator.
"""
return self._separator
def SetSeparator(self, separator):
""" Set whether or not the action is a separator.
Parameters
----------
separator : bool
Whether or not the action is a separator.
"""
if self._separator != separator:
self._separator = separator
self._EmitChanged()
# cyclic notification guard flags
CHECKED_GUARD = 0x1
class WxAction(WxToolkitObject, ProxyAction):
""" A Wx implementation of an Enaml ProxyAction.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxAction)
    #: Cyclic notification guard. This is a bitfield of multiple guards.
_guard = Int(0)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying wxAction object.
"""
self.widget = wxAction(self.parent_widget())
def init_widget(self):
""" Create and initialize the underlying control.
"""
super(WxAction, self).init_widget()
d = self.declaration
widget = self.widget
widget.BeginBatch()
if d.text:
self.set_text(d.text)
if d.tool_tip:
self.set_tool_tip(d.tool_tip)
if d.status_tip:
self.set_status_tip(d.status_tip)
if d.icon:
self.set_icon(d.icon)
self.set_checkable(d.checkable)
self.set_checked(d.checked)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
self.set_separator(d.separator)
widget.EndBatch(emit=False)
widget.Bind(EVT_ACTION_TRIGGERED, self.on_triggered)
widget.Bind(EVT_ACTION_TOGGLED, self.on_toggled)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def on_triggered(self, event):
""" The event handler for the EVT_ACTION_TRIGGERED event.
"""
if not self._guard & CHECKED_GUARD:
checked = event.IsChecked
self.declaration.checked = checked
self.declaration.triggered(checked)
def on_toggled(self, event):
""" The event handler for the EVT_ACTION_TOGGLED event.
"""
if not self._guard & CHECKED_GUARD:
checked = event.IsChecked
self.declaration.checked = checked
self.declaration.toggled(checked)
#--------------------------------------------------------------------------
# ProxyAction API
#--------------------------------------------------------------------------
def set_text(self, text):
""" Set the text on the underlying control.
"""
self.widget.SetText(text)
def set_tool_tip(self, tool_tip):
""" Set the tool tip on the underlying control.
"""
self.widget.SetToolTip(tool_tip)
def set_status_tip(self, status_tip):
""" Set the status tip on the underyling control.
"""
self.widget.SetStatusTip(status_tip)
def set_icon(self, icon):
""" Set the icon for the action.
This is not supported on Wx.
"""
pass
def set_checkable(self, checkable):
""" Set the checkable state on the underlying control.
"""
self.widget.SetCheckable(checkable)
def set_checked(self, checked):
""" Set the checked state on the underlying control.
"""
self._guard |= CHECKED_GUARD
try:
self.widget.SetChecked(checked)
finally:
self._guard &= ~CHECKED_GUARD
def set_enabled(self, enabled):
""" Set the enabled state on the underlying control.
"""
self.widget.SetEnabled(enabled)
def set_visible(self, visible):
""" Set the visible state on the underlying control.
"""
self.widget.SetVisible(visible)
def set_separator(self, separator):
""" Set the separator state on the underlying control.
"""
self.widget.SetSeparator(separator)
|
{
"content_hash": "05acf17de699e0cf8fa1981a0fe4b567",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 79,
"avg_line_length": 26.803059273422562,
"alnum_prop": 0.5226137822799258,
"repo_name": "ContinuumIO/ashiba",
"id": "6e206d7b5f203a1827464060a3421e92c736e59b",
"size": "14368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enaml/enaml/wx/wx_action.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4560"
},
{
"name": "C",
"bytes": "738"
},
{
"name": "C++",
"bytes": "77464"
},
{
"name": "CSS",
"bytes": "2286"
},
{
"name": "Emacs Lisp",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "4891"
},
{
"name": "JavaScript",
"bytes": "17243"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "3241535"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "VimL",
"bytes": "1821"
}
],
"symlink_target": ""
}
|
""" Friendly Dates and Times """
__version__ = '0.4.0'
# Disable pylint's invalid name warning. 'tz' is used in a few places
# and it should be the only thing causing pylint to include the warning.
# pylint: disable-msg=C0103
import calendar
import datetime
import locale
import os
import random
import pytz
# Some functions may take a parameter to designate a return value in UTC
# instead of local time. This will be used to force them to return UTC
# regardless of the paramter's value.
_FORCE_UTC = False
class _FormatsMetaClass(type):
"""Allows the formats class to be treated as an iterable.
It is important to understand how this class works.
    ``hasattr(formats, 'DATE')`` is true. ``'DATE' in formats`` is false.
    ``hasattr(formats, 'D_FMT')`` is false. ``'D_FMT' in formats`` is
    true.
This is made possible through the ``__contains__`` and
``__getitem__`` methods. ``__getitem__`` checks for the name of the
attribute within the ``formats`` class. ``__contains__``, on the
other hand, checks for the specified value assigned to an attribute
of the class.
"""
DATE = 'D_FMT'
DATETIME = 'D_T_FMT'
TIME = 'T_FMT'
TIME_AMPM = 'T_FMT_AMPM'
def __contains__(self, value):
index = 0
for attr in dir(_FormatsMetaClass):
if (not attr.startswith('__')
and attr != 'mro'
and getattr(_FormatsMetaClass, attr) == value):
index = attr
break
return index
def __getitem__(self, attr):
return getattr(_FormatsMetaClass, attr)
def __iter__(self):
for attr in dir(_FormatsMetaClass):
if not attr.startswith('__') and attr != 'mro':
yield attr
formats = _FormatsMetaClass('formats', (object,), {})
formats.__doc__ = """A set of predefined datetime formats.
.. versionadded:: 0.3.0
"""
def _add_time(value, years=0, months=0, weeks=0, days=0, hours=0, minutes=0,
seconds=0, milliseconds=0, microseconds=0):
"""Adds units of time to a datetime.
This function creates a :class:`~datetime.timedelta` instance from
    the parameters passed into it and adds it to ``value``. The
parameters not supported by :class:`~datetime.timedelta`--``months``
and ``years``--are then applied to ``value``.
:param value: The original datetime.
:type value: datetime.datetime, datetime.date, datetime.time.
:param years: The number of years to add to ``value``.
:type years: int.
:param months: The number of months to add to ``value``.
:type months: int.
:param weeks: The number of weeks to add to ``value``.
:type weeks: int.
:param days: The number of days to add to ``value``.
:type days: int.
:param hours: The number of hours to add to ``value``.
:type hours: int.
:param minutes: The number of minutes to add to ``value``.
:type minutes: int.
:param seconds: The number of seconds to add to ``value``.
:type seconds: int.
:param milliseconds: The number of milliseconds to add to ``value``.
:type milliseconds: int.
:param microseconds: The number of microseconds to add to ``value``.
:type microseconds: int.
    :returns: datetime -- the adjusted datetime.
:raises: TypeError
"""
if not _is_date_type(value):
message = "'{0}' object is not a valid date or time."
raise TypeError(message.format(type(value).__name__))
# If any of the standard timedelta values are used, use timedelta
# for them.
if seconds or minutes or hours or days or weeks:
delta = datetime.timedelta(weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds,
milliseconds=milliseconds,
microseconds=microseconds)
value += delta
# Months are tricky. If the current month plus the requested number
# of months is greater than 12 (or less than 1), we'll get a
# ValueError. After figuring out the number of years and months from
# the number of months, shift the values so that we get a valid
# month.
if months:
more_years, months = divmod(months, 12)
years += more_years
if not (1 <= months + value.month <= 12):
more_years, months = divmod(months + value.month, 12)
months -= value.month
years += more_years
if months or years:
year = value.year + years
month = value.month + months
        # When converting from a day in a month that doesn't exist in the
# ending month, a ValueError will be raised. What follows is an
# ugly, ugly hack to get around this.
try:
value = value.replace(year=year, month=month)
except ValueError:
# When the day in the origin month isn't in the destination
# month, the total number of days in the destination month
# is needed. calendar.mdays would be a nice way to do this
# except it doesn't account for leap years at all; February
# always has 28 days.
_, destination_days = calendar.monthrange(year, month)
# I am reluctantly writing this comment as I fear putting
# the craziness of the hack into writing, but I don't want
# to forget what I was doing here so I can fix it later.
#
# The new day will either be 1, 2, or 3. It will be
# determined by the difference in days between the day value
# of the datetime being altered and the number of days in
# the destination month. After that, month needs to be
# incremented. If that puts the new date into January (the
# value will be 13), year will also need to be incremented
# (with month being switched to 1).
#
# Once all of that has been figured out, a simple replace
# will do the trick.
day = value.day - destination_days
month += 1
if month > 12:
month = 1
year += 1
value = value.replace(year=year, month=month, day=day)
return value
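# A worked example of the month-overflow handling above (illustrative;
# the dates are arbitrary):
#
# >>> _add_time(datetime.datetime(2012, 1, 31), months=1)
# datetime.datetime(2012, 3, 2, 0, 0)
#
# "February 31" does not exist; February 2012 has 29 days, so the day
# becomes 31 - 29 = 2 and the month is bumped from February to March.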
def _is_date_type(value):
    # Acceptable types must be or extend:
# datetime.date
# datetime.time
return isinstance(value, (datetime.date, datetime.time))
def all_timezones():
"""Get a list of all time zones.
This is a wrapper for ``pytz.all_timezones``.
:returns: list -- all time zones.
"""
return pytz.all_timezones
def all_timezones_set():
"""Get a set of all time zones.
This is a wrapper for ``pytz.all_timezones_set``.
:returns: set -- all time zones.
"""
return pytz.all_timezones_set
def common_timezones():
"""Get a list of common time zones.
This is a wrapper for ``pytz.common_timezones``.
:returns: list -- common time zones.
"""
return pytz.common_timezones
def common_timezones_set():
"""Get a set of common time zones.
This is a wrapper for ``pytz.common_timezones_set``.
:returns: set -- common time zones.
"""
return pytz.common_timezones_set
def ever():
"""Get a random datetime.
Instead of using ``datetime.MINYEAR`` and ``datetime.MAXYEAR`` as
the bounds, the current year +/- 100 is used. The thought behind
this is that years that are too extreme will not be as useful.
:returns: datetime.datetime -- a random datetime.
.. versionadded:: 0.3.0
"""
# Get the year bounds
min_year = max(datetime.MINYEAR, today().year - 100)
max_year = min(datetime.MAXYEAR, today().year + 100)
# Get the random values
year = random.randint(min_year, max_year)
month = random.randint(1, 12)
day = random.randint(1, calendar.mdays[month])
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
    microsecond = random.randint(0, 999999)  # datetime requires 0 <= microsecond <= 999999
return datetime.datetime(year=year, month=month, day=day, hour=hour,
minute=minute, second=second,
microsecond=microsecond)
def format(value, format_string):
"""Get a formatted version of a datetime.
This is a wrapper for ``strftime()``. The full list of directives
that can be used can be found at
http://docs.python.org/library/datetime.html#strftime-strptime-behavior.
Predefined formats are exposed through ``when.formats``:
.. data:: when.formats.DATE
Date in locale-based format.
.. data:: when.formats.DATETIME
Date and time in locale-based format.
.. data:: when.formats.TIME
Time in locale-based format.
.. data:: when.formats.TIME_AMPM
12-hour time in locale-based format.
:param value: A datetime object.
:type value: datetime.datetime, datetime.date, datetime.time.
    :param format_string: A string specifying the formatting directives
        to use.
:type format_string: str.
:returns: str -- the formatted datetime.
:raises: TypeError
.. versionchanged:: 0.4.0
``TypeError`` is now raised
.. versionadded:: 0.3.0
"""
if not _is_date_type(value):
message = "'{0}' object is not a valid date or time."
raise TypeError(message.format(type(value).__name__))
# Check to see if `format_string` is a value from the `formats`
# class. If it is, obtain the real value from
# `locale.nl_langinfo()`.
if format_string in formats:
format_string = locale.nl_langinfo(getattr(locale, format_string))
return value.strftime(format_string)
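# Illustrative usage (the output of the locale-based formats depends on
# the active locale; the second result assumes an en_US locale):
#
# >>> format(datetime.datetime(2012, 2, 29, 12, 30), '%Y-%m-%d %H:%M')
# '2012-02-29 12:30'
# >>> format(datetime.date(2012, 2, 29), formats.DATE)
# '02/29/2012'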
def future(years=0, months=0, weeks=0, days=0, hours=0, minutes=0,
seconds=0, milliseconds=0, microseconds=0, utc=False):
"""Get a datetime in the future.
    ``future()`` accepts all of the parameters of
``datetime.timedelta``, plus includes the parameters ``years`` and
``months``. ``years`` and ``months`` will add their respective units
of time to the datetime.
By default ``future()`` will return the datetime in the system's
local time. If the ``utc`` parameter is set to ``True`` or
``set_utc()`` has been called, the datetime will be based on UTC
instead.
:param years: The number of years to add.
:type years: int.
:param months: The number of months to add.
:type months: int.
:param weeks: The number of weeks to add.
:type weeks: int.
:param days: The number of days to add.
:type days: int.
:param hours: The number of hours to add.
:type hours: int.
:param minutes: The number of minutes to add.
:type minutes: int.
:param seconds: The number of seconds to add.
:type seconds: int.
:param milliseconds: The number of milliseconds to add.
:type milliseconds: int.
:param microseconds: The number of microseconds to add.
:type microseconds: int.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
"""
return _add_time(now(utc), years=years, months=months, weeks=weeks,
days=days, hours=hours, minutes=minutes, seconds=seconds,
milliseconds=milliseconds, microseconds=microseconds)
def how_many_leap_days(from_date, to_date):
"""Get the number of leap days between two dates
:param from_date: A datetime object. If only a year is specified,
will use January 1.
:type from_date: datetime.datetime, datetime.date
    :param to_date: A datetime object. If only a year is specified,
will use January 1.
:type to_date: datetime.datetime, datetime.date
:returns: int -- the number of leap days.
:raises: TypeError, ValueError
.. versionchanged:: 0.4.0
``TypeError`` is now raised
``ValueError`` is now raised
.. versionadded:: 0.3.0
"""
if isinstance(from_date, int):
from_date = datetime.date(from_date, 1, 1)
if isinstance(to_date, int):
to_date = datetime.date(to_date, 1, 1)
if not _is_date_type(from_date):
message = "'{0}' object is not a valid date or time."
raise TypeError(message.format(type(from_date).__name__))
if not _is_date_type(to_date):
message = "'{0}' object is not a valid date or time."
raise TypeError(message.format(type(to_date).__name__))
# Both `from_date` and `to_date` need to be of the same type.
# Since both `datetime.date` and `datetime.datetime` will pass the
# above assertions, cast any `datetime.datetime` values to
# `datetime.date`.
if isinstance(from_date, datetime.datetime):
from_date = from_date.date()
if isinstance(to_date, datetime.datetime):
to_date = to_date.date()
if from_date > to_date:
message = ("The value of 'from_date' must be before the value of "
"'to_date'.")
raise ValueError(message)
number_of_leaps = calendar.leapdays(from_date.year, to_date.year)
# `calendar.leapdays()` calculates the number of leap days by using
# January 1 for the specified years. If `from_date` occurs after
# February 28 in a leap year, remove one leap day from the total. If
# `to_date` occurs after February 28 in a leap year, add one leap
# day to the total.
if calendar.isleap(from_date.year):
month, day = from_date.month, from_date.day
if month > 2 or (month == 2 and day > 28):
number_of_leaps -= 1
if calendar.isleap(to_date.year):
month, day = to_date.month, to_date.day
if month > 2 or (month == 2 and day > 28):
number_of_leaps += 1
return number_of_leaps
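# A worked example (illustrative): both years default to January 1, so
# only calendar.leapdays() contributes, counting the leap years 2012
# and 2016.
#
# >>> how_many_leap_days(2012, 2017)
# 2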
def is_5_oclock():
# Congratulations, you've found an easter egg!
#
# Returns a `datetime.timedelta` object representing how much time
# is remaining until 5 o'clock. If the current time is between 5pm
# and midnight, a negative value will be returned. Keep in mind, a
# `timedelta` is considered negative when the `days` attribute is
# negative; the values for `seconds` and `microseconds` will always
# be positive.
#
# All values will be `0` at 5 o'clock.
# Because this method deals with local time, the force UTC flag will
# need to be turned off and back on if it has been set.
force = _FORCE_UTC
if force:
unset_utc()
# A `try` is used here to ensure that the UTC flag will be restored
# even if an exception is raised when calling `now()`. This should
# never be the case, but better safe than sorry.
try:
the_datetime = now()
finally:
if force:
set_utc()
five = datetime.time(17)
return datetime.datetime.combine(the_datetime.date(), five) - the_datetime
def is_timezone_aware(value):
"""Check if a datetime is time zone aware.
`is_timezone_aware()` is the inverse of `is_timezone_naive()`.
:param value: A valid datetime object.
:type value: datetime.datetime, datetime.time
:returns: bool -- if the object is time zone aware.
:raises: TypeError
.. versionchanged:: 0.4.0
``TypeError`` is raised
.. versionadded:: 0.3.0
"""
if not hasattr(value, 'tzinfo'):
message = "'{0}' object is not a valid time."
raise TypeError(message.format(type(value).__name__))
return not (value.tzinfo is None or value.tzinfo.utcoffset(value) is None)
def is_timezone_naive(value):
"""Check if a datetime is time zone naive.
`is_timezone_naive()` is the inverse of `is_timezone_aware()`.
:param value: A valid datetime object.
:type value: datetime.datetime, datetime.time
:returns: bool -- if the object is time zone naive.
:raises: TypeError
.. versionchanged:: 0.4.0
``TypeError`` is now raised
.. versionadded:: 0.3.0
"""
if not hasattr(value, 'tzinfo'):
message = "'{0}' object is not a valid time."
raise TypeError(message.format(type(value).__name__))
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def now(utc=False):
"""Get a datetime representing the current date and time.
By default ``now()`` will return the datetime in the system's local
time. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
has been called, the datetime will be based on UTC instead.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the current datetime.
"""
if _FORCE_UTC or utc:
return datetime.datetime.utcnow()
else:
return datetime.datetime.now()
def past(years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0, utc=False):
"""Get a datetime in the past.
    ``past()`` accepts all of the parameters of
    ``datetime.timedelta``, plus includes the parameters ``years`` and
    ``months``. ``years`` and ``months`` will subtract their respective
    units of time from the datetime.
By default ``past()`` will return the datetime in the system's local
time. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
has been called, the datetime will be based on UTC instead.
:param years: The number of years to subtract.
:type years: int.
:param months: The number of months to subtract.
:type months: int.
:param weeks: The number of weeks to subtract.
:type weeks: int.
:param days: The number of days to subtract.
:type days: int.
:param hours: The number of hours to subtract.
:type hours: int.
:param minutes: The number of minutes to subtract.
:type minutes: int.
:param seconds: The number of seconds to subtract.
:type seconds: int.
:param milliseconds: The number of milliseconds to subtract.
:type milliseconds: int.
:param microseconds: The number of microseconds to subtract.
:type microseconds: int.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
"""
    return _add_time(now(utc), years=-years, months=-months, weeks=-weeks,
                     days=-days, hours=-hours, minutes=-minutes,
                     seconds=-seconds, milliseconds=-milliseconds,
                     microseconds=-microseconds)
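# Illustrative usage (the exact result depends on the current time):
#
# >>> past(days=1, hours=6)
# datetime.datetime(...)    # now() shifted back by 30 hours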
def set_utc():
"""Set all datetimes to UTC.
The ``utc`` parameter of other methods will be ignored, with the
global setting taking precedence.
This can be reset by calling ``unset_utc()``.
"""
global _FORCE_UTC # Causes pylint W0603
_FORCE_UTC = True
def shift(value, from_tz=None, to_tz=None, utc=False):
"""Convert a datetime from one time zone to another.
``value`` will be converted from its time zone (when it is time zone
aware) or the time zone specified by ``from_tz`` (when it is time
zone naive) to the time zone specified by ``to_tz``. These values
can either be strings containing the name of the time zone (see
``pytz.all_timezones`` for a list of all supported values) or a
``datetime.tzinfo`` object.
If no value is provided for either ``from_tz`` (when ``value`` is
time zone naive) or ``to_tz``, the current system time zone will be
used. If the ``utc`` parameter is set to ``True`` or ``set_utc()``
has been called, however, UTC will be used instead.
:param value: A datetime object.
:type value: datetime.datetime, datetime.time.
:param from_tz: The time zone to shift from.
:type from_tz: datetime.tzinfo, str.
:param to_tz: The time zone to shift to.
:type to_tz: datetime.tzinfo, str.
:param utc: Whether or not to use UTC instead of local time.
:type utc: bool.
:returns: datetime.datetime -- the calculated datetime.
:raises: TypeError
.. versionchanged:: 0.4.0
``TypeError`` is now raised
"""
if not hasattr(value, 'tzinfo'):
message = "'{0}' object is not a valid time."
raise TypeError(message.format(type(value).__name__))
# Check for a from timezone
# If the datetime is time zone aware, its time zone should be used. If it's
# naive, from_tz must be supplied.
if is_timezone_aware(value):
from_tz = value.tzinfo
else:
if not from_tz:
if _FORCE_UTC or utc:
from_tz = pytz.UTC
else:
from_tz = timezone_object() # Use the system's time zone
else:
if not isinstance(from_tz, datetime.tzinfo):
# This will raise pytz.UnknownTimeZoneError
from_tz = pytz.timezone(from_tz)
# Check for a to timezone
if not to_tz:
if _FORCE_UTC or utc:
to_tz = pytz.UTC
else:
to_tz = timezone_object() # Use the system's time zone
else:
if not isinstance(to_tz, datetime.tzinfo):
# This will raise pytz.UnknownTimeZoneError
to_tz = pytz.timezone(to_tz)
if from_tz == to_tz:
return value
# If the datetime is time zone naive, pytz provides a convenient way to
    # convert it to time zone aware. Using replace() directly on the datetime
# results in losing an hour when converting ahead.
if is_timezone_naive(value):
value = from_tz.localize(value)
return value.astimezone(to_tz).replace(tzinfo=None)
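# A worked example (illustrative): June 1 falls within daylight saving
# time, when US/Eastern is UTC-4, and the result is naive because the
# tzinfo is stripped above.
#
# >>> shift(datetime.datetime(2013, 6, 1, 12, 0),
# ...       from_tz='UTC', to_tz='US/Eastern')
# datetime.datetime(2013, 6, 1, 8, 0)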
def timezone():
"""Get the name of the current system time zone.
:returns: str -- the name of the system time zone.
"""
# Check for the time zone:
    # 1. as an environment setting (most likely not set)
# 2. in /etc/timezone (hopefully)
# 3. in /etc/localtime (last chance)
tz = (_timezone_from_env()
or _timezone_from_etc_timezone()
or _timezone_from_etc_localtime())
return '{0}'.format(tz)
def _timezone_from_env():
""" get the system time zone from os.environ """
if 'TZ' in os.environ:
try:
return pytz.timezone(os.environ['TZ'])
except pytz.UnknownTimeZoneError:
pass
return None
def _timezone_from_etc_localtime():
""" get the system time zone from /etc/localtime """
matches = []
if os.path.exists('/etc/localtime'):
path = '/etc/localtime'
realpath = os.path.realpath(path)
# On OSX 10.9.5, using just /etc/localtime without resolving the
# realpath leads to the returned timezone being "/etc/localtime".
        localtime = pytz.tzfile.build_tzinfo(realpath,
                                             open(realpath, 'rb'))
for tzname in pytz.all_timezones:
tz = pytz.timezone(tzname)
if localtime.zone.endswith(tz.zone):
# Continuing with the OSX 10.9.5 example, the comparisons below
# continue incorrectly when comparing localtime._transition_info
# with tz._transition_info, as the tz version has one
# more entry than the localtime version.
matches.append(tz.zone)
continue
if dir(tz) != dir(localtime):
continue
for attr in dir(tz):
if callable(getattr(tz, attr)) or attr.startswith('__'):
continue
if attr == 'zone' or attr == '_tzinfos':
continue
if getattr(tz, attr) != getattr(localtime, attr):
break
else:
matches.append(tzname)
if matches:
return pytz.timezone(matches[0])
else:
# Causes pylint W0212
pytz._tzinfo_cache['/etc/localtime'] = localtime
return localtime
def _timezone_from_etc_timezone():
""" get the system time zone from /etc/timezone """
if os.path.exists('/etc/timezone'):
        with open('/etc/timezone') as tz_file:
            tz = tz_file.read().strip()
try:
return pytz.timezone(tz)
except pytz.UnknownTimeZoneError:
pass
return None
def timezone_object(tz_name=None):
"""Get the current system time zone.
:param tz_name: The name of the time zone.
:type tz_name: str.
:returns: datetime.tzinfo -- the time zone, defaults to system time
zone.
"""
return pytz.timezone(tz_name if tz_name else timezone())
def today():
"""Get a date representing the current date.
:returns: datetime.date -- the current date.
"""
return datetime.date.today()
def tomorrow():
"""Get a date representing tomorrow's date.
:returns: datetime.date -- the current date plus one day.
"""
return datetime.date.today() + datetime.timedelta(days=1)
def unset_utc():
"""Set all datetimes to system time.
The ``utc`` parameter of other methods will be used.
This can be changed by calling ``set_utc()``.
"""
global _FORCE_UTC # Causes pylint W0603
_FORCE_UTC = False
def yesterday():
"""Get a date representing yesterday's date.
:returns: datetime.date -- the current date minus one day.
"""
return datetime.date.today() - datetime.timedelta(days=1)
|
{
"content_hash": "8e5d31ca80697cc590b56176fcd8cd91",
"timestamp": "",
"source": "github",
"line_count": 771,
"max_line_length": 80,
"avg_line_length": 32.86381322957198,
"alnum_prop": 0.6217144210277055,
"repo_name": "dirn/When.py",
"id": "1acf979a29c395a7751ca51847808ee490a4b050",
"size": "25363",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "when.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "176"
},
{
"name": "Python",
"bytes": "42979"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
version = '1.0.2'
setup(name='pyactiveresource',
version=version,
description='ActiveResource for Python',
author='Jared Kuolt',
author_email='me@superjared.com',
url='http://code.google.com/p/pyactiveresource/',
packages=['pyactiveresource'],
package_dir={'pyactiveresource':'src'},
license='MIT License',
platforms=['any'],
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules']
)
|
{
"content_hash": "6d524956e9148d9bfa5c006f40a13d2f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 38.541666666666664,
"alnum_prop": 0.5697297297297297,
"repo_name": "PiratenBayernIT/pyactiveresource",
"id": "33b92a6de5c048182686284a1768df3b7f08e684",
"size": "925",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120051"
}
],
"symlink_target": ""
}
|
import gettext
import pycountry
from flask import request
from flask_login import login_required
from . import bp
from ..decorators import jsonify
@bp.route('/currencies/')
@login_required
@jsonify()
def get_currencies():
def prepare_currency(currency):
return {
'symbol': currency.letter,
'name': _(currency.name)
}
lang = request.args.get('language', 'en')
try:
language = gettext.translation('iso4217', pycountry.LOCALES_DIR, languages=[lang])
language.install()
_ = language.gettext
except FileNotFoundError:
_ = lambda x: x
all_currencies = [prepare_currency(x) for x in pycountry.currencies.objects]
return {'currencies': all_currencies}
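# Illustrative request/response for the endpoint above (the exact list
# depends on the installed pycountry data and translations):
#
#   GET /currencies/?language=de
#   -> {"currencies": [{"symbol": "EUR", "name": "Euro"}, ...]}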
|
{
"content_hash": "c4b82a4edbbc3e69372537d8c0a95f02",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 90,
"avg_line_length": 24.866666666666667,
"alnum_prop": 0.6568364611260054,
"repo_name": "monetario/core",
"id": "8371c15512dd6fe9af3b90a759c60bacb3d835c9",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monetario/views/api/v1/currency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "454"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "250980"
}
],
"symlink_target": ""
}
|
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='classic'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "classic" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"
num_dim = 1
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
probdata = rundata.new_UserData(name='probdata', fname='setprob.data')
probdata.add_param('grav', 9.81, 'gravitational constant')
probdata.add_param('beta', 100., 'Gaussian width parameter')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 0.000000e+00 # xlower
clawdata.upper[0] = 2.000000e+00 # xupper
# Number of grid cells:
clawdata.num_cells[0] = 100 # mx
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 2
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 0
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.qNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.q0006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 20
clawdata.tfinal = 1.000000
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 2
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.000000e-01
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.900000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.000000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 500
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 2
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['mc', 'mc']
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 0
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'wall' # at xlower
clawdata.bc_upper[0] = 'wall' # at xupper
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
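# Illustrative usage (assuming a standard Clawpack workflow): running
# `python setrun.py` from this directory writes claw.data and
# setprob.data, which the Fortran executable reads at startup; the
# Clawpack Makefiles typically automate this via the `.data` target.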
|
{
"content_hash": "73ff167a0ab33a7c8e752f412f811219",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 92,
"avg_line_length": 31.986607142857142,
"alnum_prop": 0.5589672016748081,
"repo_name": "amath574w2015/am574-class",
"id": "07cccc8cc7aea878f2487c02e8d755dd9bae83e2",
"size": "7165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labs/lab8/shallow_1d_example1/setrun.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16448"
},
{
"name": "FORTRAN",
"bytes": "8434"
},
{
"name": "HTML",
"bytes": "992"
},
{
"name": "Makefile",
"bytes": "12639"
},
{
"name": "Matlab",
"bytes": "1260"
},
{
"name": "Python",
"bytes": "62477"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "TeX",
"bytes": "48331"
}
],
"symlink_target": ""
}
|
"""
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from types import ListType, StringType
import os, re, sys, time, urllib
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler','demo_app']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
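# Illustrative examples:
#
# >>> _formatparam('filename', 'bud.gif')    # quote=1 by default
# 'filename="bud.gif"'
# >>> _formatparam('attachment')             # no value: bare key
# 'attachment'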
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if type(headers) is not ListType:
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
        Note that if the header appears multiple times, exactly which
        occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
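    # Illustrative output (note the trailing blank line that terminates
    # the header block):
    #
    # >>> str(Headers([('Content-Type', 'text/plain')]))
    # 'Content-Type: text/plain\r\n\r\n'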
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will
want to redefine this method, such that it sets up callbacks
in the event loop to iterate over the data, and to call
'self.close()' once the response is finished.
"""
        if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError,AttributeError,NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert type(status) is StringType,"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert type(name) is StringType,"Header names must be strings"
assert type(val) is StringType,"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % time.asctime(time.gmtime(time.time()))
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert type(data) is StringType,"write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
    # These were abstract methods in the wsgiref original; here they are
    # given concrete implementations that delegate to the handler's streams.
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException, e
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format % args))
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from django.conf import settings
self.application = application
if not media_dir:
import django
self.media_dir = django.__path__[0] + '/contrib/admin/media'
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
relative_url = environ['PATH_INFO'][len(self.media_url):]
file_path = os.path.join(self.media_dir, relative_url)
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % file_path]
else:
try:
fp = open(file_path, 'rb')
except IOError:
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % file_path]
else:
status = '200 OK'
headers = {}
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
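# Minimal illustrative usage (hypothetical app, not part of this
# module). A WSGI application is any callable taking (environ,
# start_response) and returning an iterable of strings:
#
#     def hello_app(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['Hello, world!\n']
#
#     run('127.0.0.1', 8000, hello_app)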
|
{
"content_hash": "cdb7c2aaaa730307379e3e03cfbf5a33",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 94,
"avg_line_length": 36.73520249221184,
"alnum_prop": 0.6002798507462687,
"repo_name": "jonaustin/advisoryscan",
"id": "80a0bf6a91c66ef16d17f90b5bd1ad5081dcdf44",
"size": "23584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/django/core/servers/basehttp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63725"
},
{
"name": "JavaScript",
"bytes": "159708"
},
{
"name": "Perl",
"bytes": "89271"
},
{
"name": "Python",
"bytes": "2194026"
},
{
"name": "Shell",
"bytes": "3612"
}
],
"symlink_target": ""
}
|
import versioneer
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Read long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="tohu",
version=versioneer.get_version(),
description="Create random data in a controllable way",
long_description=long_description,
url="https://github.com/maxalbert/tohu",
author="Maximilian Albert",
author_email="maximilian.albert@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
packages=["tohu", "tohu/v4", "tohu/v6", "tohu/v6/custom_generator/", "tohu/v7"],
install_requires=["attrs", "bidict", "faker", "geojson", "pandas", "psycopg2-binary", "shapely", "sqlalchemy", "tqdm"],
extras_require={"dev": ["ipython", "jupyter"], "test": ["pytest", "nbval"]},
cmdclass=versioneer.get_cmdclass(),
)
|
{
"content_hash": "1db3f8ccbd28549ffd627692b673f9a3",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 123,
"avg_line_length": 37,
"alnum_prop": 0.6448948948948949,
"repo_name": "maxalbert/tohu",
"id": "ac9974ddeeb82d17d5e9f85e567b8f163a7ba675",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "244324"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "568361"
}
],
"symlink_target": ""
}
|
import importlib
import json
import numpy as np
import os
import sys
import time
import uuid
import copy
from hypergan.discriminators import *
from hypergan.distributions import *
from hypergan.generators import *
from hypergan.inputs import *
from hypergan.samplers import *
from hypergan.trainers import *
import hyperchamber as hc
from hyperchamber import Config
from hypergan.ops import TensorflowOps
import hypergan as hg
import tensorflow as tf
from hypergan.gan_component import ValidationException, GANComponent
from ..base_gan import BaseGAN
from hypergan.distributions.uniform_distribution import UniformDistribution
class AlignedAliGAN3(BaseGAN):
"""
"""
def __init__(self, *args, **kwargs):
BaseGAN.__init__(self, *args, **kwargs)
def required(self):
"""
`input_encoder` is a discriminator. It encodes X into Z
`discriminator` is a standard discriminator. It measures X, reconstruction of X, and G.
`generator` produces two samples, input_encoder output and a known random distribution.
"""
return "discriminator ".split()
def create(self):
config = self.config
ops = self.ops
with tf.device(self.device):
#x_input = tf.identity(self.inputs.x, name='input')
xa_input = tf.identity(self.inputs.xa, name='xa_i')
xb_input = tf.identity(self.inputs.xb, name='xb_i')
if config.same_g:
ga = self.create_component(config.generator, input=xb_input, name='a_generator')
gb = self.create_component(config.generator, input=xa_input, name='a_generator', reuse=True)
elif config.two_g:
ga = self.create_component(config.generator1, input=xb_input, name='a_generator')
gb = self.create_component(config.generator2, input=xa_input, name='b_generator')
else:
ga = self.create_component(config.generator, input=xb_input, name='a_generator')
gb = self.create_component(config.generator, input=xa_input, name='b_generator')
za = ga.controls["z"]
zb = gb.controls["z"]
self.uniform_sample = ga.sample
xba = ga.sample
xab = gb.sample
xa_hat = ga.sample
xb_hat = gb.sample
#xa_hat = ga.reuse(gb.sample)
#xb_hat = gb.reuse(ga.sample)
z_shape = self.ops.shape(za)
uz_shape = z_shape
uz_shape[-1] = uz_shape[-1] // len(config.latent.projections or [1])
ue = UniformDistribution(self, config.latent, output_shape=uz_shape)
ue2 = UniformDistribution(self, config.latent, output_shape=uz_shape)
ue3 = UniformDistribution(self, config.latent, output_shape=uz_shape)
ue4 = UniformDistribution(self, config.latent, output_shape=uz_shape)
print('ue', ue.sample)
zua = ue.sample
zub = ue2.sample
uga = ga.sample#ga.reuse(tf.zeros_like(xb_input), replace_controls={"z":zua})
ugb = gb.sample#gb.reuse(tf.zeros_like(xa_input), replace_controls={"z":zub})
xa = xa_input
xb = xb_input
re_ga = self.create_component(config.generator, input=gb.sample, name='a_generator', reuse=True)
re_gb = self.create_component(config.generator, input=ga.sample, name='b_generator', reuse=True)
re_zb = zb#re_gb.controls['z']
t0 = tf.concat([xb, xb], axis=3)
t1 = tf.concat([gb.sample, gb.sample], axis=3)
zaxis = len(self.ops.shape(za))-1
f0 = tf.concat([za, za], axis=zaxis)
f1 = tf.concat([zb, zb], axis=zaxis)
#f0 = tf.concat([zb, za], axis=zaxis)
#f1 = tf.concat([za, zb], axis=zaxis)
stack = [t0, t1]
stacked = ops.concat(stack, axis=0)
features = ops.concat([f0, f1], axis=0)
d = self.create_component(config.discriminator, name='d_ab', input=stacked, features=[features])
self.za = za
self.discriminator = d
l = self.create_loss(config.loss, d, None, None, len(stack))
loss = l
d1_lambda = config.d1_lambda
d2_lambda = config.d2_lambda
d_loss1 = d1_lambda * l.d_loss
g_loss1 = d1_lambda * l.g_loss
d_vars1 = d.variables()
d_loss = l.d_loss
g_loss = l.g_loss
metrics = {
'g_loss': l.g_loss,
'd_loss': l.d_loss
}
self._g_vars = ga.variables() + gb.variables()
self._d_vars = d_vars1
self.loss=loss
self.generator = gb
trainer = self.create_component(config.trainer)
self.initialize_variables()
self.trainer = trainer
self.latent = hc.Config({'sample':zb})
self.generator = gb
self.encoder = gb # this is the other gan
self.uniform_distribution = hc.Config({"sample":zb})#uniform_encoder
self.zb = zb
self.z_hat = gb.sample
self.x_input = xa_input
self.autoencoded_x = xa_hat
self.cyca = xa_hat
self.cycb = xb_hat
self.xba = xba
self.xab = xab
self.uga = uga
self.ugb = ugb
rgb = tf.cast((self.generator.sample+1)*127.5, tf.int32)
self.generator_int = tf.bitwise.bitwise_or(rgb, 0xFF000000, name='generator_int')
def d_vars(self):
return self._d_vars
def g_vars(self):
return self._g_vars
def create_discriminator(self, _input, reuse=False):
return self.create_component(self.config.discriminator, name='d_ab', input=_input, features=[tf.zeros_like(self.za)], reuse=reuse)
def create_loss(self, loss_config, discriminator, x, generator, split):
loss = self.create_component(loss_config, discriminator = discriminator, x=x, generator=generator, split=split)
return loss
def create_encoder(self, x_input, name='input_encoder'):
config = self.config
input_encoder = dict(config.input_encoder or config.g_encoder or config.generator)
encoder = self.create_component(input_encoder, name=name, input=x_input)
return encoder
def create_z_discriminator(self, z, z_hat):
config = self.config
z_discriminator = dict(config.z_discriminator or config.discriminator)
z_discriminator['layer_filter']=None
net = tf.concat(axis=0, values=[z, z_hat])
encoder_discriminator = self.create_component(z_discriminator, name='z_discriminator', input=net)
return encoder_discriminator
def create_cycloss(self, x_input, x_hat):
config = self.config
ops = self.ops
distance = config.distance or ops.lookup('l1_distance')
pe_layers = self.gan.skip_connections.get_array("progressive_enhancement")
cycloss_lambda = config.cycloss_lambda
if cycloss_lambda is None:
cycloss_lambda = 10
if(len(pe_layers) > 0):
mask = self.progressive_growing_mask(len(pe_layers)//2+1)
cycloss = tf.reduce_mean(distance(mask*x_input,mask*x_hat))
cycloss *= mask
else:
cycloss = tf.reduce_mean(distance(x_input, x_hat))
cycloss *= cycloss_lambda
return cycloss
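    # In short: cycloss = cycloss_lambda * mean(distance(x, x_hat)); when
    # progressive-enhancement layers exist, a growing mask is mixed in first.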
def create_z_cycloss(self, z, x_hat, encoder, generator):
config = self.config
ops = self.ops
total = None
distance = config.distance or ops.lookup('l1_distance')
if config.z_hat_lambda:
z_hat_cycloss_lambda = config.z_hat_cycloss_lambda
recode_z_hat = encoder.reuse(x_hat)
z_hat_cycloss = tf.reduce_mean(distance(z_hat,recode_z_hat))
z_hat_cycloss *= z_hat_cycloss_lambda
if config.z_cycloss_lambda:
recode_z = encoder.reuse(generator.reuse(z))
z_cycloss = tf.reduce_mean(distance(z,recode_z))
z_cycloss_lambda = config.z_cycloss_lambda
if z_cycloss_lambda is None:
z_cycloss_lambda = 0
z_cycloss *= z_cycloss_lambda
if config.z_hat_lambda and config.z_cycloss_lambda:
total = z_cycloss + z_hat_cycloss
elif config.z_cycloss_lambda:
total = z_cycloss
elif config.z_hat_lambda:
total = z_hat_cycloss
return total
def input_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [self.mask_generator.sample]
else:
extras = []
return extras + [
self.x_input
]
def output_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [
self.mask_generator.sample,
self.generator.g1x,
self.generator.g2x
]
else:
extras = []
return extras + [
self.encoder.sample,
self.generator.sample,
self.uniform_sample,
self.generator_int
]
|
{
"content_hash": "2c220c6b6500f6bc1384bb7919980b1d",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 138,
"avg_line_length": 35.66795366795367,
"alnum_prop": 0.5830266291405066,
"repo_name": "255BITS/HyperGAN",
"id": "2d771d478ecdbfbaa1d335861d6ae0418eff7727",
"size": "9238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hypergan/gans/needs_pytorch/aligned_ali_gan3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204346"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import sys
import re
import contextlib
import subprocess
import signal
import math
from time import time
from . import basecase
from os.path import join, normpath
def is_win():
return sys.platform in ("cygwin", "win32")
if is_win():
from .winpty import WinPty
DEFAULT_PREFIX = ''
else:
import pty
DEFAULT_PREFIX = os.linesep
DEFAULT_CQLSH_PROMPT = DEFAULT_PREFIX + '(\S+@)?cqlsh(:\S+)?> '
DEFAULT_CQLSH_TERM = 'xterm'
try:
Pattern = re._pattern_type
except AttributeError:
# Python 3.7+
Pattern = re.Pattern
def get_smm_sequence(term='xterm'):
"""
Return the set meta mode (smm) sequence, if any.
On more recent Linux systems, xterm emits the smm sequence
before each prompt.
"""
result = ''
if not is_win():
tput_proc = subprocess.Popen(['tput', '-T{}'.format(term), 'smm'], stdout=subprocess.PIPE)
tput_stdout = tput_proc.communicate()[0]
if (tput_stdout and (tput_stdout != b'')):
result = tput_stdout
if isinstance(result, bytes):
result = result.decode("utf-8")
return result
DEFAULT_SMM_SEQUENCE = get_smm_sequence()
cqlshlog = basecase.cqlshlog
def set_controlling_pty(master, slave):
os.setsid()
os.close(master)
for i in range(3):
os.dup2(slave, i)
if slave > 2:
os.close(slave)
os.close(os.open(os.ttyname(1), os.O_RDWR))
@contextlib.contextmanager
def raising_signal(signum, exc):
"""
    Within the wrapped context, the given signal will interrupt blocking
    calls and raise the given exception class. The preexisting signal
    handler is reinstated on context exit.
"""
def raiser(signum, frames):
raise exc()
oldhandlr = signal.signal(signum, raiser)
try:
yield
finally:
signal.signal(signum, oldhandlr)
class TimeoutError(Exception):
pass
@contextlib.contextmanager
def timing_out_itimer(seconds):
if seconds is None:
yield
return
with raising_signal(signal.SIGALRM, TimeoutError):
oldval, oldint = signal.getitimer(signal.ITIMER_REAL)
if oldval != 0.0:
raise RuntimeError("ITIMER_REAL already in use")
signal.setitimer(signal.ITIMER_REAL, seconds)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def timing_out_alarm(seconds):
if seconds is None:
yield
return
with raising_signal(signal.SIGALRM, TimeoutError):
oldval = signal.alarm(int(math.ceil(seconds)))
if oldval != 0:
signal.alarm(oldval)
raise RuntimeError("SIGALRM already in use")
try:
yield
finally:
signal.alarm(0)
if is_win():
try:
import eventlet
except ImportError as e:
sys.exit("evenlet library required to run cqlshlib tests on Windows")
def timing_out(seconds):
return eventlet.Timeout(seconds, TimeoutError)
else:
# setitimer is new in 2.6, but it's still worth supporting, for potentially
# faster tests because of sub-second resolution on timeouts.
if hasattr(signal, 'setitimer'):
timing_out = timing_out_itimer
else:
timing_out = timing_out_alarm
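# A small sketch of how the timeout helpers compose (assumes a readable file
# descriptor `fd`); `timing_out` raises TimeoutError once the deadline passes:
#
#   try:
#       with timing_out(2.0):
#           data = os.read(fd, 4096)
#   except TimeoutError:
#       data = ''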
def noop(*a):
pass
class ProcRunner:
def __init__(self, path, tty=True, env=None, args=()):
self.exe_path = path
self.args = args
self.tty = bool(tty)
self.realtty = self.tty and not is_win()
if env is None:
env = {}
self.env = env
self.readbuf = ''
self.start_proc()
def start_proc(self):
preexec = noop
stdin = stdout = stderr = None
cqlshlog.info("Spawning %r subprocess with args: %r and env: %r"
% (self.exe_path, self.args, self.env))
if self.realtty:
masterfd, slavefd = pty.openpty()
preexec = (lambda: set_controlling_pty(masterfd, slavefd))
self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
env=self.env, preexec_fn=preexec,
stdin=stdin, stdout=stdout, stderr=stderr,
close_fds=False)
os.close(slavefd)
self.childpty = masterfd
self.send = self.send_tty
self.read = self.read_tty
else:
stdin = stdout = subprocess.PIPE
stderr = subprocess.STDOUT
self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
env=self.env, stdin=stdin, stdout=stdout,
stderr=stderr, bufsize=0, close_fds=False)
self.send = self.send_pipe
if self.tty:
self.winpty = WinPty(self.proc.stdout)
self.read = self.read_winpty
else:
self.read = self.read_pipe
def close(self):
cqlshlog.info("Closing %r subprocess." % (self.exe_path,))
if self.realtty:
os.close(self.childpty)
else:
self.proc.stdin.close()
cqlshlog.debug("Waiting for exit")
return self.proc.wait()
def send_tty(self, data):
if not isinstance(data, bytes):
data = data.encode("utf-8")
os.write(self.childpty, data)
def send_pipe(self, data):
self.proc.stdin.write(data)
def read_tty(self, blksize, timeout=None):
buf = os.read(self.childpty, blksize)
if isinstance(buf, bytes):
buf = buf.decode("utf-8")
return buf
def read_pipe(self, blksize, timeout=None):
buf = self.proc.stdout.read(blksize)
if isinstance(buf, bytes):
buf = buf.decode("utf-8")
return buf
def read_winpty(self, blksize, timeout=None):
buf = self.winpty.read(blksize, timeout)
if isinstance(buf, bytes):
buf = buf.decode("utf-8")
return buf
def read_until(self, until, blksize=4096, timeout=None,
flags=0, ptty_timeout=None, replace=[]):
if not isinstance(until, Pattern):
until = re.compile(until, flags)
cqlshlog.debug("Searching for %r" % (until.pattern,))
got = self.readbuf
self.readbuf = ''
with timing_out(timeout):
while True:
val = self.read(blksize, ptty_timeout)
for replace_target in replace:
if (replace_target != ''):
val = val.replace(replace_target, '')
cqlshlog.debug("read %r from subproc" % (val,))
if val == '':
raise EOFError("'until' pattern %r not found" % (until.pattern,))
got += val
m = until.search(got)
if m is not None:
self.readbuf = got[m.end():]
got = got[:m.end()]
return got
def read_lines(self, numlines, blksize=4096, timeout=None):
lines = []
with timing_out(timeout):
for n in range(numlines):
lines.append(self.read_until('\n', blksize=blksize))
return lines
def read_up_to_timeout(self, timeout, blksize=4096):
got = self.readbuf
self.readbuf = ''
curtime = time()
stoptime = curtime + timeout
while curtime < stoptime:
try:
with timing_out(stoptime - curtime):
stuff = self.read(blksize)
except TimeoutError:
break
cqlshlog.debug("read %r from subproc" % (stuff,))
if stuff == '':
break
got += stuff
curtime = time()
return got
class CqlshRunner(ProcRunner):
def __init__(self, path=None, host=None, port=None, keyspace=None, cqlver=None,
args=(), prompt=DEFAULT_CQLSH_PROMPT, env=None,
win_force_colors=True, tty=True, **kwargs):
if path is None:
path = join(basecase.cqlsh_dir, 'cqlsh')
if host is None:
host = basecase.TEST_HOST
if port is None:
port = basecase.TEST_PORT
if env is None:
env = {}
if is_win():
env['PYTHONUNBUFFERED'] = '1'
env.update(os.environ.copy())
env.setdefault('TERM', 'xterm')
env.setdefault('CQLSH_NO_BUNDLED', os.environ.get('CQLSH_NO_BUNDLED', ''))
env.setdefault('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
        coverage = 'CQLSH_COVERAGE' in env
args = tuple(args) + (host, str(port))
if cqlver is not None:
args += ('--cqlversion', str(cqlver))
if keyspace is not None:
args += ('--keyspace', keyspace.lower())
if tty and is_win():
args += ('--tty',)
args += ('--encoding', 'utf-8')
if win_force_colors:
args += ('--color',)
if coverage:
args += ('--coverage',)
self.keyspace = keyspace
env.setdefault('CQLSH_PYTHON', sys.executable) # run with the same interpreter as the test
ProcRunner.__init__(self, path, tty=tty, args=args, env=env, **kwargs)
self.prompt = prompt
if self.prompt is None:
self.output_header = ''
else:
self.output_header = self.read_to_next_prompt()
def read_to_next_prompt(self, timeout=10.0):
return self.read_until(self.prompt, timeout=timeout, ptty_timeout=3, replace=[DEFAULT_SMM_SEQUENCE,])
def read_up_to_timeout(self, timeout, blksize=4096):
output = ProcRunner.read_up_to_timeout(self, timeout, blksize=blksize)
        # readline trying to be friendly; remove these artifacts
output = output.replace(' \r', '')
output = output.replace('\r', '')
return output
def cmd_and_response(self, cmd):
self.send(cmd + '\n')
output = self.read_to_next_prompt()
        # readline trying to be friendly; remove these artifacts
output = output.replace(' \r', '')
output = output.replace('\r', '')
output = output.replace(' \b', '')
if self.realtty:
echo, output = output.split('\n', 1)
assert echo == cmd, "unexpected echo %r instead of %r" % (echo, cmd)
try:
output, promptline = output.rsplit('\n', 1)
except ValueError:
promptline = output
output = ''
assert re.match(self.prompt, DEFAULT_PREFIX + promptline), \
'last line of output %r does not match %r?' % (promptline, self.prompt)
return output + '\n'
def run_cqlsh(**kwargs):
return contextlib.closing(CqlshRunner(**kwargs))
def call_cqlsh(**kwargs):
kwargs.setdefault('prompt', None)
proginput = kwargs.pop('input', '')
kwargs['tty'] = False
c = CqlshRunner(**kwargs)
output, _ = c.proc.communicate(proginput)
result = c.close()
if isinstance(output, bytes):
output = output.decode("utf-8")
return output, result
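# A usage sketch, assuming a reachable test cluster configured in `basecase`:
#
#   output, exit_status = call_cqlsh(input='DESCRIBE KEYSPACES;\n')
#
# or, interactively:
#
#   with run_cqlsh() as c:
#       print(c.cmd_and_response('SELECT now() FROM system.local;'))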
|
{
"content_hash": "812a7c7aff678ee4ee047416072bb17e",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 109,
"avg_line_length": 33.389380530973455,
"alnum_prop": 0.5614453573637247,
"repo_name": "szhou1234/cassandra",
"id": "cd14b7fd5718915af38f2653eb72629e07f94294",
"size": "12148",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "pylib/cqlshlib/test/run_cqlsh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "801"
},
{
"name": "Batchfile",
"bytes": "37481"
},
{
"name": "GAP",
"bytes": "86323"
},
{
"name": "HTML",
"bytes": "264240"
},
{
"name": "Java",
"bytes": "19245578"
},
{
"name": "Lex",
"bytes": "10151"
},
{
"name": "PowerShell",
"bytes": "39138"
},
{
"name": "Python",
"bytes": "514138"
},
{
"name": "Shell",
"bytes": "81905"
}
],
"symlink_target": ""
}
|
import json
import os
import textwrap
import unittest
from six import StringIO
from conans import __version__
from conans.client.cache.editable import EDITABLE_PACKAGES_FILE
from conans.client.migrations import migrate_plugins_to_hooks, migrate_to_default_profile, \
migrate_editables_use_conanfile_name, remove_buggy_cacert
from conans.client.output import ConanOutput
from conans.client.rest.cacert import cacert_default
from conans.client.tools.version import Version
from conans.migrations import CONAN_VERSION
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME, PACKAGE_TGZ_NAME, CACERT_FILE
from conans.test.utils.mocks import TestBufferConanOutput
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient, GenConanfile, NO_SETTINGS_PACKAGE_ID
from conans.util.files import load, save
from conans.client import migrations_settings
from conans.client.conf import get_default_settings_yml
class TestMigrations(unittest.TestCase):
def test_migrations_matches_config(self):
# Check that the current settings matches what is stored in the migrations file
current_settings = get_default_settings_yml()
v = Version(__version__)
var_name = "settings_{}".format("_".join([v.major, v.minor, v.patch]))
self.assertTrue(hasattr(migrations_settings, var_name),
"Migrations var '{}' not found".format(var_name))
migrations_settings_content = getattr(migrations_settings, var_name)
assert current_settings == migrations_settings_content
def test_is_there_var_for_settings_previous_version(self):
from conans import __version__ as current_version
tmp = Version(current_version)
if int(tmp.minor) == 0:
            raise unittest.SkipTest("2.0, this will make sense for 2.1")
if int(tmp.patch) > 0:
previous_version = "{}.{}.{}".format(tmp.major, tmp.minor, int(tmp.patch) - 1)
else:
previous_version = "{}.{}.0".format(tmp.major, int(tmp.minor) - 1)
from conans.client import migrations_settings
var_name = "settings_{}".format(previous_version.replace(".", "_"))
self.assertTrue(any([i for i in dir(migrations_settings) if i == var_name]),
"Introduce the previous settings.yml file in the 'migrations_settings.yml")
def test_migrate_revision_metadata(self):
# https://github.com/conan-io/conan/issues/4898
client = TestClient()
client.save({"conanfile.py": GenConanfile().with_name("Hello").with_version("0.1")})
client.run("create . user/testing")
ref = ConanFileReference.loads("Hello/0.1@user/testing")
layout1 = client.cache.package_layout(ref)
metadata = json.loads(load(layout1.package_metadata()))
metadata["recipe"]["revision"] = None
metadata["packages"]["WRONG"] = {"revision": ""}
metadata["packages"]["5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"]["revision"] = None
metadata["packages"]["5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"]["recipe_revision"] = None
save(layout1.package_metadata(), json.dumps(metadata))
client.run("create . user/stable")
ref2 = ConanFileReference.loads("Hello/0.1@user/stable")
layout2 = client.cache.package_layout(ref2)
metadata = json.loads(load(layout2.package_metadata()))
metadata["recipe"]["revision"] = "Other"
save(layout2.package_metadata(), json.dumps(metadata))
version_file = os.path.join(client.cache_folder, CONAN_VERSION)
save(version_file, "1.14.1")
client.run("search") # This will fire a migration
metadata_ref1 = client.cache.package_layout(ref).load_metadata()
self.assertEqual(metadata_ref1.recipe.revision, "f9e0ab84b47b946f4c7c848d8f82d14e")
pkg_metadata = metadata_ref1.packages["5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"]
self.assertEqual(pkg_metadata.recipe_revision, "f9e0ab84b47b946f4c7c848d8f82d14e")
self.assertEqual(pkg_metadata.revision, "fa1923ec4342a0d9dc33eff7250432e8")
self.assertEqual(list(metadata_ref1.packages.keys()),
["5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"])
metadata_ref2 = client.cache.package_layout(ref2).load_metadata()
self.assertEqual(metadata_ref2.recipe.revision, "Other")
def test_migrate_config_install(self):
client = TestClient()
client.run('config set general.config_install="url, http:/fake.url, None, None"')
version_file = os.path.join(client.cache_folder, CONAN_VERSION)
save(version_file, "1.12.0")
client.run("search")
self.assertEqual(load(version_file), __version__)
conf = load(client.cache.conan_conf_path)
self.assertNotIn("http:/fake.url", conf)
self.assertNotIn("config_install", conf)
self.assertIn("http:/fake.url", load(client.cache.config_install_file))
def test_migration_to_default_profile(self):
tmp = temp_folder()
old_conf = """
[general]
the old general
[settings_defaults]
some settings
[other_section]
with other values
"""
conf_path = os.path.join(tmp, "conan.conf")
default_profile_path = os.path.join(tmp, "conan_default")
save(conf_path, old_conf)
migrate_to_default_profile(conf_path, default_profile_path)
new_content = load(conf_path)
self.assertEqual(new_content, """
[general]
the old general
[other_section]
with other values
""")
default_profile = load(default_profile_path)
self.assertEqual(default_profile, """[settings]
some settings""")
old_conf = """
[general]
the old general
[settings_defaults]
some settings
"""
conf_path = os.path.join(tmp, "conan.conf")
default_profile_path = os.path.join(tmp, "conan_default")
save(conf_path, old_conf)
migrate_to_default_profile(conf_path, default_profile_path)
default_profile = load(default_profile_path)
self.assertEqual(default_profile, """[settings]
some settings""")
new_content = load(conf_path)
self.assertEqual(new_content, """
[general]
the old general
""")
def test_migration_from_plugins_to_hooks(self):
def _create_old_layout():
old_user_home = temp_folder()
old_conan_folder = old_user_home
old_conf_path = os.path.join(old_conan_folder, "conan.conf")
old_attribute_checker_plugin = os.path.join(old_conan_folder, "plugins",
"attribute_checker.py")
save(old_conf_path, "\n[general]\n[plugins] # CONAN_PLUGINS\nattribute_checker")
save(old_attribute_checker_plugin, "")
# Do not adjust cpu_count, it is reusing a cache
cache = TestClient(cache_folder=old_user_home, cpu_count=False).cache
assert old_conan_folder == cache.cache_folder
return old_user_home, old_conan_folder, old_conf_path, \
old_attribute_checker_plugin, cache
output = ConanOutput(StringIO())
_, old_cf, old_cp, old_acp, cache = _create_old_layout()
migrate_plugins_to_hooks(cache, output=output)
self.assertFalse(os.path.exists(old_acp))
        self.assertTrue(os.path.exists(os.path.join(old_cf, "hooks")))
conf_content = load(old_cp)
self.assertNotIn("[plugins]", conf_content)
self.assertIn("[hooks]", conf_content)
# Test with a hook folder: Maybe there was already a hooks folder and a plugins folder
_, old_cf, old_cp, old_acp, cache = _create_old_layout()
existent_hook = os.path.join(old_cf, "hooks", "hook.py")
save(existent_hook, "")
migrate_plugins_to_hooks(cache, output=output)
self.assertTrue(os.path.exists(old_acp))
        self.assertTrue(os.path.exists(os.path.join(old_cf, "hooks")))
conf_content = load(old_cp)
self.assertNotIn("[plugins]", conf_content)
self.assertIn("[hooks]", conf_content)
def test_migration_editables_to_conanfile_name(self):
# Create the old editable_packages.json file (and user workspace)
tmp_folder = temp_folder()
conanfile1 = os.path.join(tmp_folder, 'dir1', 'conanfile.py')
conanfile2 = os.path.join(tmp_folder, 'dir2', 'conanfile.py')
save(conanfile1, "anything")
save(conanfile2, "anything")
save(os.path.join(tmp_folder, EDITABLE_PACKAGES_FILE),
json.dumps({"name/version": {"path": os.path.dirname(conanfile1), "layout": None},
"other/version@user/testing": {"path": os.path.dirname(conanfile2),
"layout": "anyfile"}}))
cache = TestClient(cache_folder=tmp_folder).cache
migrate_editables_use_conanfile_name(cache)
# Now we have same info and full paths
with open(os.path.join(tmp_folder, EDITABLE_PACKAGES_FILE)) as f:
data = json.load(f)
self.assertEqual(data["name/version"]["path"], conanfile1)
self.assertEqual(data["name/version"]["layout"], None)
self.assertEqual(data["other/version@user/testing"]["path"], conanfile2)
self.assertEqual(data["other/version@user/testing"]["layout"], "anyfile")
def test_migration_tgz_location(self):
client = TestClient(default_server_user=True)
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
exports = "*.txt"
exports_sources = "*.h"
""")
client.save({"conanfile.py": conanfile,
"file.h": "contents",
"file.txt": "contents"})
client.run("create . pkg/1.0@")
ref = ConanFileReference.loads("pkg/1.0")
layout = client.cache.package_layout(ref)
export_tgz = os.path.join(layout.export(), EXPORT_TGZ_NAME)
export_src_tgz = os.path.join(layout.export(), EXPORT_SOURCES_TGZ_NAME)
pref = PackageReference(ref, NO_SETTINGS_PACKAGE_ID)
pkg_tgz = os.path.join(layout.package(pref), PACKAGE_TGZ_NAME)
save(export_tgz, "")
save(export_src_tgz, "")
save(pkg_tgz, "")
self.assertTrue(os.path.isfile(export_tgz))
self.assertTrue(os.path.isfile(export_src_tgz))
self.assertTrue(os.path.isfile(pkg_tgz))
client2 = TestClient(client.cache_folder)
save(os.path.join(client.cache_folder, "version.txt"), "1.30.0")
client2.run("search")
self.assertIn("Removing temporary .tgz files, they are stored in a different location now",
client2.out)
self.assertFalse(os.path.isfile(export_tgz))
self.assertFalse(os.path.isfile(export_src_tgz))
self.assertFalse(os.path.isfile(pkg_tgz))
def test_cacert_migration(self):
client = TestClient()
client.run("search foo")
cacert_path = os.path.join(client.cache_folder, CACERT_FILE)
out = TestBufferConanOutput()
remove_buggy_cacert(client.cache, out)
assert "Conan 'cacert.pem' is up to date..." in out
modified_cacert_content = load(cacert_path) + "other_info"
save(cacert_path, modified_cacert_content)
remove_buggy_cacert(client.cache, out)
assert "'cacert.pem' is locally modified, can't be updated" in out
new_path = cacert_path + ".new"
assert os.path.exists(new_path)
old_cacert = cacert_default
save(cacert_path, old_cacert)
remove_buggy_cacert(client.cache, out)
assert "Removing the 'cacert.pem' file..." in out
assert not os.path.exists(cacert_path)
|
{
"content_hash": "690cfe5383777937bbca19b6f24a6573",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 99,
"avg_line_length": 43.01090909090909,
"alnum_prop": 0.6440649306729793,
"repo_name": "conan-io/conan",
"id": "7f01599df6aefa9c03808c5be8c5ed69b25a4f8f",
"size": "11828",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/integration/test_migrations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
}
|
import os
import re
import time
import uuid
from datetime import timedelta
from inspect import signature
import click
from fate_test._client import Clients
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test._parser import BenchmarkSuite
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _upload_data, _delete_data, _load_testsuites, _load_module_from_script
from fate_test.utils import show_data, match_metrics
DATA_DISPLAY_PATTERN = re.compile("^FATE")
@click.command(name="benchmark-quality")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-t', '--tol', type=float,
help="tolerance (absolute error) for metrics to be considered almost equal. "
"Comparison is done by evaluating abs(a-b) <= max(relative_tol * max(abs(a), abs(b)), absolute_tol)")
@click.option('-s', '--storage-tag', type=str,
help="tag for storing metrics, for future metrics info comparison")
@click.option('-v', '--history-tag', type=str, multiple=True,
help="Extract metrics info from history tags for comparison")
@click.option('-d', '--match-details', type=click.Choice(['all', 'relative', 'absolute', 'none']),
default="all", help="Error value display in algorithm comparison")
@click.option('--skip-data', is_flag=True, default=False,
help="skip uploading data specified in benchmark conf")
@click.option("--disable-clean-data", "clean_data", flag_value=False, default=None)
@click.option("--enable-clean-data", "clean_data", flag_value=True, default=None)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def run_benchmark(ctx, include, exclude, glob, skip_data, tol, clean_data, storage_tag, history_tag, match_details,
**kwargs):
"""
process benchmark suite, alias: bq
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
config_inst.extend_sid = ctx.obj["extend_sid"]
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if clean_data is None:
clean_data = config_inst.clean_data
data_namespace_mangling = ctx.obj["namespace_mangling"]
yes = ctx.obj["yes"]
echo.welcome("benchmark")
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix="benchmark.json", suite_type="benchmark")
for suite in suites:
echo.echo(f"\tdataset({len(suite.dataset)}) benchmark groups({len(suite.pairs)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
fate_version = client["guest_0"].get_version()
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
start = time.time()
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
if not skip_data:
try:
_upload_data(client, suite, config_inst)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
try:
_run_benchmark_pairs(config_inst, suite, tol, namespace, data_namespace_mangling, storage_tag,
history_tag, fate_version, match_details)
except Exception as e:
raise RuntimeError(f"exception occur while running benchmark jobs for {suite.path}") from e
if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}", err=True, fg='red')
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@LOGGER.catch
def _run_benchmark_pairs(config: Config, suite: BenchmarkSuite, tol: float, namespace: str,
data_namespace_mangling: bool, storage_tag, history_tag, fate_version, match_details):
# pipeline demo goes here
pair_n = len(suite.pairs)
fate_base = config.fate_base
    PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" + os.path.join(fate_base, "python")
os.environ['PYTHONPATH'] = PYTHONPATH
for i, pair in enumerate(suite.pairs):
echo.echo(f"Running [{i + 1}/{pair_n}] group: {pair.pair_name}")
results = {}
# data_summary = None
job_n = len(pair.jobs)
for j, job in enumerate(pair.jobs):
try:
echo.echo(f"Running [{j + 1}/{job_n}] job: {job.job_name}")
job_name, script_path, conf_path = job.job_name, job.script_path, job.conf_path
param = Config.load_from_file(conf_path)
mod = _load_module_from_script(script_path)
input_params = signature(mod.main).parameters
# local script
if len(input_params) == 1:
data, metric = mod.main(param=param)
elif len(input_params) == 2:
data, metric = mod.main(config=config, param=param)
# pipeline script
elif len(input_params) == 3:
if data_namespace_mangling:
data, metric = mod.main(config=config, param=param, namespace=f"_{namespace}")
else:
data, metric = mod.main(config=config, param=param)
else:
data, metric = mod.main()
results[job_name] = metric
echo.echo(f"[{j + 1}/{job_n}] job: {job.job_name} Success!\n")
if data and DATA_DISPLAY_PATTERN.match(job_name):
# data_summary = data
show_data(data)
# if data_summary is None:
# data_summary = data
except Exception as e:
exception_id = uuid.uuid1()
echo.echo(f"exception while running [{j + 1}/{job_n}] job, exception_id={exception_id}", err=True,
fg='red')
LOGGER.exception(f"exception id: {exception_id}, error message: \n{e}")
continue
rel_tol = pair.compare_setting.get("relative_tol")
# show_data(data_summary)
match_metrics(evaluate=True, group_name=pair.pair_name, abs_tol=tol, rel_tol=rel_tol,
storage_tag=storage_tag, history_tag=history_tag, fate_version=fate_version,
cache_directory=config.cache_directory, match_details=match_details, **results)
|
{
"content_hash": "0addf434ad7b5fb14cf1f44ba211c1e1",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 120,
"avg_line_length": 50.691275167785236,
"alnum_prop": 0.5947305706341851,
"repo_name": "FederatedAI/FATE",
"id": "f814cc5876c6d37709209e720d0ad066ebe46b37",
"size": "7553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fate_test/fate_test/scripts/benchmark_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
import sys
if len(sys.argv)==1:
print "Usage: main.py cfg_file [cfg_site]"
print "Example:"
print " main.py cfg_gnu"
print " main.py cfg_gnu custom_cfg_site"
sys.exit(1)
args = []
args.extend(sys.argv)
args.remove(args[1])
args.remove(args[0])
cfg_file = __import__(sys.argv[1])
builders = cfg_file.create_builder(args)
for builder in builders:
builder.execute()
|
{
"content_hash": "35ed17ea1e07f938e4d4ce86d2515858",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 46,
"avg_line_length": 19.7,
"alnum_prop": 0.6522842639593909,
"repo_name": "yuezhou/telephony2",
"id": "8a99371455696255e816d28378f4ca63806ceb4d",
"size": "1229",
"binary": false,
"copies": "107",
"ref": "refs/heads/master",
"path": "telephony/Classes/pjproject-2.2.1/tests/cdash/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "7190"
},
{
"name": "C",
"bytes": "11072890"
},
{
"name": "C++",
"bytes": "976668"
},
{
"name": "CSS",
"bytes": "21213"
},
{
"name": "HTML",
"bytes": "3320"
},
{
"name": "Java",
"bytes": "40809"
},
{
"name": "Makefile",
"bytes": "130775"
},
{
"name": "Objective-C",
"bytes": "143507"
},
{
"name": "Pan",
"bytes": "288"
},
{
"name": "Python",
"bytes": "526512"
},
{
"name": "QML",
"bytes": "1121"
},
{
"name": "QMake",
"bytes": "3168"
},
{
"name": "Ruby",
"bytes": "6545"
},
{
"name": "Shell",
"bytes": "344351"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import six
from ..exceptions import ValidationError
class BaseProperty(object):
"""
The base class for properties in Datastore. Instances of this class may be added to subclasses of `Entity` to store
data.
A property has a value, can have validators and a default value. Useful attributes of a property include:
* :attr:`db_field` the name of this property in the Datastore.
This class shouldn't be used directly. Instead, it is intended to be extended by concrete property implementations.
"""
def __init__(self, name=None, db_name=None, required=False, default=None, choices=None, help_text=None,
verbose_name=None, exclude_from_index=False):
"""
Initialise a property.
        :param str name: The name used for this property on the entity. Defaults to the attribute name used for
this property on an :class:`~gcloudoem.entity.Entity`.
:param str db_name: The datastore name used for this property. Defaults to `name`.
:param bool required: Is this property is required? Defaults to False.
:param default: (optional) The default value of this property of no value has been set? Can be a callable.
:param list choices: (optional) A list of values this property should have.
:param str help_text: (optional) The help text for this property. Might be used by implementers of a GUI.
:param str verbose_name: The verbose name for this property. Designed to be human readable. Might be used by
implementers of a GUI.
        :param bool exclude_from_index: Whether to exclude this property from the entity's index.
"""
        self.name = name
        self.db_name = db_name or name  # default the datastore name to the attribute name
self.required = required
self.default = default
self.choices = choices
self.help_text = help_text
self.verbose = verbose_name
self.exclude_from_index = exclude_from_index
def __get__(self, instance, owner):
if not instance: # being called on an entity class
return self
return instance._data.get(self.name)
def __set__(self, instance, value):
if value is None and self.default is not None:
value = self.default
if callable(value):
value = value()
instance._data[self.name] = value
def from_protobuf(self, pb_value):
"""
Given a protobuf value for a Property, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which has one value set and the rest blank.
        This function retrieves the one value provided.
Some work is done to coerce the return value into a more useful python type.
        :type pb_value: :class:`gcloudoem.datastore.datastore_v1_pb2.Property`
        :param pb_value: The Property Protobuf.
:returns: The python value provided by the Protobuf.
"""
raise NotImplementedError
def to_protobuf(self, value):
"""
Given a value, return the protobuf attribute name and proper value.
The Protobuf API uses different attribute names based on value types rather than inferring the type. This
function simply returns the proper attribute name for this Property as well as a properly formatted value.
Certain value types need to be coerced into a different type. This function handles that for you.
.. note::
Values which are "text" ('unicode' in Python2, 'str' in Python3) map to 'string_value' in the datastore;
values which are "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
For example:
>>> _pb_attr_value(1234)
('integer_value', 1234)
>>> _pb_attr_value('my_string')
('string_value', 'my_string')
        :param object value: The value to be scrutinized.
:rtype: tuple
:returns: A tuple of the attribute name and proper Protobuf value type.
"""
raise NotImplementedError
def validate(self, value):
"""
Validate this property.
Should be overridden by subclasses to provide custom validation.
:raise: :exception:`~gcloudoem.queryset.exceptions.ValidationError`.
"""
pass
def _validate(self, value):
if self.choices:
choice_list = self.choices
if isinstance(self.choices[0], (list, tuple)):
choice_list = [k for k, v in self.choices]
if value not in choice_list:
self.error('Value must be one of %s' % six.text_type(choice_list))
if self.required and value is None:
self.error('Value is required.')
if value is not None:
self.validate(value)
def error(self, message="", errors=None, field_name=None):
"""Raises a ValidationError."""
field_name = field_name if field_name else self.name
raise ValidationError(message, errors=errors, field_name=field_name)
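# A minimal sketch of a concrete subclass, showing the extension points the
# base class docstring refers to (the protobuf attribute name follows the
# `('integer_value', 1234)` example given in `to_protobuf` above):
#
#   class IntegerProperty(BaseProperty):
#       def validate(self, value):
#           if not isinstance(value, six.integer_types):
#               self.error('Value must be an integer.')
#
#       def to_protobuf(self, value):
#           return 'integer_value', int(value)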
class ContainerBaseProperty(BaseProperty):
"""
A ContainerBaseProperty is designed for use with any property that is meant to be a container of other properties
(like a list for example). It handles correctly fetching things like ReferenceProperties etc.
Subclasses of this class must have a ``property`` attribute which contains the BaseProperty instance this class is a
container for.
"""
def __get__(self, instance, owner):
        if not instance:  # being called on the entity class
return self
value = super(ContainerBaseProperty, self).__get__(instance, owner)
if isinstance(value, (list, tuple)):
from ..properties import ReferenceProperty
if self.property and isinstance(self.property, ReferenceProperty):
from .. import Entity, Key
for i, k in enumerate(value):
if not isinstance(k, Entity):
value[i] = self.property.entity_cls.objects.get(pk=k.name_or_id)
setattr(instance, self.name, value) # cache any fetched entities
return value
|
{
"content_hash": "fa4dcc6c71c55c664bfa84a7acaa0dda",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 120,
"avg_line_length": 40.714285714285715,
"alnum_prop": 0.6446570972886763,
"repo_name": "Kapiche/gcloud-datastore-oem",
"id": "1ec6f62205736dca95595ff1e55a83d31a3c6216",
"size": "6349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcloudoem/base/properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "27693"
},
{
"name": "Python",
"bytes": "255564"
}
],
"symlink_target": ""
}
|
from SakaiPy import SakaiSession
class WebContent(object):
"""
Contains logic for the Sakai WebContent tool.
More information about the RESTful interface can be found at:
https://trunk-mysql.nightly.sakaiproject.org/webcontent/news/describe
"""
def __init__(self, sess):
"""
Create a standalone WebContent Object
:param sess: The Session to use.
:return: A WebContent object
"""
assert isinstance(sess, SakaiSession.SakaiSession)
self.session = sess
def getWebContentForSite(self, siteid):
"""
YOU GET A WEBCONTENT, YOU GET A WEBCONTENT, EVERYONE GETS A WEBCONTENT!
:param siteid: The siteid you wish to use.
:return: Webcontent information for the specified site.
"""
        return self.session.executeRequest('GET', '/webcontent/site/{0}.json'.format(siteid))
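    # Usage sketch (assumes an authenticated SakaiSession named `session`):
    #
    #   wc = WebContent(session)
    #   wc.getWebContentForSite('my-site-id')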
|
{
"content_hash": "26b81519154fba6b63b53bb412c4d00a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 95,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.656599552572707,
"repo_name": "willkara/SakaiPy",
"id": "aea0c192ad2d2f0ea412ba8f4d673516f3f0b9a0",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SakaiPy/SakaiTools/WebContent.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18179"
}
],
"symlink_target": ""
}
|
"""
An interactive, stateful AJAX shell that runs Python code on the server.
"""
from __future__ import print_function
from gluon._compat import ClassType, pickle, StringIO
import logging
import new
import sys
import traceback
import types
import threading
locker = threading.RLock()
# Set to True if stack traces should be shown in the browser, etc.
_DEBUG = True
# The entity kind for shell historys. Feel free to rename to suit your app.
_HISTORY_KIND = '_Shell_History'
# Types that can't be pickled.
UNPICKLABLE_TYPES = [
types.ModuleType,
type,
ClassType,
types.FunctionType,
]
# Unpicklable statements to seed new historys with.
INITIAL_UNPICKLABLES = [
'import logging',
'import os',
'import sys',
]
class History:
"""A shell history. Stores the history's globals.
Each history globals is stored in one of two places:
If the global is picklable, it's stored in the parallel globals and
global_names list properties. (They're parallel lists to work around the
unfortunate fact that the datastore can't store dictionaries natively.)
If the global is not picklable (e.g. modules, classes, and functions), or if
it was created by the same statement that created an unpicklable global,
it's not stored directly. Instead, the statement is stored in the
unpicklables list property. On each request, before executing the current
statement, the unpicklable statements are evaluated to recreate the
unpicklable globals.
The unpicklable_names property stores all of the names of globals that were
added by unpicklable statements. When we pickle and store the globals after
executing a statement, we skip the ones in unpicklable_names.
Using Text instead of string is an optimization. We don't query on any of
these properties, so they don't need to be indexed.
"""
global_names = []
globals = []
unpicklable_names = []
unpicklables = []
def set_global(self, name, value):
"""Adds a global, or updates it if it already exists.
Also removes the global from the list of unpicklable names.
Args:
name: the name of the global to remove
value: any picklable value
"""
blob = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
if name in self.global_names:
index = self.global_names.index(name)
self.globals[index] = blob
else:
self.global_names.append(name)
self.globals.append(blob)
self.remove_unpicklable_name(name)
def remove_global(self, name):
"""Removes a global, if it exists.
Args:
name: string, the name of the global to remove
"""
if name in self.global_names:
index = self.global_names.index(name)
del self.global_names[index]
del self.globals[index]
def globals_dict(self):
"""Returns a dictionary view of the globals.
"""
return dict((name, pickle.loads(val))
for name, val in zip(self.global_names, self.globals))
def add_unpicklable(self, statement, names):
"""Adds a statement and list of names to the unpicklables.
Also removes the names from the globals.
Args:
statement: string, the statement that created new unpicklable global(s).
names: list of strings; the names of the globals created by the statement.
"""
self.unpicklables.append(statement)
for name in names:
self.remove_global(name)
if name not in self.unpicklable_names:
self.unpicklable_names.append(name)
def remove_unpicklable_name(self, name):
"""Removes a name from the list of unpicklable names, if it exists.
Args:
name: string, the name of the unpicklable global to remove
"""
if name in self.unpicklable_names:
self.unpicklable_names.remove(name)
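# A short sketch of the History bookkeeping, using the methods above:
#
#   h = History()
#   h.set_global('x', 42)
#   h.globals_dict()                        # -> {'x': 42}
#   h.add_unpicklable('import os', ['os'])  # 'os' moves to unpicklable_names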
def represent(obj):
"""Returns a string representing the given object's value, which should allow the
code below to determine whether the object changes over time.
"""
try:
return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
except:
return repr(obj)
def run(history, statement, env={}):
"""
Evaluates a python statement in a given history and returns the result.
"""
history.unpicklables = INITIAL_UNPICKLABLES
# extract the statement to be run
if not statement:
return ''
# the python compiler doesn't like network line endings
statement = statement.replace('\r\n', '\n')
# add a couple newlines at the end of the statement. this makes
# single-line expressions such as 'class Foo: pass' evaluate happily.
statement += '\n\n'
# log and compile the statement up front
try:
logging.info('Compiling and evaluating:\n%s' % statement)
compiled = compile(statement, '<string>', 'single')
except:
return str(traceback.format_exc())
# create a dedicated module to be used as this statement's __main__
statement_module = new.module('__main__')
# use this request's __builtin__, since it changes on each request.
# this is needed for import statements, among other things.
import __builtin__
statement_module.__builtins__ = __builtin__
    # the caller's history object carries the pickled globals between statements
# swap in our custom module for __main__. then unpickle the history
# globals, run the statement, and re-pickle the history globals, all
# inside it.
old_main = sys.modules.get('__main__')
output = StringIO()
try:
sys.modules['__main__'] = statement_module
statement_module.__name__ = '__main__'
statement_module.__dict__.update(env)
# re-evaluate the unpicklables
for code in history.unpicklables:
exec(code, statement_module.__dict__)
# re-initialize the globals
for name, val in history.globals_dict().items():
try:
statement_module.__dict__[name] = val
except:
msg = 'Dropping %s since it could not be unpickled.\n' % name
output.write(msg)
logging.warning(msg + traceback.format_exc())
history.remove_global(name)
# run!
old_globals = dict((key, represent(
value)) for key, value in statement_module.__dict__.items())
try:
old_stdout, old_stderr = sys.stdout, sys.stderr
try:
sys.stderr = sys.stdout = output
locker.acquire()
exec(compiled, statement_module.__dict__)
finally:
locker.release()
sys.stdout, sys.stderr = old_stdout, old_stderr
except:
output.write(str(traceback.format_exc()))
return output.getvalue()
# extract the new globals that this statement added
new_globals = {}
for name, val in statement_module.__dict__.items():
if name not in old_globals or represent(val) != old_globals[name]:
new_globals[name] = val
if True in [isinstance(val, tuple(UNPICKLABLE_TYPES))
for val in new_globals.values()]:
# this statement added an unpicklable global. store the statement and
# the names of all of the globals it added in the unpicklables.
history.add_unpicklable(statement, new_globals.keys())
logging.debug('Storing this statement as an unpicklable.')
else:
# this statement didn't add any unpicklables. pickle and store the
# new globals back into the datastore.
for name, val in new_globals.items():
if not name.startswith('__'):
try:
history.set_global(name, val)
except (TypeError, pickle.PicklingError) as ex:
UNPICKLABLE_TYPES.append(type(val))
history.add_unpicklable(statement, new_globals.keys())
finally:
sys.modules['__main__'] = old_main
return output.getvalue()
if __name__ == '__main__':
history = History()
while True:
print(run(history, raw_input('>>> ')).rstrip())
|
{
"content_hash": "709e4312a7e0120674a2dce9897b8894",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 86,
"avg_line_length": 34.611570247933884,
"alnum_prop": 0.6224928366762178,
"repo_name": "xiang12835/python_web",
"id": "638d3c81864cb26451671434bc41069dbb9445c4",
"size": "9540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py2_web2py/web2py/gluon/contrib/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3341"
},
{
"name": "Python",
"bytes": "17420"
}
],
"symlink_target": ""
}
|
from django.db.models.signals import pre_save
from django.db import models
from django.utils.translation import ugettext_lazy as _
from social_graph.signals import object_visited
from content_interactions.signals import (
item_liked,
item_disliked,
item_rated,
item_rate_modified,
item_marked_as_favorite,
item_unmarked_as_favorite,
item_shared,
item_denounced,
item_denounce_removed,
item_commented,
item_comment_removed
)
from handlers import (
like_handler,
dislike_handler,
new_rating_handler,
updated_rating_handler,
update_cached_rating,
favorite_mark_handler,
favorite_unmark_handler,
share_handler,
denounce_handler,
denounce_remove_handler,
comment_handler,
comment_deleted_handler,
visit_handler
)
class BaseProcessor(object):
fields = None
pre_save_handlers = None
handlers = None
def __init__(self, stats_clazz):
super(BaseProcessor, self).__init__()
for name, field in self.get_fields():
field.contribute_to_class(stats_clazz, name)
pre_save_handlers = self.get_pre_save_handlers()
if pre_save_handlers:
for handler_code, pre_save_handler in pre_save_handlers:
pre_save.connect(pre_save_handler, sender=stats_clazz, dispatch_uid='%s' % handler_code)
handlers = self.get_handlers()
if handlers:
for handler_code, signal, handler in handlers:
signal.connect(handler, dispatch_uid='%s_process' % handler_code)
self.stats_clazz = stats_clazz
def get_fields(self):
assert self.fields, "No fields defined for this stats processor."
return self.fields
def get_pre_save_handlers(self):
return self.pre_save_handlers
def get_handlers(self):
return self.handlers
class Likes(BaseProcessor):
fields = (
('likes', models.IntegerField(default=0, verbose_name=_('Likes'))),
)
handlers = (
('item_liked', item_liked, like_handler),
('item_disliked', item_disliked, dislike_handler),
)
class Ratings(BaseProcessor):
fields = (
('ratings', models.IntegerField(default=0, verbose_name=_('Ratings'))),
('rating_5_count', models.IntegerField(default=0, verbose_name=_('Ratings of 5'))),
('rating_4_count', models.IntegerField(default=0, verbose_name=_('Ratings of 4'))),
('rating_3_count', models.IntegerField(default=0, verbose_name=_('Ratings of 3'))),
('rating_2_count', models.IntegerField(default=0, verbose_name=_('Ratings of 2'))),
('rating_1_count', models.IntegerField(default=0, verbose_name=_('Ratings of 1'))),
('rating', models.DecimalField(default=0, decimal_places=1, max_digits=2, verbose_name=_('Average Rating'))),
)
pre_save_handlers = (
('update_cached_rating', update_cached_rating),
)
handlers = (
('item_rated', item_rated, new_rating_handler),
('item_rate_modified', item_rate_modified, updated_rating_handler),
)
class FavoriteMarks(BaseProcessor):
fields = (
('favorite_marks', models.IntegerField(default=0, verbose_name=_('Favorite Marks'))),
)
handlers = (
('item_marked_favorite', item_marked_as_favorite, favorite_mark_handler),
('item_unmarked_favorite', item_unmarked_as_favorite, favorite_unmark_handler),
)
class Shares(BaseProcessor):
fields = (
('shares', models.IntegerField(default=0, verbose_name=_('Shares'))),
)
handlers = (
('item_shared', item_shared, share_handler),
)
class Denounces(BaseProcessor):
fields = (
('denounces', models.IntegerField(default=0, verbose_name=_('Denounces'))),
)
handlers = (
('item_denounced', item_denounced, denounce_handler),
('item_denounce_removed', item_denounce_removed, denounce_remove_handler),
)
class Comments(BaseProcessor):
fields = (
('comments', models.IntegerField(default=0, verbose_name=_('comments'))),
)
handlers = (
('item_commented', item_commented, comment_handler),
('item_comment_removed', item_comment_removed, comment_deleted_handler),
)
class Visits(BaseProcessor):
fields = (
('visits', models.IntegerField(default=0, verbose_name=_('Visits'))),
)
handlers = (
('item_visited', object_visited, visit_handler),
)
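# Usage sketch (hypothetical stats model): instantiating a processor with a
# model class contributes the counter fields and wires up the signal handlers:
#
#   class ItemStats(models.Model):
#       item = models.OneToOneField('myapp.Item')
#
#   Likes(ItemStats)   # adds an integer 'likes' field plus like/dislike handlers
#   Visits(ItemStats)  # adds 'visits' and connects object_visited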
|
{
"content_hash": "5356f9afda13e2a93c20edefda57073e",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 117,
"avg_line_length": 31.899280575539567,
"alnum_prop": 0.6409562471808751,
"repo_name": "suselrd/django-content-interactions",
"id": "80315cdf6bab47e47558d24d8194777aac0bf6af",
"size": "4449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content_interactions_stats/processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4440"
},
{
"name": "JavaScript",
"bytes": "4671"
},
{
"name": "Python",
"bytes": "131299"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import numba.unittest_support as unittest
from numba import jit, testing
class TestClosure(unittest.TestCase):
def run_jit_closure_variable(self, **jitargs):
Y = 10
def add_Y(x):
return x + Y
c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
self.assertEqual(c_add_Y(1), 11)
# Like globals in Numba, the value of the closure is captured
# at time of JIT
Y = 12 # should not affect function
self.assertEqual(c_add_Y(1), 11)
def test_jit_closure_variable(self):
self.run_jit_closure_variable(forceobj=True)
def test_jit_closure_variable_npm(self):
self.run_jit_closure_variable(nopython=True)
def run_rejitting_closure(self, **jitargs):
Y = 10
def add_Y(x):
return x + Y
c_add_Y = jit('i4(i4)', **jitargs)(add_Y)
self.assertEqual(c_add_Y(1), 11)
# Redo the jit
Y = 12
c_add_Y_2 = jit('i4(i4)', **jitargs)(add_Y)
self.assertEqual(c_add_Y_2(1), 13)
Y = 13 # should not affect function
self.assertEqual(c_add_Y_2(1), 13)
self.assertEqual(c_add_Y(1), 11) # Test first function again
def test_rejitting_closure(self):
self.run_rejitting_closure(forceobj=True)
def test_rejitting_closure_npm(self):
self.run_rejitting_closure(nopython=True)
def run_jit_multiple_closure_variables(self, **jitargs):
Y = 10
Z = 2
def add_Y_mult_Z(x):
return (x + Y) * Z
c_add_Y_mult_Z = jit('i4(i4)', **jitargs)(add_Y_mult_Z)
self.assertEqual(c_add_Y_mult_Z(1), 22)
def test_jit_multiple_closure_variables(self):
self.run_jit_multiple_closure_variables(forceobj=True)
def test_jit_multiple_closure_variables_npm(self):
self.run_jit_multiple_closure_variables(nopython=True)
def run_jit_inner_function(self, **jitargs):
def mult_10(a):
return a * 10
c_mult_10 = jit('intp(intp)', **jitargs)(mult_10)
c_mult_10.disable_compile()
def do_math(x):
return c_mult_10(x + 4)
c_do_math = jit('intp(intp)', **jitargs)(do_math)
c_do_math.disable_compile()
old_refcts = sys.getrefcount(c_do_math), sys.getrefcount(c_mult_10)
self.assertEqual(c_do_math(1), 50)
self.assertEqual(old_refcts,
(sys.getrefcount(c_do_math), sys.getrefcount(c_mult_10)))
def test_jit_inner_function(self):
self.run_jit_inner_function(forceobj=True)
def test_jit_inner_function_npm(self):
self.run_jit_inner_function(nopython=True)
@testing.allow_interpreter_mode
def test_return_closure(self):
def outer(x):
def inner():
return x + 1
return inner
cfunc = jit(outer)
self.assertEqual(cfunc(10)(), outer(10)())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9298777b065f90689abacf6b177e5d0e",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 82,
"avg_line_length": 27.814814814814813,
"alnum_prop": 0.5855525965379494,
"repo_name": "GaZ3ll3/numba",
"id": "6439b6ee83064118c28dc18e52e4eabd12dbef81",
"size": "3004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/tests/test_closure.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "228078"
},
{
"name": "C++",
"bytes": "18847"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3075839"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
"""
Support for Rain Bird Irrigation system LNK WiFi Module.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor.rainbird/
"""
import logging
import voluptuous as vol
from homeassistant.components.rainbird import DATA_RAINBIRD
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
DEPENDENCIES = ['rainbird']
_LOGGER = logging.getLogger(__name__)
# sensor_type [ description, unit, icon ]
SENSOR_TYPES = {
'rainsensor': ['Rainsensor', None, 'mdi:water']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
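# Illustrative configuration.yaml entry (assumes the rainbird hub component
# is configured separately):
#
#     sensor:
#       - platform: rainbird
#         monitored_conditions:
#           - rainsensor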
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up a Rain Bird sensor."""
controller = hass.data[DATA_RAINBIRD]
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
sensors.append(
RainBirdSensor(controller, sensor_type))
add_devices(sensors, True)
class RainBirdSensor(Entity):
"""A sensor implementation for Rain Bird device."""
def __init__(self, controller, sensor_type):
"""Initialize the Rain Bird sensor."""
self._sensor_type = sensor_type
self._controller = controller
self._name = SENSOR_TYPES[self._sensor_type][0]
self._icon = SENSOR_TYPES[self._sensor_type][2]
self._unit_of_measurement = SENSOR_TYPES[self._sensor_type][1]
self._state = None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating sensor: %s", self._name)
if self._sensor_type == 'rainsensor':
self._state = self._controller.currentRainSensorState()
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return icon."""
return self._icon
|
{
"content_hash": "72d215f2f05d3667cac1d3dcf9cadadc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 75,
"avg_line_length": 29.7625,
"alnum_prop": 0.6707265854682907,
"repo_name": "ewandor/home-assistant",
"id": "875e9c37bd3206974ab61801c8a621aa0c2e2c3a",
"size": "2381",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/rainbird.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.core.paginator import Paginator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wagtail_embed_videos.edit_handlers import EmbedVideoChooserPanel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailimages.models import Image
from wagtail.wagtailsearch import index
from premium.models import PremiumGrant, RewardNode
from premium.wagtail.models import PremiumPage
@python_2_unicode_compatible
class PatreonReward(models.Model):
id = models.UUIDField(primary_key = True, default = uuid.uuid4, editable = False)
created = models.DateTimeField(auto_now_add = True, null = False, blank = False)
modified = models.DateTimeField(auto_now = True, null = False, blank = False)
reward = models.ForeignKey(PremiumGrant, on_delete = models.CASCADE,
null = False, blank = False)
node = models.ForeignKey(RewardNode, on_delete = models.CASCADE,
null = False, blank = False)
def __str__(self):
return str(self.id)
class Index(Page):
parent_page_types = [Page]
image = models.ForeignKey(
'wagtailimages.Image',
null = True,
blank = True,
on_delete = models.SET_NULL,
related_name = '+'
)
content_panels = PremiumPage.content_panels + [ImageChooserPanel('image'),]
def serve(self, request):
live_pages = self.get_children().live().specific().order_by('-first_published_at')
paginator = Paginator(live_pages, getattr(settings, 'EXAMPLE_ITEMS_ON_INDEX_PAGE', 10))
if 'page' in request.GET:
try:
page_number = int(request.GET['page'])
except ValueError:
page_number = 1
if page_number not in paginator.page_range:
page_number = 1
else:
page_number = 1
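        # e.g. "?page=2" selects the second page; a missing, non-numeric, or
        # out-of-range value falls back to page 1.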
# TODO: Remove this check. Once in production num_pages will never be 0
if paginator.num_pages:
request.page = paginator.page(page_number)
return super(Index, self).serve(request)
class GeneralAnnouncement(PremiumPage):
subpage_types = []
image = models.ForeignKey(
'wagtailimages.Image',
null = True,
blank = True,
on_delete = models.SET_NULL,
related_name = '+'
)
body = StreamField([
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
])
content_panels = PremiumPage.content_panels + [
ImageChooserPanel('image'), StreamFieldPanel('body'),
]
search_fields = PremiumPage.search_fields + [
index.SearchField('body'),
]
class VideoAnnouncement(PremiumPage):
subpage_types = []
image = models.ForeignKey(
'wagtailimages.Image',
null = True,
blank = True,
on_delete = models.SET_NULL,
related_name = '+'
)
video = models.ForeignKey(
'wagtail_embed_videos.EmbedVideo',
verbose_name = "Video",
null = True,
blank = True,
on_delete = models.SET_NULL,
related_name='+'
)
body = RichTextField()
content_panels = PremiumPage.content_panels + [
EmbedVideoChooserPanel('video'),
ImageChooserPanel('image'),
FieldPanel('body'),
]
search_fields = PremiumPage.search_fields + [
index.SearchField('body'),
]
|
{
"content_hash": "af9279feabb5e280d8cab9e683ee3e32",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 89,
"avg_line_length": 27.479674796747968,
"alnum_prop": 0.7304733727810651,
"repo_name": "PaulWGraham/ExampleSite",
"id": "7d4d64f5634bc0b17d4b3cdb97bf8239f971fddb",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brogue/example/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47808"
},
{
"name": "Python",
"bytes": "35513"
}
],
"symlink_target": ""
}
|
def main(request, response):
if request.method == "OPTIONS":
response.headers.set(b"Access-Control-Allow-Origin", b"*")
response.headers.set(b"Access-Control-Allow-Headers", b"*")
response.status = 200
elif request.method == "GET":
response.headers.set(b"Access-Control-Allow-Origin", b"*")
if request.headers.get(b"X-Test"):
response.headers.set(b"Content-Type", b"text/plain")
response.content = b"PASS"
else:
response.status = 400
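# Illustrative exchange (assumed client behavior): the browser first sends an
# OPTIONS preflight such as
#     Access-Control-Request-Headers: x-test
# and, because the response allows "*", follows up with a GET carrying X-Test,
# which this handler answers with "PASS".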
|
{
"content_hash": "c0d648879025d1e5985d6caa5adfcbc1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 43.833333333333336,
"alnum_prop": 0.6083650190114068,
"repo_name": "scheib/chromium",
"id": "87d36160dffaa6401c95b5bfcac38506f2273ef5",
"size": "526",
"binary": false,
"copies": "21",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/xhr/resources/access-control-preflight-request-allow-headers-returns-star.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from uuid import uuid4
from preggy import expect
from tornado import gen
from level.config import Config
from level.extensions.pubsub.redis import PubSub
from tests.unit.base import TestCase, async_case
class RedisPubSubTestCase(TestCase):
def setUp(self):
self.config = Config(
REDIS_HOST='localhost',
REDIS_PORT=4448,
REDIS_DATABASE=0,
REDIS_PASSWORD=None,
)
def test_can_create_instance(self):
ps = PubSub(self.config)
expect(ps).not_to_be_null()
def test_can_define_defaults(self):
conf = Config()
ps = PubSub(conf)
ps.define_configuration_defaults()
expect(conf.REDIS_HOST).to_equal('localhost')
expect(conf.REDIS_PORT).to_equal(6379)
@async_case
async def test_can_initialize(self):
ps = PubSub(self.config)
await ps.initialize()
expect(ps.redis).not_to_be_null()
@async_case
async def test_can_cleanup(self):
ps = PubSub(self.config)
await ps.initialize()
expect(ps.redis).not_to_be_null()
await ps.cleanup()
expect(ps.redis_closed).to_be_true()
expect(ps.redis).to_be_null()
expect(ps.subs_closed).to_be_true()
expect(ps.subs).to_be_null()
await ps.cleanup()
expect(ps.redis_closed).to_be_true()
expect(ps.subs_closed).to_be_true()
@async_case
async def test_can_subscribe_to_channel(self):
ps = PubSub(self.config)
await ps.initialize()
expect(ps.redis).not_to_be_null()
received = {}
def on_message(chan, msg):
received[chan] = msg
chan = str(uuid4())
await ps.subscribe(chan, on_message)
await ps.publish(chan, 'qwe')
while chan not in received:
await gen.sleep(0.001)
expect(received).to_include(chan)
expect(received[chan]).to_be_like('qwe')
@async_case
async def test_can_subscribe_to_channel_twice(self):
ps = PubSub(self.config)
await ps.initialize()
expect(ps.redis).not_to_be_null()
received = {}
def on_message(chan, msg):
received[chan] = msg
chan = str(uuid4())
await ps.subscribe(chan, lambda chan, msg: '')
await ps.subscribe(chan, on_message)
await ps.publish(chan, 'qwe')
while chan not in received:
await gen.sleep(0.001)
expect(received).to_include(chan)
expect(received[chan]).to_be_like('qwe')
@async_case
async def test_redis_methods(self):
ps = PubSub(self.config)
await ps.initialize()
await ps.set('key', 'qwe')
res = await ps.get('key')
expect(res).to_equal('qwe')
|
{
"content_hash": "9945e8f427434215bee30ab4cc1b3af1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 56,
"avg_line_length": 26.245283018867923,
"alnum_prop": 0.5880661394680087,
"repo_name": "heynemann/level",
"id": "daeeb0997960facebe37486bb227ff22d06242b5",
"size": "3044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/extensions/test_pubsub_redis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "59409"
},
{
"name": "Makefile",
"bytes": "1507"
},
{
"name": "Objective-C",
"bytes": "284053"
},
{
"name": "Objective-C++",
"bytes": "30603"
},
{
"name": "Python",
"bytes": "44683"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import re
import copy
import xml.etree.ElementTree as ET
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import get_http_data, is_py2_old
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import RTMP
class Qbrick(Service, OpenGraphThumbMixin):
supported_domains = ['di.se']
def get(self, options):
error, data = self.get_urldata()
if error:
log.error("Can't get the page")
return
if self.exclude(options):
return
if re.findall(r"di.se", self.url):
match = re.search("src=\"(http://qstream.*)\"></iframe", data)
if not match:
log.error("Can't find video info for: %s", self.url)
return
error, data = get_http_data(match.group(1))
match = re.search(r"data-qbrick-ccid=\"([0-9A-Z]+)\"", data)
if not match:
log.error("Can't find video file for: %s", self.url)
return
host = "http://vms.api.qbrick.com/rest/v3/getplayer/%s" % match.group(1)
else:
log.error("Can't find any info for %s", self.url)
return
error, data = get_http_data(host)
xml = ET.XML(data)
try:
url = xml.find("media").find("item").find("playlist").find("stream").find("format").find("substream").text
except AttributeError:
log.error("Can't find video file")
return
live = xml.find("media").find("item").find("playlist").find("stream").attrib["isLive"]
if live == "true":
options.live = True
error, data = get_http_data(url)
xml = ET.XML(data)
server = xml.find("head").find("meta").attrib["base"]
streams = xml.find("body").find("switch")
if is_py2_old:
sa = list(streams.getiterator("video"))
else:
sa = list(streams.iter("video"))
for i in sa:
options.other = "-y '%s'" % i.attrib["src"]
yield RTMP(copy.copy(options), server, i.attrib["system-bitrate"])
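# Note: "-y <playpath>" is rtmpdump's playpath option; each <video> element in
# the SMIL switch above yields one selectable bitrate.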
|
{
"content_hash": "0465c01063d445af77bc0f932a7650d7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 118,
"avg_line_length": 36.54237288135593,
"alnum_prop": 0.5584415584415584,
"repo_name": "OakNinja/svtplay-dl",
"id": "f61e39a358aea2e00106da61074cb181c65855bb",
"size": "2245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/svtplay_dl/service/qbrick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3731"
},
{
"name": "Perl",
"bytes": "4077"
},
{
"name": "Python",
"bytes": "154413"
},
{
"name": "Shell",
"bytes": "1995"
}
],
"symlink_target": ""
}
|
"""
WSGI config for testing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import envvars
envvars_file = os.path.join('env')
if os.path.exists(envvars_file):
envvars.load(envvars_file)
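# The env file is expected to hold plain KEY=VALUE pairs, e.g. (illustrative
# values):
#
#     DJANGO_SETTINGS_MODULE=conf.settings
#     SECRET_KEY=change-me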
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "77b2401db4b9f54284009bf816b65909",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.7591743119266054,
"repo_name": "mattseymour/django-skeleton",
"id": "7d214410fed86eb278318916482abb88c17165d0",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3171"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.economics import CurrencyType
log = logging.getLogger(__name__)
class TestCurrencyType(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
    def tearDown(self):
        # Close the mkstemp handle before removing the temp file.
        os.close(self.fd)
        os.remove(self.path)
def test_create_currencytype(self):
pyidf.validation_level = ValidationLevel.error
obj = CurrencyType()
# alpha
var_monetary_unit = "USD"
obj.monetary_unit = var_monetary_unit
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.currencytypes[0].monetary_unit, var_monetary_unit)
|
{
"content_hash": "95c5fe90166ebb421ac3a302ca520df7",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 23.789473684210527,
"alnum_prop": 0.6438053097345132,
"repo_name": "rbuffat/pyidf",
"id": "139cfa28ac0b9a76d5c8cb96e0abcd7947a1b28f",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_currencytype.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
}
|
"""Implements admin interface for page tags content"""
from django.contrib import admin
from pages.conf import settings
if settings.PAGES_PAGE_USE_EXT_CONTENT_TYPES:
from pagesext.models.pagetagscontent import PageTagsContent
class PageTagsContentAdmin(admin.ModelAdmin):
list_display = ('__str__', 'created_by', 'updated_by', 'date_created', 'date_updated',)
list_display_links = ['__str__']
fieldsets = [
(None, {'fields': [
('language', ),
('name', ),
('tags', ),
('comment', ),
]}),
]
exclude = ('type', 'sid', 'is_extended', 'created_by', 'updated_by',)
save_on_top = True
actions_on_bottom = True
admin.site.register(PageTagsContent, PageTagsContentAdmin)
    # Keep the inline under the same feature flag: PageTagsContent is only
    # imported when PAGES_PAGE_USE_EXT_CONTENT_TYPES is enabled.
    class PageTagsContentInline(admin.StackedInline):
        model = PageTagsContent
        extra = 1
        exclude = ('sid', 'is_extended', 'created_by', 'updated_by', 'date_created', 'date_updated',)
        fieldsets = [
            (None, {'fields': [
                ('language', ),
                ('name', ),
                ('tags', ),
                ('comment', ),
            ]}),
        ]
|
{
"content_hash": "19f5ce98a4e35df6ebe394a37e0ddb83",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 101,
"avg_line_length": 31.564102564102566,
"alnum_prop": 0.5207148659626321,
"repo_name": "dlancer/django-pages-cms-extensions",
"id": "b0a0a937c45e12679955cac928b061cef6770e35",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagesext/admin/pagetagscontent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4913"
},
{
"name": "Python",
"bytes": "45531"
}
],
"symlink_target": ""
}
|
""" Creating multimodel predictions
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
from . import create_ensemble_steps as ensemble_create
from . import create_prediction_steps as prediction_create
from . import compare_predictions_steps as compare_pred
class TestMultimodelPrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a prediction from a multi model:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I retrieve a list of remote models tagged with "<tag>"
And I create a local multi model
When I create a local prediction for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | params | tag | data_input | prediction |
| ../data/iris.csv | 10 | 10 | 10 | {"tags":["mytag"]} | mytag | {"petal width": 0.5} | Iris-setosa |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag', '{"petal width": 0.5}', 'Iris-setosa']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
compare_pred.i_create_a_local_multi_model(self)
compare_pred.i_create_a_local_prediction(self, example[6])
compare_pred.the_local_prediction_is(self, example[7])
def test_scenario2(self):
"""
Scenario: Successfully creating a local batch prediction from a multi model:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I create a model with "<params>"
And I wait until the model is ready less than <time_3> secs
And I retrieve a list of remote models tagged with "<tag>"
And I create a local multi model
When I create a batch multimodel prediction for "<data_inputs>"
Then the predictions are "<predictions>"
Examples:
| data | time_1 | time_2 | time_3 | params | tag | data_inputs | predictions |
| ../data/iris.csv | 10 | 10 | 10 | {"tags":["mytag"]} | mytag | [{"petal width": 0.5}, {"petal length": 6, "petal width": 2}] | ["Iris-setosa", "Iris-virginica"] |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '10', '10', '10', '{"tags":["mytag"]}', 'mytag', '[{"petal width": 0.5}, {"petal length": 6, "petal width": 2}]', '["Iris-setosa", "Iris-virginica"]']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
model_create.i_create_a_model_with(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[3])
compare_pred.i_retrieve_a_list_of_remote_models(self, example[5])
compare_pred.i_create_a_local_multi_model(self)
compare_pred.i_create_a_batch_prediction_from_a_multi_model(self, example[6])
compare_pred.the_batch_mm_predictions_are(self, example[7])
|
{
"content_hash": "2c30615a23398c353bb7ee807569c673",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 194,
"avg_line_length": 55.825688073394495,
"alnum_prop": 0.5704190632703369,
"repo_name": "mmerce/python",
"id": "d8e404c332e7290161c2f6d12b14c814978fff5a",
"size": "6687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigml/tests/test_11_multimodel_prediction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1531559"
}
],
"symlink_target": ""
}
|
from tests import SpecTestSuite
from marko import Markdown
from marko.ext.gfm import gfm
class TestCommonMark(SpecTestSuite):
@classmethod
def setup_class(cls):
cls.markdown = Markdown()
def test_greedy_consume_prefix(self):
md = "> 1. Item 1\n> ```code\n> indented code\n> ```"
html = (
'<blockquote><ol><li>Item 1<pre><code class="language-code">'
" indented code\n</code></pre></li></ol></blockquote>"
)
self.assert_case(md, html)
def test_parse_nbsp_no_crash(self):
md = "- \xa0A"
html = "<ul>\n<li>A</li>\n</ul>"
self.assert_case(md, html)
TestCommonMark.load_spec("commonmark")
GFM_IGNORE = ["autolinks_015", "autolinks_018", "autolinks_019"]
class TestGFM(SpecTestSuite):
@classmethod
def setup_class(cls):
cls.markdown = gfm
@classmethod
def ignore_case(cls, n):
return n in GFM_IGNORE
def test_parse_table_with_backslashes(self):
md = "\\\n\n| \\ |\n| - |\n| \\ |"
html = "<p>\\</p><table><thead><tr><th>\\</th></tr></thead><tbody><tr><td>\\</td></tr></tbody></table>"
self.assert_case(md, html)
TestGFM.load_spec("gfm")
|
{
"content_hash": "d14095d5fb4392bcc3fa5248a857919c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 111,
"avg_line_length": 27.133333333333333,
"alnum_prop": 0.5733005733005733,
"repo_name": "frostming/marko",
"id": "6a67e28415fcca25cf25ce0a007adfd0c02e53f7",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138898"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
}
|
"""Configure the tests for :mod:`astropy.cosmology`."""
from astropy.cosmology.tests.helper import clean_registry # noqa: F401, F403
from astropy.tests.helper import pickle_protocol # noqa: F401, F403
|
{
"content_hash": "566d7866b6a69a5157b4fc687ccb047b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 77,
"avg_line_length": 51,
"alnum_prop": 0.7598039215686274,
"repo_name": "StuartLittlefair/astropy",
"id": "aefa9a5da9ca6ce95d4646fea020b19298e65527",
"size": "269",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "astropy/cosmology/tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12224600"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
import mock
from nose.tools import assert_false
import mkt
import mkt.site.tests
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
from .acl import (action_allowed, check_webapp_ownership, check_ownership,
check_reviewer, match_rules)
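# ACL rules are comma-separated "App:Action" pairs; '*' is a wildcard, and a
# query with action '%' asks whether the rule grants any action for that app
# (exercised in test_match_rules below).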
class ACLTestCase(mkt.site.tests.TestCase):
"""Test basic ACL by going to a locked page."""
def test_match_rules(self):
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Apps:Edit,Admin:*',
'Apps:Edit,Localizer:*,Admin:*',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), '%s != Admin:%%' % rule
rules = (
'Stats:View',
'Apps:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), (
"%s == Admin:%% and shouldn't" % rule)
def test_anonymous_user(self):
# Fake request must not have .groups, just like an anonymous user.
fake_request = HttpRequest()
assert_false(action_allowed(fake_request, 'Admin', '%'))
def test_admin_login_anon(self):
# Login form for anonymous user on the admin page.
url = '/reviewers/'
r = self.client.get(url)
self.assertLoginRedirects(r, url)
class TestHasPerm(mkt.site.tests.TestCase):
fixtures = fixture('group_admin', 'user_999', 'user_admin',
'user_admin_group', 'webapp_337141')
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
self.app = Webapp.objects.get(pk=337141)
self.app.webappuser_set.create(user=self.user)
self.request = mock.Mock()
self.request.groups = ()
self.request.user = self.user
def login_admin(self):
user = UserProfile.objects.get(email='admin@mozilla.com')
self.login(user)
return user
def test_anonymous(self):
self.request.user = AnonymousUser()
self.client.logout()
assert not check_webapp_ownership(self.request, self.app)
def test_admin(self):
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert check_webapp_ownership(self.request, self.app)
assert check_webapp_ownership(self.request, self.app, admin=True)
assert not check_webapp_ownership(self.request, self.app, admin=False)
def test_require_author(self):
self.login(self.user)
assert check_ownership(self.request, self.app, require_author=True)
def test_require_author_when_admin(self):
self.login(self.user)
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert check_ownership(self.request, self.app, require_author=False)
assert not check_ownership(self.request, self.app,
require_author=True)
def test_disabled(self):
self.login(self.user)
self.app.update(status=mkt.STATUS_DISABLED)
assert not check_webapp_ownership(self.request, self.app)
self.test_admin()
def test_deleted(self):
self.login(self.user)
self.app.update(status=mkt.STATUS_DELETED)
assert not check_webapp_ownership(self.request, self.app)
self.request.user = self.login_admin()
self.request.groups = self.request.user.groups.all()
assert not check_webapp_ownership(self.request, self.app)
def test_ignore_disabled(self):
self.login(self.user)
self.app.update(status=mkt.STATUS_DISABLED)
assert check_webapp_ownership(self.request, self.app,
ignore_disabled=True)
def test_owner(self):
self.login(self.user)
assert check_webapp_ownership(self.request, self.app)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_DEV)
assert not check_webapp_ownership(self.request, self.app)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_VIEWER)
assert not check_webapp_ownership(self.request, self.app)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_SUPPORT)
assert not check_webapp_ownership(self.request, self.app)
def test_dev(self):
self.login(self.user)
assert check_webapp_ownership(self.request, self.app, dev=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_DEV)
assert check_webapp_ownership(self.request, self.app, dev=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_VIEWER)
assert not check_webapp_ownership(self.request, self.app, dev=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_SUPPORT)
assert not check_webapp_ownership(self.request, self.app, dev=True)
def test_viewer(self):
self.login(self.user)
assert check_webapp_ownership(self.request, self.app, viewer=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_DEV)
assert check_webapp_ownership(self.request, self.app, viewer=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_VIEWER)
assert check_webapp_ownership(self.request, self.app, viewer=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_SUPPORT)
assert check_webapp_ownership(self.request, self.app, viewer=True)
def test_support(self):
self.login(self.user)
        assert check_webapp_ownership(self.request, self.app, support=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_DEV)
assert not check_webapp_ownership(self.request, self.app,
support=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_VIEWER)
assert not check_webapp_ownership(self.request, self.app,
support=True)
self.app.webappuser_set.update(role=mkt.AUTHOR_ROLE_SUPPORT)
assert check_webapp_ownership(self.request, self.app, support=True)
class TestCheckReviewer(mkt.site.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
def test_no_perm(self):
req = mkt.site.tests.req_factory_factory('noop', user=self.user)
assert not check_reviewer(req)
def test_perm_apps(self):
self.grant_permission(self.user, 'Apps:Review')
req = mkt.site.tests.req_factory_factory('noop', user=self.user)
assert check_reviewer(req)
|
{
"content_hash": "15e94d5564204aec629198d54442a49e",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 78,
"avg_line_length": 35.78947368421053,
"alnum_prop": 0.6364705882352941,
"repo_name": "shahbaz17/zamboni",
"id": "3d8266011a05437596ef38d168d9b8cff966feef",
"size": "6800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/access/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357511"
},
{
"name": "HTML",
"bytes": "2331440"
},
{
"name": "JavaScript",
"bytes": "536153"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4400945"
},
{
"name": "Shell",
"bytes": "11200"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
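from typing import List
# TreeNode is provided by the LeetCode judge; see the stub sketched at the
# end of this file for standalone runs.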
class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
levels = []
if not root:
return levels
def helper(node, level):
# Starting a new level
if len(levels) == level:
levels.append([])
# Append the node to its appropriate level
levels[level].append(node.val)
if node.left:
helper(node.left, level+1)
if node.right:
helper(node.right, level+1)
helper(root, 0)
return levels[::-1]
# BFS
# class Solution:
# def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
# res = []
# temp = [root]
# while temp:
# row, nextTemp = [], []
# for node in temp:
# if node:
# row.append(node.val)
# nextTemp.append(node.left)
# nextTemp.append(node.right)
# if row:
# res.append(row)
# temp = nextTemp
# return res[::-1]
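# A standalone sketch (TreeNode and List are normally supplied by the LeetCode
# judge; this stub is an assumption for local runs). Both variants run in O(n)
# time and O(n) space for a tree of n nodes:
#
#     class TreeNode:
#         def __init__(self, val=0, left=None, right=None):
#             self.val, self.left, self.right = val, left, right
#
#     root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
#     Solution().levelOrderBottom(root)  # [[15, 7], [9, 20], [3]]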
|
{
"content_hash": "9331cb116f1e26770f1e8f407bfd4d4d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 30.37837837837838,
"alnum_prop": 0.44928825622775803,
"repo_name": "saisankargochhayat/algo_quest",
"id": "d7ce77084f6bf5dff6f80b260ca5d4b2e417e6a1",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/107. Binary Tree Level Order Traversal II/soln.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "405"
},
{
"name": "C++",
"bytes": "9149"
},
{
"name": "HTML",
"bytes": "1679"
},
{
"name": "Java",
"bytes": "3648"
},
{
"name": "JavaScript",
"bytes": "786"
},
{
"name": "Python",
"bytes": "248621"
},
{
"name": "Ruby",
"bytes": "2761"
},
{
"name": "Shell",
"bytes": "610"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
# Examples:
# url(r'^$', 'tweetnotes.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('main_site.urls')),
# url(r'^social/', include(
# 'socialregistration.urls',
# namespace='socialregistration')),
)
|
{
"content_hash": "cdfaa851caef8e76af820254ad9994ed",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 31.352941176470587,
"alnum_prop": 0.626641651031895,
"repo_name": "3quarterstack/tweetnotes",
"id": "db71caa7f05759938c4494f0bece28bb98ba3d28",
"size": "533",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tweetnotes/tweetnotes/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6993"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class LocalNetworkGateway(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param local_network_address_space: Local network site address space.
:type local_network_address_space:
~azure.mgmt.network.v2017_11_01.models.AddressSpace
:param gateway_ip_address: IP address of local network gateway.
:type gateway_ip_address: str
:param bgp_settings: Local network gateway's BGP speaker settings.
:type bgp_settings: ~azure.mgmt.network.v2017_11_01.models.BgpSettings
:param resource_guid: The resource GUID property of the
LocalNetworkGateway resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, local_network_address_space=None, gateway_ip_address=None, bgp_settings=None, resource_guid=None, etag=None):
super(LocalNetworkGateway, self).__init__(id=id, location=location, tags=tags)
self.local_network_address_space = local_network_address_space
self.gateway_ip_address = gateway_ip_address
self.bgp_settings = bgp_settings
self.resource_guid = resource_guid
self.provisioning_state = None
self.etag = etag
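# Illustrative construction (placeholder values; AddressSpace is defined in
# the same models package):
#
#     gw = LocalNetworkGateway(
#         location='westus',
#         gateway_ip_address='203.0.113.10',
#         local_network_address_space=AddressSpace(
#             address_prefixes=['10.0.0.0/24']),
#     )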
|
{
"content_hash": "6eb8f79385ec88c8c01d5efd80c8c983",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 167,
"avg_line_length": 42.53030303030303,
"alnum_prop": 0.6441040256501603,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "235072053cb5dc2992b422e96dcd672ae2a0669f",
"size": "3281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/local_network_gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from django.utils.html import escape
from djblets.markdown import markdown_escape, markdown_unescape
from djblets.webapi.resources.mixins.forms import (
UpdateFormMixin as DjbletsUpdateFormMixin)
from reviewboard.reviews.markdown_utils import (markdown_set_field_escaped,
render_markdown)
from reviewboard.webapi.base import ImportExtraDataError
class MarkdownFieldsMixin(object):
"""Mixes in common logic for Markdown text fields.
Any resource implementing this is assumed to have at least one
Markdown-capable text field.
Clients can pass ``?force-text-type=`` (for GET) or ``force_text_type=``
(for POST/PUT) with a value of ``plain``, ``markdown`` or ``html`` to
return the given text fields in the payload using the requested format.
When ``markdown`` is specified, the Markdown text fields will return valid
Markdown content, escaping if necessary.
When ``plain`` is specified, plain text will be returned instead. If
the content was in Markdown before, this will unescape the content.
When ``html`` is specified, the content will be transformed into HTML
suitable for display.
Clients can also pass ``?include-text-types=<type1>[,<type2>,...]``
(for GET) or ``include_text_types=<type1>[,<type2>,...]`` (for POST/PUT)
to return the text fields within special :samp:`{type}_text_fields`
entries in the resource payload. A special type of "raw" is allowed,
which will return the text types stored in the database.
(Note that passing ``?include-text-types=raw`` is equivalent to passing
``?include-raw-text-fields=1`` in 2.0.9 and 2.0.10. The latter is
deprecated.)
"""
TEXT_TYPE_PLAIN = 'plain'
TEXT_TYPE_MARKDOWN = 'markdown'
TEXT_TYPE_HTML = 'html'
TEXT_TYPE_RAW = 'raw'
TEXT_TYPES = (TEXT_TYPE_PLAIN, TEXT_TYPE_MARKDOWN, TEXT_TYPE_HTML)
SAVEABLE_TEXT_TYPES = (TEXT_TYPE_PLAIN, TEXT_TYPE_MARKDOWN)
INCLUDEABLE_TEXT_TYPES = TEXT_TYPES + (TEXT_TYPE_RAW,)
DEFAULT_EXTRA_DATA_TEXT_TYPE = TEXT_TYPE_MARKDOWN
def serialize_object(self, obj, *args, **kwargs):
"""Serializes the object, transforming text fields.
This is a specialization of serialize_object that transforms any
text fields that support text types. It also handles attaching
the raw text to the payload, on request.
"""
data = super(MarkdownFieldsMixin, self).serialize_object(
obj, *args, **kwargs)
request = kwargs.get('request')
if not request:
force_text_type = None
elif request.method == 'GET':
force_text_type = request.GET.get('force-text-type')
else:
force_text_type = request.POST.get('force_text_type')
if force_text_type not in self.TEXT_TYPES:
force_text_type = None
extra_text_type_fields = dict(
(extra_text_type, {})
for extra_text_type in self._get_extra_text_types(obj, **kwargs)
)
for field, field_info in self.fields.items():
if not field_info.get('supports_text_types'):
continue
get_func = getattr(self, 'get_is_%s_rich_text' % field, None)
if callable(get_func):
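                # get_func is rebound on every pass through this loop, but the
                # resulting getter is consumed immediately below (within the
                # same iteration), so the closure never sees a stale value.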
getter = lambda obj, *args: get_func(obj)
else:
getter = lambda obj, data, rich_text_field, text_type_field: \
getattr(obj, rich_text_field, None)
self._serialize_text_info(obj, data, extra_text_type_fields,
field, force_text_type, getter)
if 'extra_data' in data:
extra_data = data['extra_data']
all_text_types_extra_data = {}
if obj.extra_data is None:
obj.extra_data = {}
# Work on a copy of extra_data, in case we change it.
for field, value in obj.extra_data.copy().items():
if not self.get_extra_data_field_supports_markdown(obj, field):
continue
# If all_text_types_extra_data is empty that implies we have
# encountered the first field in extra_data which supports
# markdown. In this case we must initialize the dictionary
# with the extra text types that should be included in the
# payload.
if not all_text_types_extra_data:
all_text_types_extra_data = {
_key: {}
for _key in extra_text_type_fields.keys()
}
# Note that we assume all custom fields are in Markdown by
# default. This is to preserve compatibility with older
# fields. New fields will always have the text_type flag
# set to the proper value.
self._serialize_text_info(
obj, extra_data, all_text_types_extra_data, field,
force_text_type, self._extra_data_rich_text_getter)
for key, values in all_text_types_extra_data.items():
extra_text_type_fields[key]['extra_data'] = values
for key, values in extra_text_type_fields.items():
data[key + '_text_fields'] = values
return data
def _serialize_text_info(self, obj, data, extra_text_type_fields, field,
force_text_type, getter):
text_type_field = self._get_text_type_field_name(field)
rich_text_field = self._get_rich_text_field_name(field)
field_is_rich_text = getter(obj, data, rich_text_field,
text_type_field)
assert field_is_rich_text is not None, \
'No value for field "%s" found in %r' % (rich_text_field, obj)
if field_is_rich_text:
field_text_type = self.TEXT_TYPE_MARKDOWN
else:
field_text_type = self.TEXT_TYPE_PLAIN
value = data.get(field)
if force_text_type:
data[text_type_field] = force_text_type
if value is not None:
data[field] = self._normalize_text(
value, field_is_rich_text, force_text_type)
else:
data[text_type_field] = field_text_type
for extra_text_type in extra_text_type_fields:
if extra_text_type == self.TEXT_TYPE_RAW:
norm_extra_text_type = field_text_type
norm_extra_value = value
else:
norm_extra_text_type = extra_text_type
norm_extra_value = self._normalize_text(
value, field_is_rich_text, extra_text_type)
extra_text_type_fields[extra_text_type].update({
field: norm_extra_value,
text_type_field: norm_extra_text_type,
})
def serialize_text_type_field(self, obj, request=None, **kwargs):
return None
def can_import_extra_data_field(self, obj, field):
"""Returns whether a particular field in extra_data can be imported.
If an extra_data field is marked as supporting rich text, we'll skip
importing it through normal means. Instead, it will be handled
separately later.
"""
return not self.get_extra_data_field_supports_markdown(obj, field)
def get_extra_data_field_supports_markdown(self, obj, key):
"""Returns whether a particular field in extra_data supports Markdown.
If the field supports Markdown text, the value will be normalized
based on the requested ?force-text-type= parameter.
"""
return False
def _get_extra_text_types(self, obj, request=None, **kwargs):
"""Returns any extra text types that should be included in the payload.
This will return the list of extra text types that can be included,
filtering the list by those that are supported.
It also checks for the older ``?include-raw-text-fields=1`` option,
which is the same as using ``?include-text-types=raw``.
"""
extra_text_types = set()
if request:
if request.method == 'GET':
include_types = request.GET.get('include-text-types')
include_raw_text = request.GET.get('include-raw-text-fields')
else:
include_types = request.POST.get('include_text_types')
include_raw_text = request.POST.get('include_raw_text_fields')
if include_raw_text in ('1', 'true'):
extra_text_types.add(self.TEXT_TYPE_RAW)
if include_types:
extra_text_types.update([
text_type
for text_type in include_types.split(',')
if text_type in self.INCLUDEABLE_TEXT_TYPES
])
return extra_text_types
def _normalize_text(self, text, field_is_rich_text, force_text_type):
"""Normalizes text to the proper format.
This considers the requested text format, and whether or not the
value should be set for rich text.
"""
assert force_text_type
if text is not None:
if force_text_type == self.TEXT_TYPE_PLAIN and field_is_rich_text:
text = markdown_unescape(text)
elif (force_text_type == self.TEXT_TYPE_MARKDOWN and
not field_is_rich_text):
text = markdown_escape(text)
elif force_text_type == self.TEXT_TYPE_HTML:
if field_is_rich_text:
text = render_markdown(text)
else:
text = escape(text)
return text
def set_text_fields(self, obj, text_field,
rich_text_field_name=None,
text_type_field_name=None,
text_model_field=None,
**kwargs):
"""Normalize Markdown-capable text fields that are being saved.
Args:
obj (django.db.models.Model):
The object containing Markdown-capable fields to modify.
text_field (unicode):
The key from ``kwargs`` containing the new value for the
text fields.
rich_text_field_name (unicode, optional):
The name of the boolean field on ``obj`` representing the
rich text state.
If not provided, a name will be generated based on the value
of ``text_field`` (``'rich_text'`` if ``text_field`` is
``'text'``, or :samp:`{text_field}_rich_text` otherwise).
text_type_field_name (unicode, optional):
The key from ``kwargs`` containing the text type.
If not provided, a name will be generated based on the value
of ``text_field`` (``'text_type'`` if ``text_field`` is
``'text'``, or :samp:`{text_field}_text_type` otherwise).
text_model_field (unicode, optional):
The name of the text field on ``obj``.
If not provided, ``text_field`` will be used for the value.
**kwargs (dict):
Any fields passed by the client. These should be values
corresponding to ``text_type_field_name`` and ``text_field``
in here.
This may also contain a legacy value of ``'text_type'``,
which will be used if ``text_type_field_name`` is not present.
Returns:
set:
The fields on ``obj`` that were set based on the request.
"""
modified_fields = set()
if not text_model_field:
text_model_field = text_field
if not text_type_field_name:
text_type_field_name = self._get_text_type_field_name(text_field)
if not rich_text_field_name:
rich_text_field_name = self._get_rich_text_field_name(text_field)
old_rich_text = getattr(obj, rich_text_field_name, None)
text = kwargs.get(text_field)
if text is not None:
setattr(obj, text_model_field, text.strip())
modified_fields.add(text_model_field)
legacy_text_type = kwargs.get('text_type')
text_type = kwargs.get(text_type_field_name, legacy_text_type)
if text_type is not None:
rich_text = (text_type == self.TEXT_TYPE_MARKDOWN)
setattr(obj, rich_text_field_name, rich_text)
modified_fields.add(rich_text_field_name)
# If the caller has changed the text type for this field, but
# hasn't provided a new field value, then we will need to update
# the affected field's existing contents by escaping or
# unescaping.
if rich_text != old_rich_text and text is None:
markdown_set_field_escaped(obj, text_model_field, rich_text)
modified_fields.add(text_model_field)
elif old_rich_text:
# The user didn't specify rich-text, but the object may be set
# for rich-text, in which case we'll need to pre-escape the text
# field.
if text is not None:
markdown_set_field_escaped(obj, text_model_field,
old_rich_text)
modified_fields.add(text_model_field)
return modified_fields
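    # e.g. set_text_fields(obj, 'body_top', **kwargs) reads
    # kwargs['body_top'] / kwargs['body_top_text_type'] and toggles
    # obj.body_top_rich_text accordingly (field name illustrative).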
def set_extra_data_text_fields(self, obj, text_field, extra_fields,
**kwargs):
"""Normalize Markdown-capable text fields in extra_data.
This will check if any Markdown-capable text fields in extra_data
have been changed (either by changing the text or the text type),
and handle the saving of the text and type.
This works just like :py:meth:`set_text_fields`, but specially handles
how things are stored in extra_data (text_type vs. rich_text fields,
possible lack of presence of a text_type field, etc.).
Args:
obj (django.db.models.Model):
The object containing Markdown-capable fields to modify.
text_field (unicode):
The key from ``kwargs`` containing the new value for the
text fields.
extra_fields (dict):
Fields passed to the API resource that aren't natively handled
by that resource. These may contain ``extra_data.``-prefixed
keys.
**kwargs (dict):
Any fields passed by the client. This may be checked for a
legacy ``text_type`` field.
Returns:
set:
The keys in ``extra_data`` that were set based on the request.
"""
modified_fields = set()
text_type_field = self._get_text_type_field_name(text_field)
extra_data = obj.extra_data
extra_data_text_field = 'extra_data.' + text_field
extra_data_text_type_field = 'extra_data.' + text_type_field
if extra_data_text_field in extra_fields:
# This field was updated in this request. Make sure it's
# stripped.
extra_data[text_field] = \
extra_fields[extra_data_text_field].strip()
modified_fields.add(text_field)
elif extra_data_text_type_field not in extra_fields:
# Nothing about this field has changed, so bail.
return modified_fields
old_text_type = extra_data.get(text_type_field)
text_type = extra_fields.get(extra_data_text_type_field,
kwargs.get('text_type'))
if text_type is not None:
if old_text_type is None:
old_text_type = self.DEFAULT_EXTRA_DATA_TEXT_TYPE
# If the caller has changed the text type for this field, but
# hasn't provided a new field value, then we will need to update
# the affected field's existing contents by escaping or
# unescaping.
if (text_type != old_text_type and
extra_data_text_field not in extra_fields):
markdown_set_field_escaped(
extra_data, text_field,
text_type == self.TEXT_TYPE_MARKDOWN)
modified_fields.add(text_field)
elif old_text_type:
# The user didn't specify a text type, but the object may be set
# for Markdown, in which case we'll need to pre-escape the text
# field.
if extra_data_text_field in extra_fields:
markdown_set_field_escaped(
extra_data, text_field,
old_text_type == self.TEXT_TYPE_MARKDOWN)
modified_fields.add(text_field)
# Ensure we have a text type set for this field. If one wasn't
# provided or set above, we'll set it to the default now.
extra_data[text_type_field] = \
text_type or self.DEFAULT_EXTRA_DATA_TEXT_TYPE
modified_fields.add(text_type_field)
return modified_fields
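    # Illustrative client payload handled above (hypothetical field "notes"):
    #
    #     extra_data.notes=**hello**
    #     extra_data.notes_text_type=markdown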
def _get_text_type_field_name(self, text_field_name):
if text_field_name == 'text':
return 'text_type'
else:
return '%s_text_type' % text_field_name
def _get_rich_text_field_name(self, text_field_name):
if text_field_name == 'text':
return 'rich_text'
else:
return '%s_rich_text' % text_field_name
def _extra_data_rich_text_getter(self, obj, data, rich_text_field,
text_type_field):
text_type = data.get(text_type_field,
self.DEFAULT_EXTRA_DATA_TEXT_TYPE)
return text_type == self.TEXT_TYPE_MARKDOWN
class UpdateFormMixin(DjbletsUpdateFormMixin):
"""A mixin for providing the ability to create and update using a form.
A WebAPIResource class using this mixin must set the :py:attr:`form_class`
attribute to a :py:class:`ModelForm` instance that corresponds to the model
being updated.
Classes using this mixin can provide methods of the form
``parse_<field_name>_field`` to do parsing of form data before it is passed
to the form. These methods may return either a single value or a list (in
the case where the corresponding field expects a list of values, such as a
:py:class:`django.forms.ModelMultipleChoiceField`).
The :py:meth:`create_form` and :py:meth:`save_form` methods should be used
for creating new form instances and saving them. A form created this way
can be given an optional instance argument to allow for updating the
instance. Any fields missing from data (but appearing in the
:py:class:`form_class`'s :py:attr:`fields` attribute) will be copied over
from the instance.
"""
def handle_form_request(self, **kwargs):
"""Handle an HTTP request for creating or updating through a form.
This simply wraps the parent method and handles
:py:class:`~reviewboard.webapi.base.ImportExtraDataError` exceptions
during form save.
Args:
**kwargs (dict):
Keyword arguments to pass to the parent method.
Returns:
tuple or django.http.HttpResponse:
The response to send back to the client.
"""
try:
return super(UpdateFormMixin, self).handle_form_request(**kwargs)
except ImportExtraDataError as e:
return e.error_payload
def save_form(self, form, save_kwargs, extra_fields=None, **kwargs):
"""Save the form and extra data.
Args:
form (django.forms.ModelForm):
The form to save.
save_kwargs (dict):
Additional keyword arguments to pass to the
:py:class:`ModelForm.save() <django.forms.ModelForm.save>`.
extra_fields (dict, optional):
The extra data to save on the object. These should be key-value
pairs in the form of ``extra_data.key = value``.
**kwargs (dict):
Additional keyword arguments to pass to the parent method.
Returns:
django.db.models.Model:
The saved model instance.
Raises:
reviewboard.webapi.base.ImportExtraDataError:
Extra data failed to import. The form will not be saved.
"""
if save_kwargs:
save_kwargs = save_kwargs.copy()
else:
save_kwargs = {}
save_kwargs['commit'] = False
instance = super(UpdateFormMixin, self).save_form(
form=form,
save_kwargs=save_kwargs,
**kwargs)
if extra_fields:
if not instance.extra_data:
instance.extra_data = {}
self.import_extra_data(instance, instance.extra_data, extra_fields)
instance.save()
form.save_m2m()
return instance
|
{
"content_hash": "cb6c7873c7d0a0bb2de8fd1a8a1de163",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 79,
"avg_line_length": 39.772128060263654,
"alnum_prop": 0.5828874473223163,
"repo_name": "reviewboard/reviewboard",
"id": "dcbbf7ac04af3fb0f0250c2469c80e0292dffbd7",
"size": "21119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0032_add_bulk_delete_page_permission"),
]
operations = [
migrations.AlterField(
model_name="page",
name="expire_at",
field=models.DateTimeField(
blank=True, null=True, verbose_name="expiry date/time"
),
),
migrations.AlterField(
model_name="page",
name="go_live_at",
field=models.DateTimeField(
blank=True, null=True, verbose_name="go live date/time"
),
),
]
|
{
"content_hash": "e1074c919567f477d81fcef9b994503f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 26.44,
"alnum_prop": 0.5340393343419062,
"repo_name": "wagtail/wagtail",
"id": "fb5fd1913e230ba3b6e4fd91d853e8251c55a873",
"size": "734",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/migrations/0033_remove_golive_expiry_help_text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
}
|
import getopt
import os
import re
import sys
import subprocess
def usage():
print >> sys.stderr, """
Usage:
python test_harness.py [ -c config_file ] [ -t gtestfile ] [ -p path_to_gtestfile ]
where:
config_file: test_harness config file; default: test_harness.conf
gtestfile: name of gtest executable file; default: ajtest
path_to_gtestfile: optional path to directory containing gtestfile
"""
def main(argv=None):
# get commandline options
conffile='test_harness.conf'
testpath=''
gtestfileopt=''
if argv is None:
argv=[]
if len(argv) > 0:
try:
opts, junk = getopt.getopt(argv, 'c:p:t:')
if junk:
print >> sys.stderr, 'error, unrecognized arguments ' + str(junk)
usage()
return 2
for opt, val in opts:
if opt == '-c':
conffile = val
elif opt == '-p':
testpath = val
elif opt == '-t':
gtestfileopt= val
except getopt.GetoptError, err:
print >> sys.stderr, 'error, ' + str(err)
usage()
return 2
# initialize
dict = []
filter = ''
negfilter = ''
gtestfile = ''
part = ''
re_comment = re.compile( r'\s*#.*$' )
re_equals = re.compile( r'\s*=\s*' )
re_lastcolon = re.compile( r':$' )
re_TestCases = re.compile( r'^\[\s*Test\s*Cases\s*\]', re.I )
re_Environment = re.compile( r'^\[\s*Environment\s*\]', re.I )
re_GTestFile = re.compile( r'^\[\s*GTest\s*File\s*\]', re.I )
# read config file one line at a time
try:
with open( conffile, 'r' ) as fileptr:
for line in fileptr:
# strip leading and trailing whitespace
line = line.strip()
line = line.strip('\n')
# strip trailing comment (and preceding whitespace), if any
line = re_comment.sub( '', line )
# search line for part header
if re_TestCases.search( line ):
# line ~= [ TestCases ]
part = 'TestCases'
continue
elif re_Environment.search( line ):
# line ~= [ Environment ]
part = 'Environment'
print '[Environment]'
continue
elif re_GTestFile.search( line ):
# line ~= [ GTestFile ]
part = 'GTestFile'
continue
else:
# line is none of the above
# split line around equals sign (and surrounding whitespace), if any
dict = re_equals.split( line, 1 )
if (len(dict) > 1):
# line ~= something = something
if part == 'TestCases':
# Can select individual tests as well as groups.
# That is, TestCase selection can look like Foo.Bar=YES, not just Foo=YES.
                            # You can also use negative selection, like *=YES followed by Foo.Bar=NO.
d0 = dict[0].split('.',1)
if (dict[1].upper() == 'YES' or dict[1].upper() == 'Y'):
if (len(d0) > 1):
filter = filter + dict[0] + ':'
else:
filter = filter + dict[0] + '.*' + ':'
elif (dict[1].upper() == 'NO' or dict[1].upper() == 'N'):
if (len(d0) > 1):
negfilter = negfilter + dict[0] + ':'
else:
negfilter = negfilter + dict[0] + '.*' + ':'
elif part == 'Environment':
os.putenv(dict[0],dict[1])
print '\t%s="%s"' % ( dict[0], dict[1] )
elif part == 'GTestFile':
# the file name might contain = character
gtestfile = line
elif part == 'GTestFile' and line != '':
gtestfile = line
else:
# line is unusable
continue
except IOError:
print >> sys.stderr, 'error opening config file "%s"' % conffile
return 2
# assemble the path to gtestfile to execute
command = gtestfile
if gtestfileopt != '':
command = gtestfileopt
if command == '':
command = 'ajtest'
if testpath != '':
command = os.path.join( testpath, command )
print '[GTestFile]\n\t%s' % command
if not ( os.path.exists( command ) or os.path.exists( command + '.exe' ) ):
print >> sys.stderr, 'error, GTestFile="%s" not found' % command
return 2
command=[command]
# assemble the gtest filter, if any
if filter == '' and negfilter == '':
pass
elif filter != '' and negfilter == '':
filter = re_lastcolon.sub( '', filter )
elif filter == '' and negfilter != '':
filter = '*' + '-' + re_lastcolon.sub( '', negfilter )
else:
filter = re_lastcolon.sub( '', filter ) + '-' + re_lastcolon.sub( '', negfilter)
if filter != '':
print '[TestCases]\n\t%s' % filter
command.append('--gtest_filter=' + filter)
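    # e.g. config lines "Foo=YES" and "Bar.Baz=NO" produce
    # --gtest_filter=Foo.*-Bar.Baz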
# execute the gtestfile with filter argument, if any
# exit status 0 if no errors, 1 if any tests failed, 2 if system error
if subprocess.call(command) == 0:
return 0
else:
return 1
if __name__ == '__main__':
if len(sys.argv) > 1:
sys.exit(main(sys.argv[1:]))
else:
sys.exit(main())
|
{
"content_hash": "4198fa43d66dbb1154dfcdf97201103e",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 102,
"avg_line_length": 33.29050279329609,
"alnum_prop": 0.4576271186440678,
"repo_name": "ADVANTECH-Corp/node-alljoyn",
"id": "bb3fec9afd75903c97e44a2246894671e8f2b9c2",
"size": "6779",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "alljoyn/alljoyn_c/unit_test/test_report/test_harness.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4995"
},
{
"name": "C",
"bytes": "638481"
},
{
"name": "C#",
"bytes": "775008"
},
{
"name": "C++",
"bytes": "12965271"
},
{
"name": "CSS",
"bytes": "17461"
},
{
"name": "Groff",
"bytes": "3068"
},
{
"name": "HTML",
"bytes": "45149"
},
{
"name": "Java",
"bytes": "4802386"
},
{
"name": "JavaScript",
"bytes": "606220"
},
{
"name": "Makefile",
"bytes": "42536"
},
{
"name": "Objective-C",
"bytes": "1829239"
},
{
"name": "Objective-C++",
"bytes": "856772"
},
{
"name": "Python",
"bytes": "559767"
},
{
"name": "Shell",
"bytes": "40697"
},
{
"name": "TeX",
"bytes": "817"
},
{
"name": "Visual Basic",
"bytes": "1285"
},
{
"name": "XSLT",
"bytes": "100471"
}
],
"symlink_target": ""
}
|
"""CLI for Server Density.
"""
import os
import json
import math
import datetime
import requests
import urlparse
URL_SD = 'https://api.serverdensity.io'
PATH_TOKEN = '~/.sd_token'
def parse_response(response):
try:
data = json.loads(response.text)
except ValueError:
data = {}
if response.status_code == 200:
return data
else:
print "Error:"
print " %s" % data['message']
def sd_post(path, payload):
response = requests.post(
urlparse.urljoin(URL_SD, path),
headers={
"content-type": "application/json"
},
data=json.dumps(payload)
)
return parse_response(response)
def sd_get(path, payload):
response = requests.get(
urlparse.urljoin(URL_SD, path),
params=payload
)
return parse_response(response)
def save_token(token):
try:
file(os.path.expanduser(PATH_TOKEN), 'w').write(token)
except IOError:
print "Unable to save token to %s" % PATH_TOKEN
def get_token():
try:
return file(os.path.expanduser(PATH_TOKEN)).read().strip()
except IOError:
print "You need an authentication token. " \
"Run 'sd auth' first"
def auth_user(account, user, password):
payload = {
"username": user,
"password": password,
"accountName": account,
"tokenName": 'SD Command line interface',
}
data = sd_post('tokens', payload)
if data:
print "Authenticated."
token = data['token_data']['token']
save_token(token)
def auth_token(token):
payload = {
"token": token,
"fields": json.dumps(['name'])
}
data = sd_get('inventory/devices', payload)
if data:
print "Token verified."
save_token(token)
def device_list():
token = get_token()
if not token:
return
payload = {
"token": token,
"fields": json.dumps(['name', 'hostname'])
}
data = sd_get('inventory/devices', payload)
if data:
for i in data:
print '%s (%s)' % (i['name'], i['hostname'])
def device_get(subject_id):
token = get_token()
if not token:
return
payload = {
"token": token
}
data = sd_get('inventory/devices/' + subject_id, payload)
if data:
return data['name']
def device_search(name):
token = get_token()
if not token:
return
payload = {
"token": token,
"filter": json.dumps({"name": name, "type": "device"})
}
data = sd_get('inventory/resources', payload)
if data and len(data):
return data[0]['_id']
def service_list():
token = get_token()
if not token:
return
payload = {
"token": token,
"fields": json.dumps(['name', 'checkUrl', 'currentStatus'])
}
data = sd_get('inventory/services', payload)
if data:
for i in data:
print '%s (%s) %s' % (i['name'], i['checkUrl'], i['currentStatus'])
def service_get(subject_id):
token = get_token()
if not token:
return
payload = {
"token": token
}
data = sd_get('inventory/services/' + subject_id, payload)
if data:
return "%s (%s)" % (data['name'], data['checkUrl'])
def alerts_list():
token = get_token()
if not token:
return
payload = {
"token": token,
"closed": 'false'
}
data = sd_get('alerts/triggered', payload)
if not data:
print "No alerts. Yay!"
return
for i in data:
field = i['config']['fullField']
if i['config']['subjectType'] == 'device':
device = device_get(i['config']['subjectId'])
print "Device: %s - %s" % (device, field)
elif i['config']['subjectType'] == 'service':
service = service_get(i['config']['subjectId'])
print "Service: %s - %s" % (service, field)
def get_bar(value, high=100.0):
bars = ['▁', '▂', '▃', '▅', '▆', '▇']
x = value * (len(bars) - 1) / high
x = int(math.ceil(x))
x = min(x, len(bars) - 1)
return bars[x]
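# Illustrative mapping: get_bar(0) returns bars[0] and get_bar(100) returns
# bars[-1], so a series of percentages renders as a one-line sparkline.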
def sizeof_fmt(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def parse_cpu_metrics(data):
for i in data:
if i['name'] == 'ALL':
for j in i['tree']:
name = j['name']
values = [x['y'] for x in j['data']]
if name in ['System', 'User']:
print " %s" % name
print " %s" % ''.join([get_bar(x) for x in values])
def parse_memory_metrics(data):
for i in data:
name = i['name']
values = [x['y'] for x in i['data']]
if name in ['Physical used', 'Cached used']:
high = max(values)
size = sizeof_fmt(high * 1024 * 1024)
print " %s (%s max)" % (name, size)
print " %s" % ''.join([get_bar(x, high) for x in values])
def parse_network_metrics(data):
for i in data:
iface = i['name']
for j in i['tree']:
name = j['name']
values = [x['y'] for x in j['data']]
if any(values) and 'MB' in name:
high = max(values)
size = sizeof_fmt(high * 1024 * 1024)
print " %s %s (%s max)" % (iface, name, size)
print " %s" % ''.join([get_bar(x) for x in values])
def parse_metrics(data):
for i in data:
name = i['name']
key = i['key']
values = i['tree']
if key == 'cpuStats':
print name
parse_cpu_metrics(values)
print
elif key == 'networkTraffic':
print name
parse_network_metrics(values)
print
elif key == 'memory':
print name
parse_memory_metrics(values)
print
def get_metrics(name):
token = get_token()
if not token:
return
subject_id = device_search(name)
if not subject_id:
print "No such device name."
return
    start = datetime.datetime.utcnow() - datetime.timedelta(seconds=3600)
    end = datetime.datetime.utcnow()
payload = {
"token": token,
"start": start.isoformat(),
"end": end.isoformat(),
"filter": json.dumps({'cpuStats': 'all',
'memory': 'all',
'networkTraffic': 'all'})
}
data = sd_get('metrics/graphs/' + subject_id, payload)
if data:
parse_metrics(data)
|
{
"content_hash": "b239ad3adfd301d6c91cf1d3eeef8b6e",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 23.3943661971831,
"alnum_prop": 0.5127934978928357,
"repo_name": "bahadir/sd-cli",
"id": "420c6417d3c935b02d5bf510130d6f8fde482d15",
"size": "6680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdclient/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8422"
}
],
"symlink_target": ""
}
|
"""
WSGI config for protwis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "protwis.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "0035ecb289df96e781f3deb0f9f79640",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.7737789203084833,
"repo_name": "protwis/protwis",
"id": "6278f5eb3a5ff92161944ae33724b8690de2b4c3",
"size": "389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "protwis/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167612"
},
{
"name": "HTML",
"bytes": "2477269"
},
{
"name": "JavaScript",
"bytes": "3119217"
},
{
"name": "Promela",
"bytes": "467"
},
{
"name": "Python",
"bytes": "4289933"
}
],
"symlink_target": ""
}
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
import base64
import json
import traceback
''' CLIENT CLASS '''
class Client(BaseClient):
def __init__(self, base_url, *args, **kwarg):
super().__init__(base_url, *args, **kwarg)
def get_destination_lists(self, organizationId):
uri = f'{organizationId}/destinationlists'
return self._http_request('GET', uri)
def get_destinations(self, organizationId, destinationListId, params=None):
uri = f'/{organizationId}/destinationlists/{destinationListId}/destinations'
return self._http_request('GET', uri, params=params)
def add_domain(self, organizationId, destinationListId, data):
uri = f'{organizationId}/destinationlists/{destinationListId}/destinations'
return self._http_request('POST', uri, data=data)
def remove_domain(self, organizationId, destinationListId, data):
# https://docs.umbrella.com/umbrella-api/reference#delete_v1-organizations-organizationid-destinationlists-destinationlistid-destinations-remove
uri = f'{organizationId}/destinationlists/{destinationListId}/destinations/remove'
return self._http_request('DELETE', uri, data=data)
''' HELPER FUNCTIONS '''
def get_first_page_of_destinations(client, organizationId, destinationListId):
page_limit = 100
page = 1
r = client.get_destinations(organizationId, destinationListId, params={'page': page, 'limit': page_limit})
return page_limit, page, r
def get_destination_domains(client, organizationId, destinationListId):
page_limit, page, r = get_first_page_of_destinations(client, organizationId, destinationListId)
destination_domains = []
while r.get('data'):
if r.get('meta').get('total') == 0:
uri = f'/{organizationId}/destinationlists/{destinationListId}/destinations'
demisto.info(f'Unexpected "total" value of 0 returned from Umbrella {uri} API call')
break
destination_domains += r.get('data')
page += 1
r = client.get_destinations(organizationId, destinationListId, params={'page': page, 'limit': page_limit})
return destination_domains
def get_destination_domain(client, organizationId, destinationListId, domain):
demisto.debug(f'domain: {domain}')
page_limit, page, r = get_first_page_of_destinations(client, organizationId, destinationListId)
destination_domain = None
while r.get('data') and not destination_domain:
if r.get('meta').get('total') == 0:
uri = f'/{organizationId}/destinationlists/{destinationListId}/destinations'
demisto.info(f'Unexpected "total" value of 0 returned from Umbrella {uri} API call')
break
for d in r.get('data'):
if d.get('destination') == domain:
destination_domain = d
break
page += 1
r = client.get_destinations(organizationId, destinationListId, params={'page': page, 'limit': page_limit})
return destination_domain
def search_destination_domains(client, organizationId, destinationListId, domains):
demisto.debug(f'domains: {domains}')
page_limit, page, r = get_first_page_of_destinations(client, organizationId, destinationListId)
destination_domains = []
while r.get('data'):
if r.get('meta').get('total') == 0:
uri = f'/{organizationId}/destinationlists/{destinationListId}/destinations'
demisto.info(f'Unexpected "total" value of 0 returned from Umbrella {uri} API call')
break
destination_domains += r.get('data')
page += 1
r = client.get_destinations(organizationId, destinationListId, params={'page': page, 'limit': page_limit})
destination_domains_found = []
if destination_domains:
for domain in domains:
if domain in demisto.dt(destination_domains, 'destination'):
destination_domains_found += [d for d in destination_domains if d.get('destination') == domain]
demisto.debug(f'destination_domains_found: {destination_domains_found}')
return destination_domains_found
''' COMMAND FUNCTIONS '''
def test_module(client: Client, **args) -> str:
organizationId = args.get('orgId')
if not organizationId:
return "organizationId not provided"
uri = f'/{organizationId}/destinationlists'
client._http_request('GET', uri)
return "ok"
def get_destination_lists_command(client: Client, **args) -> CommandResults:
r = client.get_destination_lists(args.get('orgId'))
data = []
for destination_list in r['data']:
data.append(
{
'name': destination_list['name'],
'id': destination_list['id']
}
)
return CommandResults(
outputs_prefix="Umbrella.DestinationLists",
outputs_key_field="id",
outputs=data
)
def add_domain_command(client: Client, **args) -> str:
destinations = argToList(args.get('domains'))
comment = args.get('comment')
# max allowable limit of destinations to send in one request is 500
limit = 500
if len(destinations) > limit:
destinations_remaining = destinations
while destinations_remaining:
demisto.debug(f'length of destinations_remaining: {len(destinations_remaining)}')
destinations_limited = destinations_remaining[0:limit]
payload = json.dumps([{'destination': destination, 'comment': comment} for destination in destinations_limited])
r = client.add_domain(args.get('orgId'), args.get('destId'), data=payload)
destinations_remaining = destinations_remaining[limit:]
else:
payload = json.dumps([{'destination': destination, 'comment': comment} for destination in destinations])
r = client.add_domain(args.get('orgId'), args.get('destId'), data=payload)
return f'Domain(s) {", ".join(destinations)} successfully added to list {r["data"]["name"]}'
def remove_domain_command(client: Client, **args) -> str:
destinations = argToList(args.get('domainIds'))
payload = "[" + ", ".join(destinations) + "]"
client.remove_domain(args.get('orgId'), args.get('destId'), data=payload)
return f'Domain(s) {", ".join(destinations)} successfully removed from list'
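# For example (hypothetical ids), domainIds="123,456" produces the payload
# string '[123, 456]', i.e. a JSON array of destination ids for the API.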
def get_destination_domains_command(client: Client, **args) -> CommandResults:
destination_domains = get_destination_domains(client, args.get('orgId'), args.get('destId'))
return CommandResults(
outputs_prefix="Umbrella.Destinations",
outputs_key_field="id",
outputs=destination_domains,
readable_output=tableToMarkdown('Domains in Destination List', destination_domains)
)
def get_destination_domain_command(client: Client, **args) -> CommandResults:
destination_domain = get_destination_domain(client, args.get('orgId'), args.get('destId'), args.get('domain'))
return CommandResults(
outputs_prefix="Umbrella.Destinations",
outputs_key_field="id",
outputs=destination_domain,
readable_output=tableToMarkdown('Domain in Destination List', destination_domain)
)
def search_destination_domains_command(client: Client, **args) -> CommandResults:
domains = argToList(args.get('domains'))
destination_domains = search_destination_domains(client, args.get('orgId'), args.get('destId'), domains)
return CommandResults(
outputs_prefix="Umbrella.Destinations",
outputs_key_field="id",
outputs=destination_domains,
readable_output=tableToMarkdown('Domains in Destination List', destination_domains)
)
def main():
# If an arg supplying an orgId is provided, will override the one found in params
args = {**demisto.params(), **demisto.args()}
base_url = 'https://management.api.umbrella.com/v1/organizations'
api_key = base64.b64encode(f'{demisto.getParam("apiKey")}:{demisto.getParam("apiSecret")}'.encode("ascii"))
verify = not args.get('insecure', False)
proxy = args.get('proxy', False)
headers = {
'Accept': "application/json",
'Content-Type': "application/json",
'Authorization': f'Basic {api_key.decode("ascii")}'
}
try:
client = Client(
base_url,
verify=verify,
headers=headers,
proxy=proxy
)
commands = {
'umbrella-get-destination-lists': get_destination_lists_command,
'umbrella-add-domain': add_domain_command,
'umbrella-remove-domain': remove_domain_command,
'umbrella-get-destination-domains': get_destination_domains_command,
'umbrella-get-destination-domain': get_destination_domain_command,
'umbrella-search-destination-domains': search_destination_domains_command,
'test-module': test_module
}
command = demisto.command()
if command in commands:
return_results(commands[command](client, **args))
else:
return_error(f'Command {command} is not available in this integration')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
{
"content_hash": "611aa7aef3af68bb3a5e908b0393dc73",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 152,
"avg_line_length": 37.51383399209486,
"alnum_prop": 0.6584132335897166,
"repo_name": "VirusTotal/content",
"id": "1cd32965aacb101023559460d813a8feb402a73a",
"size": "9491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Cisco-umbrella-cloud-security/Integrations/Cisco-umbrella-cloud-security/Cisco-umbrella-cloud-security.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
"""This module contains special query helper class for query API."""
from ggrc.builder import json
from ggrc.converters.query_helper import QueryHelper
from ggrc.utils import benchmark
# pylint: disable=too-few-public-methods
class QueryAPIQueryHelper(QueryHelper):
"""Helper class for handling request queries for query API.
query object = [
{
# the same parameters as in QueryHelper
type: "values", "ids" or "count" - the type of results requested
fields: [ a list of fields to include in JSON if type is "values" ]
}
]
After the query is done (by `get_results` method), the results are appended
to each query object:
query object with results = [
{
# the same fields as in QueryHelper
values: [ filtered objects in JSON ] (present if type is "values")
ids: [ ids of filtered objects ] (present if type is "ids")
count: the number of objects filtered, after "limit" is applied
total: the number of objects filtered, before "limit" is applied
"""
def get_results(self):
"""Filter the objects and get their information.
Updates self.query items with their results. The type of results required
is read from "type" parameter of every object_query in self.query.
Returns:
list of dicts: same query as the input with requested results that match
the filter.
"""
for object_query in self.query:
query_type = object_query.get("type", "values")
if query_type not in {"values", "ids", "count"}:
raise NotImplementedError("Only 'values', 'ids' and 'count' queries "
"are supported now")
model = self.object_map[object_query["object_name"]]
if query_type == "values":
with benchmark("Get result set: get_results > _get_objects"):
objects = self._get_objects(object_query)
object_query["count"] = len(objects)
with benchmark("get_results > _get_last_modified"):
object_query["last_modified"] = self._get_last_modified(model,
objects)
with benchmark("serialization: get_results > _transform_to_json"):
object_query["values"] = self._transform_to_json(
objects,
object_query.get("fields"),
)
else:
with benchmark("Get result set: get_results -> _get_ids"):
ids = self._get_ids(object_query)
object_query["count"] = len(ids)
object_query["last_modified"] = None # synonymous to now()
if query_type == "ids":
object_query["ids"] = ids
return self.query
@staticmethod
def _transform_to_json(objects, fields=None):
"""Make a JSON representation of objects from the list."""
objects_json = [json.publish(obj) for obj in objects]
objects_json = json.publish_representation(objects_json)
if fields:
objects_json = [{f: o.get(f) for f in fields}
for o in objects_json]
return objects_json
@staticmethod
def _get_last_modified(model, objects):
"""Get the time of last update of an object in the list."""
if not objects or not hasattr(model, "updated_at"):
return None
else:
return max(obj.updated_at for obj in objects)
|
{
"content_hash": "f5792a79918714730e7ae13bfb13ba74",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 38.96470588235294,
"alnum_prop": 0.6271135265700483,
"repo_name": "selahssea/ggrc-core",
"id": "5a55688e68ecf07871b72690274fe760efd70c63",
"size": "3425",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "src/ggrc/services/query_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "211857"
},
{
"name": "HTML",
"bytes": "1056523"
},
{
"name": "JavaScript",
"bytes": "1852333"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2613417"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
}
|
__author__ = 'rhoerbe'
import json, sys
jdata = json.load(sys.stdin)
for k in jdata:
print k["id"]
|
{
"content_hash": "f45c9d4e9624451a66d0469a16df31a8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.6346153846153846,
"repo_name": "rohe/saml2test",
"id": "a7e59c16d43f4708203c419dbbf7c6a79833ab36",
"size": "273",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script/utility/filter_testcase_ids.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "5576"
},
{
"name": "Python",
"bytes": "262600"
},
{
"name": "Shell",
"bytes": "5473"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module."""
from os.path import join
from pathlib import Path
from setuptools import setup, find_namespace_packages
with open(Path(__file__).parent / "README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="gem5art-run",
version="1.4.0",
description="A collection of utilities for running gem5",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://www.gem5.org/",
author="Davis Architecture Research Group (DArchR)",
author_email="jlowepower@ucdavis.edu",
license="BSD",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Topic :: System :: Hardware",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
],
keywords="simulation architecture gem5",
packages=find_namespace_packages(),
install_requires=["gem5art-artifact"],
python_requires=">=3.6",
project_urls={
"Bug Reports": "https://gem5.atlassian.net/",
"Source": "https://gem5.googlesource.com/",
"Documentation": "https://www.gem5.org/documentation/gem5art",
},
scripts=[
"bin/gem5art-getruns",
],
)
|
{
"content_hash": "4685b04d93f0549d46380a512be2706b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 70,
"avg_line_length": 31.8,
"alnum_prop": 0.639937106918239,
"repo_name": "gem5/gem5",
"id": "98ff18038631d261e0bd1207fca38777b45b0dc9",
"size": "2828",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "util/gem5art/run/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
}
|
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException, InvalidSelectorException, WebDriverException
from . import exceptions
from .identifier import Identifier
from .locator import locator_to_by_value
from .waiter import Waiter
class Context:
def __init__(self):
self.__wait_interval = None
self.__wait_timeout = None
def get_web_driver(self):
pass
def get_web_driver_info(self):
pass
def _selenium_context(self):
pass
def _refresh(self):
pass
def persist(self):
pass
def get_screenshot_as_file(self, filename):
pass
def save_screenshot(self, filename):
pass
def get_screenshot_as_png(self):
pass
def get_screenshot_as_base64(self):
pass
def get_wait_interval(self):
"""
Get the wait interval of this context.
        If the wait interval of this context is not set, return the driver's wait interval.
:return: the wait interval
"""
if self.__wait_interval is not None:
return self.__wait_interval
return self.get_web_driver().get_wait_interval()
def set_wait_interval(self, interval):
"""
Set the wait interval of this context.
:param interval: the new wait interval (in milliseconds)
"""
self.__wait_interval = interval
def get_wait_timeout(self):
"""
Get the wait timeout of this context.
        If the wait timeout of this context is not set, return the driver's wait timeout.
:return: the wait timeout
"""
if self.__wait_timeout is not None:
return self.__wait_timeout
return self.get_web_driver().get_wait_timeout()
def set_wait_timeout(self, timeout):
"""
Set the wait timeout of this context.
:param timeout: the new wait timeout (in milliseconds)
"""
self.__wait_timeout = timeout
def wait_for(self, interval=None, timeout=None):
pass
def waiter(self, interval=None, timeout=None):
"""
Get a Waiter instance.
:param interval: the wait interval (in milliseconds). If None, use context's wait interval.
        :param timeout: the wait timeout (in milliseconds). If None, use context's wait timeout.
"""
_interval = self.get_wait_interval() if interval is None else interval
_timeout = self.get_wait_timeout() if timeout is None else timeout
return Waiter(_interval, _timeout)
def _find_selenium_element(self, locator):
by, value = locator_to_by_value(locator)
try:
try:
return self._selenium_context().find_element(by, value)
except StaleElementReferenceException:
self._refresh()
return self._selenium_context().find_element(by, value)
except InvalidSelectorException:
raise exceptions.InvalidLocatorException("The value <%s> of locator <%s> is not a valid expression." % (value, locator), self)
except NoSuchElementException:
raise exceptions.NoSuchElementException("Cannot find element by <%s> under:" % locator, self)
except WebDriverException as wde:
raise exceptions.EasyiumException(wde.msg, self)
def has_child(self, locator):
"""
Whether this context has a child element.
:param locator:
the locator (relative to this context) of the child element.
The format of locator is: "by=value", the possible values of "by" are::
"id": By.ID
"xpath": By.XPATH
"link": By.LINK_TEXT
"partial_link": By.PARTIAL_LINK_TEXT
"name": By.NAME
"tag": By.TAG_NAME
"class": By.CLASS_NAME
"css": By.CSS_SELECTOR
"ios_pre": MobileBy.IOS_PREDICATE
"ios_ui": MobileBy.IOS_UIAUTOMATION
"ios_class": MobileBy.IOS_CLASS_CHAIN
"android_ui": MobileBy.ANDROID_UIAUTOMATOR
"android_tag": MobileBy.ANDROID_VIEWTAG
"android_data": MobileBy.ANDROID_DATA_MATCHER
"acc_id": MobileBy.ACCESSIBILITY_ID
"custom": MobileBy.CUSTOM
:return: whether this context has a child element.
"""
return self.find_element(locator) is not None
def find_element(self, locator, identifier=Identifier.id, condition=lambda element: True):
"""
Find a DynamicElement under this context.
Note: if no element is found, None will be returned.
:param locator:
the locator (relative to this context) of the element to be found.
The format of locator is: "by=value", the possible values of "by" are::
"id": By.ID
"xpath": By.XPATH
"link": By.LINK_TEXT
"partial_link": By.PARTIAL_LINK_TEXT
"name": By.NAME
"tag": By.TAG_NAME
"class": By.CLASS_NAME
"css": By.CSS_SELECTOR
"ios_pre": MobileBy.IOS_PREDICATE
"ios_ui": MobileBy.IOS_UIAUTOMATION
"ios_class": MobileBy.IOS_CLASS_CHAIN
"android_ui": MobileBy.ANDROID_UIAUTOMATOR
"android_tag": MobileBy.ANDROID_VIEWTAG
"android_data": MobileBy.ANDROID_DATA_MATCHER
"acc_id": MobileBy.ACCESSIBILITY_ID
"custom": MobileBy.CUSTOM
:param identifier:
the identifier is a function to generate the locator of the found element, you can get the standard ones in class Identifier.
Otherwise, you can create one like this::
context.find_element("class=foo", lambda e: "xpath=.//*[@bar='%s']" % e.get_attribute("bar"))
:param condition:
end finding element when the found element match the condition function.
e.g., end finding element when the found element is not None
context.find_element("class=foo", condition=lambda element: element)
:return: the DynamicElement found by locator
"""
# import the DynamicElement here to avoid cyclic dependency
from .dynamic_element import DynamicElement
by, value = locator_to_by_value(locator)
element = {"inner": None}
def _find_element():
try:
try:
element["inner"] = DynamicElement(self, self._selenium_context().find_element(by, value), locator, identifier)
return element["inner"]
except (exceptions.NoSuchElementException, StaleElementReferenceException):
# Only Element can reach here
self.wait_for().exists()
element["inner"] = DynamicElement(self, self._selenium_context().find_element(by, value), locator, identifier)
return element["inner"]
except InvalidSelectorException:
raise exceptions.InvalidLocatorException("The value <%s> of locator <%s> is not a valid expression." % (value, locator), self)
except NoSuchElementException:
element["inner"] = None
return element["inner"]
except WebDriverException as wde:
raise exceptions.EasyiumException(wde.msg, self)
try:
self.waiter().wait_for(lambda: condition(_find_element()))
except exceptions.TimeoutException as e:
if e.__class__ == exceptions.ElementTimeoutException:
# raised by self.wait_for().exists() in _find_element()
raise
raise exceptions.TimeoutException(
"Timed out waiting for the found element by <%s> under:\n%s\nmatches condition <%s>." % (locator, self, condition.__name__))
return element["inner"]
def find_elements(self, locator, identifier=Identifier.id, condition=lambda elements: True):
"""
Find DynamicElement list under this context.
Note: if no elements is found, empty list will be returned.
:param locator:
the locator (relative to this context) of the elements to be found.
The format of locator is: "by=value", the possible values of "by" are::
"id": By.ID
"xpath": By.XPATH
"link": By.LINK_TEXT
"partial_link": By.PARTIAL_LINK_TEXT
"name": By.NAME
"tag": By.TAG_NAME
"class": By.CLASS_NAME
"css": By.CSS_SELECTOR
"ios_pre": MobileBy.IOS_PREDICATE
"ios_ui": MobileBy.IOS_UIAUTOMATION
"ios_class": MobileBy.IOS_CLASS_CHAIN
"android_ui": MobileBy.ANDROID_UIAUTOMATOR
"android_tag": MobileBy.ANDROID_VIEWTAG
"android_data": MobileBy.ANDROID_DATA_MATCHER
"acc_id": MobileBy.ACCESSIBILITY_ID
"custom": MobileBy.CUSTOM
:param identifier:
the identifier is a function to generate the locator of the found elements, you can get the standard ones in class Identifier.
Otherwise, you can create one like this::
context.find_elements("class=foo", identifier=lambda element: "xpath=.//*[@bar='%s']" % element.get_attribute("bar"))
:param condition:
end finding elements when the found element list match the condition function.
e.g., end finding elements when the found element list is not empty
context.find_elements("class=foo", condition=lambda elements: elements)
:return: the DynamicElement list found by locator
"""
# import the DynamicElement here to avoid cyclic dependency
from .dynamic_element import DynamicElement
by, value = locator_to_by_value(locator)
elements = {"inner": []}
def _find_elements():
try:
try:
selenium_elements = self._selenium_context().find_elements(by, value)
elements["inner"] = [DynamicElement(self, selenium_element, locator, identifier) for selenium_element in selenium_elements]
return elements["inner"]
except (exceptions.NoSuchElementException, StaleElementReferenceException):
# Only Element can reach here
self.wait_for().exists()
selenium_elements = self._selenium_context().find_elements(by, value)
elements["inner"] = [DynamicElement(self, selenium_element, locator, identifier) for selenium_element in selenium_elements]
return elements["inner"]
except InvalidSelectorException:
raise exceptions.InvalidLocatorException("The value <%s> of locator <%s> is not a valid expression." % (value, locator), self)
except WebDriverException as wde:
raise exceptions.EasyiumException(wde.msg, self)
try:
self.waiter().wait_for(lambda: condition(_find_elements()))
except exceptions.TimeoutException as e:
if e.__class__ == exceptions.ElementTimeoutException:
# raised by self.wait_for().exists() in _find_elements()
raise
raise exceptions.TimeoutException(
"Timed out waiting for the found element list by <%s> under:\n%s\nmatches condition <%s>." % (
locator, self, condition.__name__))
return elements["inner"]
|
{
"content_hash": "9c26877bfdbe7c72e5952d2733772ea1",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 143,
"avg_line_length": 42.34172661870504,
"alnum_prop": 0.5889898904086314,
"repo_name": "KarlGong/easyium-python",
"id": "6473f41dfa7f4461f5a610965974c539fe1ab1b1",
"size": "11771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easyium/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "150921"
}
],
"symlink_target": ""
}
|
from django import VERSION as django_version
from django.contrib import admin
from django.contrib import messages
from django.template.defaultfilters import (
linebreaksbr, removetags
)
from django.contrib.humanize.templatetags.humanize import naturalday
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django import forms
from django.conf import settings
from itertools import chain
from operator import attrgetter
from models import (
OneLiner, Note, JOT_MESSAGE_LEVEL
)
from forms import (
OneLinerForm, NoteForm
)
def notify_if_not_duplicate(request, item):
the_message_content = mark_safe("""<strong><a title='{dismiss_title}'
href='{dismiss_url}'>×</a></strong>
<small><em>{date}</em></small>,
<strong>{created_by}</strong>:
{message}""".format(
created_by=item.created_by,
date=naturalday(item.date),
message=linebreaksbr(removetags(item.content,'script')),
dismiss_title=_('Dismiss'),
dismiss_url=item.get_delete_url()
))
jot_messages = [msg for msg in messages.get_messages(request) \
if msg.level == JOT_MESSAGE_LEVEL]
in_jot_messages = [msg for msg in jot_messages \
if msg.message == the_message_content]
if not in_jot_messages:
messages.add_message(request, JOT_MESSAGE_LEVEL,
the_message_content, fail_silently=True)
def jot_notifications(request, ModelContentType, obj_id):
filter_by = {'content_type':ModelContentType,
'object_id':obj_id
}
oneliners = OneLiner.objects.filter(**filter_by)
notes = Note.objects.filter(**filter_by)
items = sorted(chain(oneliners, notes),
key=attrgetter('date'))
for item in items:
if item.personal:
if item.created_by == request.user:
notify_if_not_duplicate(request, item)
else:
notify_if_not_duplicate(request, item)
return {'jot_notifications': "{n} items".format(n=len(items))}
class JotNotifications(admin.options.BaseModelAdmin):
def change_view(self, request, object_id, form_url='', extra_context=None):
ct = ContentType.objects.get_for_model(self.model)
jot_notifications(request, ct, object_id)
return super(JotNotifications, self).change_view(request, object_id,
form_url,
extra_context)
class NoteAdmin(JotNotifications, admin.ModelAdmin):
form = NoteForm
change_form_template = 'admin/change_form_jot.html'
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['jot_ajax_error_msg'] = _('Oops, something went wrong loading child objects')
        return super(NoteAdmin, self).change_view(request, object_id,
                                                  form_url,
                                                  extra_context)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "created_by":
kwargs["initial"] = request.user
        return super(NoteAdmin, self).formfield_for_foreignkey(db_field,
                                                               request, **kwargs)
class Media:
js = ('{static}/js/jot.forms.js'.format(static=settings.STATIC_URL),)
class OneLinerAdmin(JotNotifications, admin.ModelAdmin):
form = OneLinerForm
change_form_template = 'admin/change_form_jot.html'
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "created_by":
kwargs["initial"] = request.user
        return super(OneLinerAdmin, self).formfield_for_foreignkey(db_field,
                                                                   request, **kwargs)
class Media:
js = ('{static}/js/jot.forms.js'.format(static=settings.STATIC_URL),)
admin.site.register(OneLiner, OneLinerAdmin)
admin.site.register(Note, NoteAdmin)
|
{
"content_hash": "60e466ac683bce447f0028c0755ebf6d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 99,
"avg_line_length": 40.77064220183486,
"alnum_prop": 0.594059405940594,
"repo_name": "lvm/django-jot",
"id": "922cedaaba6e60a6bceeaef1c54cf3ef0ff88d5b",
"size": "4444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jot/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "476"
},
{
"name": "JavaScript",
"bytes": "1995"
},
{
"name": "Python",
"bytes": "8245"
}
],
"symlink_target": ""
}
|
import academictorrents as at
import sys
import argparse
parser = argparse.ArgumentParser(description='AT Simple command line tool')
parser.add_argument('-hash', type=str, nargs='?', required=True, help='Hash of torrent to download')
parser.add_argument('-datastore', type=str, nargs='?', default=".", help='Location which to place the files')
args = parser.parse_args()
filename = at.get(args.hash, datastore=args.datastore)
print("Done")
|
{
"content_hash": "aa70bbded38e3f0eea5900cfa4503251",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 109,
"avg_line_length": 36.916666666666664,
"alnum_prop": 0.7471783295711061,
"repo_name": "AcademicTorrents/python-r-api",
"id": "13304f8de07817a0284d6df02b6d5698d8934df1",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "56024"
}
],
"symlink_target": ""
}
|
class AWSBasic():
REGION = 'ap-southeast-2'
ZONE = 'ap-southeast-2a'
RESERVED_ZONE = 'ap-southeast-2b'
conn = False
def __init__(self, aws_access_key_id, aws_secret_access_key, region=False):
if region:
self.REGION = region
self.connect(aws_access_key_id, aws_secret_access_key, region)
def connect(self, aws_access_key_id, aws_secret_access_key, region=False):
pass
|
{
"content_hash": "368cd4c52709b86fde8d56b98f13d852",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 79,
"avg_line_length": 25.41176470588235,
"alnum_prop": 0.625,
"repo_name": "idooo/pancake-campfire-bot",
"id": "8baf555e471ab441842ba5c272298d3aa29ac154",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/aws_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12258"
}
],
"symlink_target": ""
}
|
"""Template script to analyze the coverage of a sample or multiple samples from the .gff file"""
# define search paths manually
import sys
# dependency dirs
sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/sequencing_analysis')
sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/io_utilities')
sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/sequencing_utilities')
sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/calculate_utilities')
from sequencing_analysis.genome_diff import genome_diff
from sequencing_analysis.mutations_lineage import mutations_lineage
from sequencing_analysis.mutations_endpoints import mutations_endpoints
from sequencing_analysis.mutations_heatmap import mutations_heatmap
from sequencing_analysis.gff_coverage import gff_coverage
#analyze the coverage for a particular strain
gffcoverage = gff_coverage();
gffcoverage.extract_coverage_fromGff(gff_file = '//proline/Users/dmccloskey/Resequencing_DNA/Evo04ptsHIcrrEvo04EP/Evo04ptsHIcrrEvo04EP/data/Evo04ptsHIcrrEvo04EP_reference.gff',
strand_start = 0,strand_stop = 4640000,
scale_factor = False,downsample_factor = 2000,
experiment_id_I = 'ALEsKOs01',
sample_name_I = 'Evo04ptsHIcrrEvo04EP');
# calculate the coverage statistics
gffcoverage.calculate_coverageStats_fromGff(gff_file = '//proline/Users/dmccloskey/Resequencing_DNA/Evo04ptsHIcrrEvo04EP/Evo04ptsHIcrrEvo04EP/data/Evo04ptsHIcrrEvo04EP_reference.gff',
strand_start = 0,strand_stop = 4640000,
scale_factor = False,downsample_factor = 0,
experiment_id_I = 'ALEsKOs01',
sample_name_I = 'Evo04ptsHIcrrEvo04EP')
gffcoverage.export_coverageStats('Evo04ptsHIcrrEvo04EP_coverage.csv');
gffcoverage.export_coverage_js();
# find amplifications
gffcoverage.findAndCalculate_amplificationStats_fromGff(gff_file = '//proline/Users/dmccloskey/Resequencing_DNA/Evo04ptsHIcrrEvo04EP/Evo04ptsHIcrrEvo04EP/data/Evo04ptsHIcrrEvo04EP_reference.gff',
strand_start = 0,strand_stop = 4640000,
scale_factor = True,downsample_factor = 200,
reads_min=1.25,reads_max=4.0, indices_min=5000,consecutive_tol=50,
experiment_id_I = 'ALEsKOs01',
sample_name_I = 'Evo04ptsHIcrrEvo04EP');
gffcoverage.export_amplificationStats('Evo04ptsHIcrrEvo04EP_amplificationStats.csv');
gffcoverage.annotate_amplifications(ref_genome_I = 'C:/Users/dmccloskey-sbrg/Documents/GitHub/sbaas/sbaas/data/U00096.2.gb',
ref_I = 'genbank',
geneReference_I = 'C:/Users/dmccloskey-sbrg/Documents/GitHub/sbaas_workspace/sbaas_workspace/workspace_data/_input/150527_MG1655_geneReference.csv',
biologicalmaterial_id_I = 'MG1655')
gffcoverage.export_amplificationAnnotations('Evo04ptsHIcrrEvo04EP_amplificationAnnotations.csv');
gffcoverage.export_amplifications_js();
|
{
"content_hash": "244e09bd0b69f7b1cdd1efbd0b06e180",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 195,
"avg_line_length": 64.88888888888889,
"alnum_prop": 0.7541095890410959,
"repo_name": "dmccloskey/sequencing_analysis",
"id": "5ea6e27f98e9445436f705df9037cdefbfed93e0",
"size": "2920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/template_coverageAnalysis_fromGFF.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "232477"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Supplier',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=80, verbose_name='Slug')),
('active', models.BooleanField(default=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
]
|
{
"content_hash": "3d503206603ca585db9ba42951e695a4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 34,
"alnum_prop": 0.6066176470588235,
"repo_name": "diefenbach/django-lfs",
"id": "ea28828a71a09eb907d9509ac0beee4d0baf6cc0",
"size": "840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lfs/supplier/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96584"
},
{
"name": "HTML",
"bytes": "616573"
},
{
"name": "JavaScript",
"bytes": "591609"
},
{
"name": "Python",
"bytes": "1425991"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from crispy_forms.layout import Submit
from lmgtfy.models import Domain, DomainSearch, TLD
from lmgtfy.tasks import search_bing_task
class CleanSubmitButton(Submit):
field_classes = 'btn btn-default'
def search_bing(domain):
domain_db_record, _created = Domain.objects.get_or_create(name=domain)
# Bing does not allow us to search the same domain more than once per day.
recently_searched = DomainSearch.objects.filter(
created_at__gte=datetime.now()-timedelta(days=1),
domain=domain_db_record
).count()
if recently_searched:
return False
else:
domain_search_record = DomainSearch.objects.create(domain=domain_db_record)
search_bing_task.apply_async(kwargs={'domain_search_record': domain_search_record})
return True
def check_valid_tld(domain):
allowed_tlds = TLD.objects.all().values_list('name', flat=True)
for tld in allowed_tlds:
if domain.endswith(tld):
return True
return False
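# Usage sketch (illustrative; a hypothetical caller such as a form view):
#   if check_valid_tld(domain) and search_bing(domain):
#       pass  # a Bing search task was queued for this domain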
|
{
"content_hash": "5727fbee3260be959ed6f78fb9118476",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 91,
"avg_line_length": 33.54838709677419,
"alnum_prop": 0.7038461538461539,
"repo_name": "todrobbins/lmgtdfy",
"id": "1c33a6f0962c8eedbd246029b47627afbe7bdab3",
"size": "1040",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lmgtfy/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5711"
},
{
"name": "Python",
"bytes": "14915"
}
],
"symlink_target": ""
}
|
import sys
from pubnub import Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'pam'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'pam'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'pam'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on, auth_key="abcd")
channel = 'hello_world'
def callback(message):
print(message)
print pubnub.revoke(channel_group='dev:abcd', auth_key="abcd")
print pubnub.audit(channel_group="dev:abcd")
print pubnub.grant(channel_group='dev:abcd', read=True, write=True, manage=True, auth_key="abcd")
print pubnub.channel_group_list_namespaces()
print pubnub.channel_group_list_groups(namespace='aaa')
print pubnub.channel_group_list_groups(namespace='foo')
print pubnub.channel_group_list_channels(channel_group='dev:abcd')
print pubnub.channel_group_add_channel(channel_group='dev:abcd', channel="hi")
print pubnub.channel_group_list_channels(channel_group='dev:abcd')
print pubnub.channel_group_remove_channel(channel_group='dev:abcd', channel="hi")
print pubnub.channel_group_list_channels(channel_group='dev:abcd')
pubnub.revoke(channel_group='dev:abcd', auth_key="abcd", callback=callback, error=callback)
pubnub.audit(channel_group="dev:abcd", callback=callback, error=callback)
pubnub.grant(channel_group='dev:abcd', read=True, write=True, manage=True, auth_key="abcd", callback=callback, error=callback)
pubnub.channel_group_list_namespaces(callback=callback, error=callback)
pubnub.channel_group_list_groups(namespace='aaa', callback=callback, error=callback)
pubnub.channel_group_list_groups(namespace='foo', callback=callback, error=callback)
pubnub.channel_group_list_channels(channel_group='dev:abcd', callback=callback, error=callback)
pubnub.channel_group_add_channel(channel_group='dev:abcd', channel="hi", callback=callback, error=callback)
pubnub.channel_group_list_channels(channel_group='dev:abcd', callback=callback, error=callback)
pubnub.channel_group_remove_channel(channel_group='dev:abcd', channel="hi", callback=callback, error=callback)
pubnub.channel_group_list_channels(channel_group='dev:abcd', callback=callback, error=callback)
|
{
"content_hash": "8330f9337b984daf5c219a074016b8f8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 126,
"avg_line_length": 58.093023255813954,
"alnum_prop": 0.7185748598879104,
"repo_name": "teddywing/pubnub-python",
"id": "f63e6d2458e50129bae064f55309c10b147f3cc1",
"size": "2698",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/examples/cr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241780"
},
{
"name": "Shell",
"bytes": "831"
}
],
"symlink_target": ""
}
|
import socket
from threading import Thread
import os
import copy
import time
import subprocess
import json, zmq
import library.utils.utils as UT
import library.constants as CO
class ClABCHandler(object):
def __init__(self, connection):
self.connectionManager = connection
Thread(target=self.__receive).start()
self._model = []
#? needed ?
self._confirmation = 0
self._checkConfirmation = None
self._frames = {}
self._actApp = []
self._appId = {}
self._subscribtion = {}
self._currAct = None
self._me = None # user id
# self._process = []
#
# Connection functions
#
def __receive(self):
sock = self.connectionManager.control
self.connectionManager.locking += 1
poller = zmq.Poller()
poller.register(sock, zmq.POLLIN)
while not self.connectionManager.close:
socks = dict(poller.poll(1000))
if sock in socks and socks[sock] == zmq.POLLIN:
data = sock.recv()
print 'Received> '+ data
resp = self._handleLocalRequest( data )
# print 'Sending> '+ resp
sock.send( resp )
self.connectionManager.locking -= 1
#
# Applications communication
#
def suspend(self, all=False):
# send suspend to processes
msg = 'CMD SUSPEND '
if all:
tmp = [self._frames[x][0] for x in self._frames if isinstance(self._frames[x], list)]
msg += 'ALL'
else:
tmp = [self._frames[x][0] for x in self._frames if (isinstance(self._frames[x], list) and not x == 'activities')]
msg += 'ACT'
self._confirmation = len(tmp)
print self._confirmation
self.connectionManager.publisher.send( msg )
# while self._confirmation:
# continue
time.sleep(1)
self.killApplications(tmp)
def _handleLocalRequest(self, wip):
resp = ''
msg = wip.split(' ', 1)
if (msg[0]=='ABC'):
tmp = msg[1].split(' ')
if tmp[0]=='RESUME' and tmp[1]=='COMPLETED':
self._confirmation -= 1
if tmp[0]=='SUSPEND' and tmp[1]=='COMPLETED':
self._confirmation -= 1
# TODO> check ... should not be here
# self.killApplications( [self._frames[tmp[2]][0]] )
return resp
if (msg[0]=='INIT'):
resp = self._initApplication(msg[1])
if (msg[0]=='RESUME'):
self._resumeActivity(long(msg[1]))
if (msg[0]=='SUSPEND'):
self._suspendActivity(long(msg[1]))
if (msg[0]=='QUERY'):
model = msg[1].split(' ', 1)[0]
q = msg[1].split(' ', 1)[1]
resp = self._query(wip, sender)
return resp
def _query(self, msg, sender=None):
sent = self._send(msg)
recv = self._receive()
return recv[1][2]
def query(self, query, model='abc'):
q = 'QUERY %s %s' % (model, query, )
return self._query(q)
def _resumeActivity(self, actId):
# take all application
# launch them by sending also uid and act id
if self._checkConfirmation != None:
print "Still up - %s" % (self._confirmation,)
self._checkConfirmation[0](self._checkConfirmation[1])
self._checkConfirmation = None
q = 'abc.activity.%s.application' % (actId, )
resp = self.query( q )
if not isinstance(resp, list):
return
print resp
self._currAct = actId
self._actApp = []
for x in resp:
fields = ['name','command','file_name','folder']
param = []
for y in fields:
q = 'abc.application.%s.%s' % (x, y, )
param.append(self.query( q ))
self._actApp.append(param[0])
self._execute(param[0],param[1], [ os.path.join(param[3], param[2]) ], x )
def _suspendActivity(self, actId):
self.suspend()
#
# Applications execution functions
#
def killApplications(self, pids=[]):
if not len(pids):
pids = [self._frames[x][0] for x in self._frames if isinstance(self._frames[x], list)]
        print 'killing ' + str([x.pid for x in pids])
for x in pids:
# if os.path.exists( '/proc/%d'% (x.pid, ) ):
# print x.pid
# os.kill(x.pid, 9)
x.kill()
def _execute(self, name, program, argument, id=None):
argument.insert( 0, program )
print '....................................' + ' '.join(x for x in argument)
        if program == "python":
            wip = subprocess.Popen( argument )
        else:
            # with shell=True, POSIX runs only the first element of a list,
            # so hand the whole command line to the shell as one string
            wip = subprocess.Popen( ' '.join(argument), shell=True )
self._frames[name] = [ wip, [] ]
if id != None:
self._appId[name] = id
# DO NOT REMOVE ... MAGIC
#time.sleep(1)
def _initApplication(self, name):
# u_id self._appId[user] currAct
param = [self._me]
if name in self._appId:
param.append(self._appId[name])
param.append(self._currAct)
return ' '.join(str(x) for x in param)
#
# Functions to handle server
#
def connect(self, name, model):
if model in self._model:
return
msg = 'CONNECT %s USER %s' % (model, name, )
sent = self._send(msg)
resp = self._receive()
        print repr(resp)
if resp[1][2][0]:
self._me = long(resp[1][2][1])
# TODO > check if really a number ... resp[1][2][0] should be false ... resp[1][2][1]
dirTmp = UT.getTmpDir()
msg = 'QUERY %s abc.user.%s.tmp_dir.=.{{%s}}' % (model, self._me, dirTmp, )
self._query(msg, self._me)
print 'Connected to ' + model
self._model.append(model)
msg = 'QUERY %s abc.user.%s.state.=.%s' % (model, self._me, CO.user_CONNECTED, )
self._query(msg, self._me)
return (sent, resp)
def disconnect(self, model):
# rest -> <model>
if model not in self._model:
return
msg = 'QUERY %s abc.user.%s.state.=.%s' % (model, self._me, CO.user_DISCONNECTED, )
self._query(msg, self._me)
msg = 'DISCONNECT %s' % (model, )
sent = self._send(msg)
resp = self._receive()
if resp[1][2][0]:
print 'Disconnected from ' + model
del self._model[ self._model.index(model) ]
return (sent, resp)
    def _send(self, msg):
        code = UT.getRandomId()
        msg = 'CODE %s FROM %s %s' % (code, self._me, msg)
        self.connectionManager.backend.send( json.dumps( {'q':msg} ) )
        return msg
def _receive(self):
data = self.connectionManager.backend.recv()
data = json.loads( data )
return ( 'Old', data['a'] )
def run(self, wip):
msg = 'RUN %s' % (wip, )
sent = self._send(msg)
resp = self._receive()
return (sent, resp)
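# A minimal usage sketch (illustrative assumptions: `ConnectionManager` is
# the project's connection class exposing `control`, `publisher` and
# `backend` zmq sockets plus `close`/`locking` attributes; its name and
# construction are hypothetical here, not taken from this file):
#
#     conn = ConnectionManager()          # hypothetical constructor
#     handler = ClABCHandler(conn)        # spawns the __receive thread
#     handler.connect('alice', 'abc')     # register the user with a model
#     print handler.query('abc.activity') # illustrative query string
#     handler.disconnect('abc')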
|
{
"content_hash": "6a1db86f3d10b335adce837458308870",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 116,
"avg_line_length": 27.56279069767442,
"alnum_prop": 0.6211609854876814,
"repo_name": "crest-centre/ABC4GSD",
"id": "b9d7c358da365e608fdcf2e59d83cdcdbee624be",
"size": "6049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Client-Server/library/client/ClHandler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "570891"
},
{
"name": "Python",
"bytes": "117679"
}
],
"symlink_target": ""
}
|
"""This example updates rule based first party audience segments.
To determine which audience segments exist, run get_all_audience_segments.py.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
AUDIENCE_SEGMENT_ID = 'INSERT_AUDIENCE_SEGMENT_ID_HERE'
def main(client, audience_segment_id):
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201405')
# Create statement object to get the specified first party audience segment.
values = (
[{'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
},
{'key': 'audience_segment_id',
'value': {
'xsi_type': 'NumberValue',
          'value': audience_segment_id
}
}])
query = 'WHERE Type = :type AND Id = :audience_segment_id'
statement = dfp.FilterStatement(query, values, 1)
# Get audience segments by statement.
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
updated_audience_segments = []
for audience_segment in response['results']:
print ('Audience segment with id \'%s\' and name \'%s\' will be updated.'
% (audience_segment['id'], audience_segment['name']))
audience_segment['membershipExpirationDays'] = '180'
updated_audience_segments.append(audience_segment)
audience_segments = audience_segment_service.updateAudienceSegments(
updated_audience_segments)
for audience_segment in audience_segments:
print ('Audience segment with id \'%s\' and name \'%s\' was updated' %
(audience_segment['id'], audience_segment['name']))
else:
print 'No audience segment found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, AUDIENCE_SEGMENT_ID)
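# A minimal sketch of listing existing segments to find an id to use above
# (mirrors the get_all_audience_segments.py example referenced in the
# docstring; the paging attributes follow the dfp.FilterStatement API and
# this loop is illustrative, not part of the original example):
#
#     statement = dfp.FilterStatement()
#     while True:
#       response = audience_segment_service.getAudienceSegmentsByStatement(
#           statement.ToStatement())
#       if 'results' in response:
#         for segment in response['results']:
#           print ('Segment with id \'%s\' and name \'%s\' found.'
#                  % (segment['id'], segment['name']))
#         statement.offset += statement.limit
#       else:
#         break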
|
{
"content_hash": "102bff0cdfca14942611e58546412418",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 32.24242424242424,
"alnum_prop": 0.6555451127819549,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "db92594ebd0fe34a8c95f37f0281183384c10f5b",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201405/audience_segment_service/update_audience_segments.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
}
|
try:
enumerate, exec
except:
print("SKIP")
raise SystemExit
def get_stop_iter_arg(msg, code):
try:
exec(code)
print("FAIL")
except StopIteration as er:
print(msg, er.args)
class A:
def __iter__(self):
return self
def __next__(self):
raise StopIteration(42)
class B:
def __getitem__(self, index):
# argument to StopIteration should get ignored
raise StopIteration(42)
def gen(x):
return x
yield
def gen2(x):
try:
yield
except ValueError:
pass
return x
get_stop_iter_arg("next", "next(A())")
get_stop_iter_arg("iter", "next(iter(B()))")
get_stop_iter_arg("enumerate", "next(enumerate(A()))")
get_stop_iter_arg("map", "next(map(lambda x:x, A()))")
get_stop_iter_arg("zip", "next(zip(A()))")
g = gen(None)
get_stop_iter_arg("generator0", "next(g)")
get_stop_iter_arg("generator1", "next(g)")
g = gen(42)
get_stop_iter_arg("generator0", "next(g)")
get_stop_iter_arg("generator1", "next(g)")
get_stop_iter_arg("send", "gen(None).send(None)")
get_stop_iter_arg("send", "gen(42).send(None)")
g = gen2(None)
next(g)
get_stop_iter_arg("throw", "g.throw(ValueError)")
g = gen2(42)
next(g)
get_stop_iter_arg("throw", "g.throw(ValueError)")
|
{
"content_hash": "7ea9ef77a9e77eacb8336e3537053496",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 54,
"avg_line_length": 20.737704918032787,
"alnum_prop": 0.6055335968379446,
"repo_name": "adafruit/circuitpython",
"id": "d4719c9bc35ec0b5002f68a5e946d4379af7c1e2",
"size": "1315",
"binary": false,
"copies": "11",
"ref": "refs/heads/main",
"path": "tests/basics/stopiteration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10241"
},
{
"name": "C",
"bytes": "18450191"
},
{
"name": "C++",
"bytes": "476"
},
{
"name": "CMake",
"bytes": "18203"
},
{
"name": "CSS",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "10126"
},
{
"name": "JavaScript",
"bytes": "13854"
},
{
"name": "Jinja",
"bytes": "11034"
},
{
"name": "Makefile",
"bytes": "330832"
},
{
"name": "Python",
"bytes": "1423935"
},
{
"name": "Shell",
"bytes": "18681"
}
],
"symlink_target": ""
}
|
"""
Context processors used by Horizon.
"""
import logging
from django.conf import settings
from horizon import api
LOG = logging.getLogger(__name__)
def horizon(request):
""" The main Horizon context processor. Required for Horizon to function.
    Adds four variables to the request context:
``authorized_tenants``
A list of tenant objects which the current user has access to.
``object_store_configured``
Boolean. Will be ``True`` if there is a service of type
``object-store`` in the user's ``ServiceCatalog``.
    ``network_configured``
        Boolean. Will be ``True`` if ``settings.QUANTUM_ENABLED`` is ``True``.
    ``region``
        A dict holding the multi-region support flag along with the
        endpoint and name of the current region from the session.
Additionally, it sets the names ``True`` and ``False`` in the context
to their boolean equivalents for convenience.
.. warning::
Don't put API calls in context processors; they will be called once
for each template/template fragment which takes context that is used
to render the complete output.
"""
context = {"True": True,
"False": False}
# Auth/Keystone context
context.setdefault('authorized_tenants', [])
if request.user.is_authenticated():
context['authorized_tenants'] = request.user.authorized_tenants
# Object Store/Swift context
catalog = getattr(request.user, 'service_catalog', [])
object_store = catalog and api.get_service_from_catalog(catalog,
'object-store')
    context['object_store_configured'] = bool(object_store)
# Quantum context
# TODO(gabriel): Convert to service catalog check when Quantum starts
# supporting keystone integration.
context['network_configured'] = getattr(settings, 'QUANTUM_ENABLED', None)
# Region context/support
available_regions = getattr(settings, 'AVAILABLE_REGIONS', None)
    regions = {'support': len(available_regions or []) > 1,
'endpoint': request.session.get('region_endpoint'),
'name': request.session.get('region_name')}
context['region'] = regions
return context
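# To enable this processor, reference it from the Django settings module
# (the dotted path below assumes this file lives at
# horizon/context_processors.py, as in this repository):
#
#     TEMPLATE_CONTEXT_PROCESSORS = (
#         'django.contrib.auth.context_processors.auth',
#         'horizon.context_processors.horizon',
#     )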
|
{
"content_hash": "0a81040402304ab969fdd01554e932aa",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 32.47692307692308,
"alnum_prop": 0.6485078162008526,
"repo_name": "rcbops/horizon-buildpackage",
"id": "c2c582aa43b1bc82b0d85a28142a39eef6eeb178",
"size": "2919",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "horizon/context_processors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "618253"
}
],
"symlink_target": ""
}
|