import os.path

import pytest

from pre_commit_hooks.detect_private_key import detect_private_key

# Input, expected return value
TESTS = (
    (b'-----BEGIN RSA PRIVATE KEY-----', 1),
    (b'-----BEGIN DSA PRIVATE KEY-----', 1),
    (b'-----BEGIN EC PRIVATE KEY-----', 1),
    (b'ssh-rsa DATA', 0),
    (b'ssh-dsa DATA', 0),
    # Some arbitrary binary data
    (b'\xa2\xf1\x93\x12', 0),
)


@pytest.mark.parametrize(('input_s', 'expected_retval'), TESTS)
def test_detect_private_key(input_s, expected_retval, tmpdir):
    path = os.path.join(tmpdir.strpath, 'file.txt')
    with open(path, 'wb') as file_obj:
        file_obj.write(input_s)
    assert detect_private_key([path]) == expected_retval
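
# Added note (sketch): pre-commit hooks signal failure through a non-zero
# return value, which is why the key headers in TESTS map to 1 (block the
# commit) and benign content maps to 0.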
|
{
"content_hash": "4a5c79ced6064dff89b8e8983f002b41",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 27,
"alnum_prop": 0.6253561253561254,
"repo_name": "bgschiller/pre-commit-hooks",
"id": "d9764724f05d7e1bf46c5325bea9acf9e3c25255",
"size": "702",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/detect_private_key_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Python",
"bytes": "95546"
}
],
"symlink_target": ""
}
|
from Foundation import *
from PyObjCTools.TestSupport import *


class TestNSGarbageCollector (TestCase):
    def testMethods(self):
        self.assertResultIsBOOL(NSGarbageCollector.isCollecting)
        self.assertResultIsBOOL(NSGarbageCollector.isEnabled)


if __name__ == "__main__":
    main()
|
{
"content_hash": "ba0c194068d358cd2aabbf3ee82aa20d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.7357859531772575,
"repo_name": "Khan/pyobjc-framework-Cocoa",
"id": "aaf83ec8130a835fa5e4d2b5ac52b5b54a53c2fc",
"size": "299",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyObjCTest/test_nsgarbagecollector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "5481"
},
{
"name": "Objective-C",
"bytes": "213902"
},
{
"name": "Python",
"bytes": "2450939"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model


class RegistryPassword(Model):
    """The login password for the container registry.

    :param name: The password name. Possible values include: 'password',
     'password2'
    :type name: str or :class:`PasswordName
     <azure.mgmt.containerregistry.v2017_03_01.models.PasswordName>`
    :param value: The password value.
    :type value: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'PasswordName'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, name=None, value=None):
        self.name = name
        self.value = value
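
# Added usage sketch (illustrative only; the 's3cret' value is made up):
#
#     pwd = RegistryPassword(name='password', value='s3cret')
#     pwd.name   # 'password' -- serialized via _attribute_map under the
#                # JSON key 'name' with the msrest type 'PasswordName'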
|
{
"content_hash": "040e20684ca6fd158ae4a22e7b3775d4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 28.818181818181817,
"alnum_prop": 0.6230283911671924,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "9835295c95956efc278a562623bbdc5f35e1cca0",
"size": "1108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_03_01/models/registry_password.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtWidgets, QtCore

from ui.mainwindow import MainWindow

if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    translator = QtCore.QTranslator()
    # get system locale
    systemLocale = QtCore.QLocale.system().name()
    translationFile = "i18n/{}".format(systemLocale)
    # load translation file and install translator
    translator.load(translationFile)
    app.installTranslator(translator)
    # fire up main ui
    ui = MainWindow()
    ui.showMaximized()
    sys.exit(app.exec_())
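
# Added note: QTranslator.load() simply returns False when "i18n/<locale>"
# is missing, so on systems without a matching translation file the app
# silently starts with its untranslated strings.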
|
{
"content_hash": "cfa8286231db6d1566455965eca86163",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 28.68421052631579,
"alnum_prop": 0.691743119266055,
"repo_name": "Athemis/PyDSF",
"id": "10bb3b73d2d5c1da7f8b5895fd928389192c8d33",
"size": "593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100035"
},
{
"name": "QMake",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from posts import views
from django.urls import path
urls = [
    path(r'add/', views.PostFormView.as_view(), name="add_post"),
    path(r'<slug:slug>/', views.post_detail, name='post_detail')
]
|
{
"content_hash": "1899d544b44474713ae22e2547cf009a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 28,
"alnum_prop": 0.673469387755102,
"repo_name": "robotgear/robotgear",
"id": "86170bb4fb17a2b781fb8e3541072213711633c9",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "posts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3150090"
},
{
"name": "Dockerfile",
"bytes": "156"
},
{
"name": "HTML",
"bytes": "47366"
},
{
"name": "JavaScript",
"bytes": "2745371"
},
{
"name": "Python",
"bytes": "49333"
}
],
"symlink_target": ""
}
|
from behave_core.resource import *
from behave import step


## Collector for internal path.
@step('I collect data at path "{path}"')
def step_get_data_path(context, path):
    full_url = context.target + path
    get_and_process(context, full_url, {})


## Collector for remote resource.
@step('I collect data at URL "{url}"')
def step_get_data_url(context, url):
    get_and_process(context, url, {})


@step('the content type should be "{ctype}"')
def step_content_type_should(context, ctype):
    if not context.content_type:
        ## Apparently no content type at all...
        assert True is False
    else:
        assert context.content_type == ctype


@step('the content should contain "{text}"')
def step_content_contain_should(context, text):
    if not context.content:
        ## Apparently no text at all...
        assert True is False
    else:
        assert context.content.rfind(text) != -1
|
{
"content_hash": "c7c0096d49a4953f3a0e8df95e1e078f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 48,
"avg_line_length": 31.413793103448278,
"alnum_prop": 0.6652030735455543,
"repo_name": "berkeleybop/behave_core",
"id": "7068faf6701aea4ee7ffd2f364135a6f73f9605e",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource_steps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cucumber",
"bytes": "1316"
},
{
"name": "Python",
"bytes": "8771"
}
],
"symlink_target": ""
}
|
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings

from scrapy_example.spiders.new_crawling import MySpider
from scrapy_example.spiders.new_spider import NewSpider
from scrapy_example.spiders.crawl_images import ImagesSpider

configure_logging()
runner = CrawlerRunner(get_project_settings())


@defer.inlineCallbacks
def crawl():
    yield runner.crawl(ImagesSpider)
    yield runner.crawl(MySpider)
    yield runner.crawl(NewSpider)
    reactor.stop()


crawl()
reactor.run()
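
# Added note (sketch): runner.crawl() returns a Deferred, so each `yield`
# inside the @defer.inlineCallbacks function waits for that spider to finish.
# The three spiders therefore run sequentially, and reactor.stop() fires only
# after the last one completes.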
|
{
"content_hash": "a49023e5cff60f26cae53fc369a5542f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 60,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.8055555555555556,
"repo_name": "xdlabs/crawling-tutorials",
"id": "f4fd91b6926f38311a3c71fe04f359c70bbfd286",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy_example/calling_spider_by_CrawlerRunner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "160258"
},
{
"name": "Python",
"bytes": "27024"
}
],
"symlink_target": ""
}
|
import pytest
max_test_cases = 100
# The following generates up to max_test_cases tests
@pytest.mark.parametrize('count', range(max_test_cases))
def test_case(count): pass
|
{
"content_hash": "61bcb5fd7f97c6247e208f3cba32ae74",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.7572254335260116,
"repo_name": "s2technologies/testspace.test.ci",
"id": "305f0b68b70b7dd9feda874fa36e2b3ba6cd46ae",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate-results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "421"
}
],
"symlink_target": ""
}
|
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UpdatePaymentRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, token=None, type=None):
"""
UpdatePaymentRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'token': 'str',
'type': 'str'
}
self.attribute_map = {
'token': 'token',
'type': 'type'
}
self._token = token
self._type = type
@property
def token(self):
"""
Gets the token of this UpdatePaymentRequest.
:return: The token of this UpdatePaymentRequest.
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""
Sets the token of this UpdatePaymentRequest.
:param token: The token of this UpdatePaymentRequest.
:type: str
"""
self._token = token
@property
def type(self):
"""
Gets the type of this UpdatePaymentRequest.
:return: The type of this UpdatePaymentRequest.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this UpdatePaymentRequest.
:param type: The type of this UpdatePaymentRequest.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "76d141c8542d2dd7f669c278d3593fa2",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 24.76865671641791,
"alnum_prop": 0.5094908104850858,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "c62a45838be7d033bda958c5558b6e4f28116a12",
"size": "3336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinow_client/models/update_payment_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
}
|
__author__ = 'lisette-espin'
################################################################################
### Local
################################################################################
from org.gesis.libs import graph as c
from org.gesis.libs.janus import JANUS
from org.gesis.libs.graph import DataMatrix
################################################################################
### Global Dependencies
################################################################################
import time
from scipy.sparse import csr_matrix, lil_matrix
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy import io
from random import randint
import operator
import os
import sys
import pickle
import seaborn as sns; sns.set(); sns.set_style("whitegrid"); sns.set_style("ticks"); sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5}); sns.set_style({'legend.frameon': True})
################################################################################
### Constants
################################################################################
BLOCKS = 2 # number of blocks
LINKSONLY = False
LOW = 0.2
HIGH = 0.8
COLORDIST = {'red':50,'blue':50} # distribution of colors in nodes
COLORPROB = {'red':{'red':HIGH,'blue':LOW},'blue':{'red':LOW,'blue':HIGH}} # color probabilities
COLORVOCAB = {0:'red',1:'blue'}
FIGSIZE = (5,5)
################################################################################
### Class
################################################################################
class RandomWalkGraph(object):

    def __init__(self,nnodes,walks,colors,probabilities,selfloops,isdirected,isweighted,ismultigraph,path,name):
        self.G = None
        self.data = None
        self.nnodes = nnodes
        self.walks = walks
        self.colors = colors
        self.probabilities = probabilities
        self.selfloops = selfloops
        self.isdirected = isdirected
        self.isweighted = isweighted
        self.ismultigraph = ismultigraph
        self.path = path
        self.name = name
        self.nodes_sorted = None
        self.labels = None
        self.colordistribution = {}

    def validate(self):
        if set([c for c,p in self.colors.items()]) == set(self.probabilities.keys()) == set([k for av,obj in self.probabilities.items() for k in obj.keys()]):
            return True
        print('Error: There is not enough information to generate the graph.')
        return False
    def plot_color_distribution(self):
        data = {}
        fig, ax = plt.subplots()
        for v1, vobj in self.colordistribution.items():
            for v2, nedges in vobj.items():
                label = '{}-{}'.format(v1,v2)
                if label not in data:
                    data[label] = 0
                data[label] += nedges
        print('Total edges: {}'.format(sum(data.values())))
        x = range(len(data.keys()))
        ax.bar(x, data.values(), 0.35, color='r')
        ax.set_ylabel('# Edges')
        ax.set_xlabel('Colors')
        ax.set_xticks(x)
        ax.set_xticklabels(data.keys())
        ax.set_title('Distribution of Edges per Color')
        # plt.show()
        fn = os.path.join(self.path,'{}-color-distribution.pdf'.format(self.name))
        plt.savefig(fn, dpi=1200, bbox_inches='tight')
        plt.close()

    def plot_degree_rank(self):
        degree_sequence=sorted(nx.degree(self.G).values(),reverse=True)
        plt.loglog(degree_sequence,'b-',marker='o')
        plt.title("Degree rank plot")
        plt.ylabel("degree")
        plt.xlabel("rank")
        # draw graph in inset
        plt.axes([0.45,0.45,0.45,0.45])
        Gcc=sorted(nx.connected_component_subgraphs(self.G), key = len, reverse=True)[0]
        pos=nx.spring_layout(Gcc)
        plt.axis('off')
        nx.draw_networkx_nodes(Gcc,pos,node_size=20)
        nx.draw_networkx_edges(Gcc,pos,alpha=0.4)
        fn = os.path.join(self.path,'{}-degree-rank.pdf'.format(self.name))
        plt.savefig(fn, dpi=1200, bbox_inches='tight')
        plt.close()

    def plot_adjacency(self,**kwargs):
        if self.data is None and self.G is not None:
            self.data = nx.adjacency_matrix(self.G)
        nodes = {n[0]:n[1]['color'] for n in self.G.nodes(data=True)}
        self.nodes_sorted = sorted(nodes.items(), key=operator.itemgetter(1))
        m = lil_matrix(self.data.shape)

        #### labels in the middle of the block
        colors = {}
        for n in self.G.nodes(data=True):
            c = n[1]['color']
            if c not in colors:
                colors[c] = 0
            colors[c] += 1
        print('Colors Distribution: {}'.format(colors))
        self.labels = ['' for n in range(self.nnodes)]
        p = 0
        for c,n in colors.items():
            self.labels[int(p+n/2.)] = c
            p += n

        ### reordering edges to see blocks
        row = 0
        for n1 in self.nodes_sorted:
            col = 0
            v1 = n1[0]
            for n2 in self.nodes_sorted:
                v2 = n2[0]
                m[row,col] = self.data[v1,v2]
                col += 1
            row += 1

        grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
        f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=kwargs['figsize'])
        ax = sns.heatmap(m.toarray(), ax=ax,
                         # annot=True,
                         cbar_ax=cbar_ax,
                         cbar_kws={"orientation": "horizontal"},
                         xticklabels=self.labels,
                         yticklabels=self.labels)
        ax.set_xlabel('target nodes')
        ax.set_ylabel('source nodes')
        ax.xaxis.tick_top()
        ax.yaxis.tick_right()
        ax.tick_params(axis='x', colors='grey')
        ax.tick_params(axis='y', colors='grey')
        plt.setp( ax.xaxis.get_majorticklabels(), horizontalalignment='center' )
        plt.setp( ax.yaxis.get_majorticklabels(), rotation=270, horizontalalignment='center', x=1.02 )
        cbar_ax.set_title('edge multiplicity', y=-5)
        fn = os.path.join(self.path,'{}-adjacency-matrix.pdf'.format(self.name))
        plt.savefig(fn, dpi=1200, bbox_inches='tight')
        print('- plot adjacency done!')
        plt.close()
    def saveGraph(self, fn):
        fn = os.path.join(self.path,fn)
        nx.write_gpickle(self.G, fn)

    def saveCSV(self,fn):
        fn = fn.replace('.gpickle','.csv')
        fn = os.path.join(self.path,fn)
        m = nx.adjacency_matrix(self.G)
        np.savetxt(fn, m.toarray(), delimiter=',', fmt='%.2f')

    def loadGraph(self, fn):
        fn = os.path.join(self.path,fn)
        self.G = nx.read_gpickle(fn)

    def fileExists(self,fn):
        fn = os.path.join(self.path,fn)
        return os.path.exists(fn)

    def saveColorDistribution(self):
        fn = 'color_distribution.p'
        fn = os.path.join(self.path,fn)
        with open(fn,'w') as f:
            pickle.dump(self.colordistribution,f)

    def loadColorDistribution(self):
        fn = 'color_distribution.p'
        fn = os.path.join(self.path,fn)
        with open(fn,'r') as f:
            self.colordistribution = pickle.load(f)

    def createGraph(self):
        if self.validate():
            fn = '2color_graph.gpickle'
            if self.fileExists(fn):
                self.loadGraph(fn)
                self.loadColorDistribution()
            else:
                ### Initializing graph
                if self.ismultigraph:
                    self.G = nx.MultiDiGraph() if self.isdirected else nx.MultiGraph()
                else:
                    self.G = nx.DiGraph() if self.isdirected else nx.Graph()
                ### Creating nodes with attributes (50% each color)
                ### 0:red, 1:blue
                nodes = {n:int(n*BLOCKS/self.nnodes) for n in range(self.nnodes)}
                for source,block in nodes.items():
                    if source not in self.G:
                        self.G.add_node(source, color=COLORVOCAB[block])
                    for i in range(self.walks):
                        target = None
                        while(target == source or target is None):
                            target = randint(0,self.nnodes-1)
                        if target not in self.G:
                            self.G.add_node(target, color=COLORVOCAB[nodes[target]])
                        prob = COLORPROB[COLORVOCAB[block]][COLORVOCAB[nodes[target]]]
                        draw = np.random.binomial(n=1,p=prob,size=1)
                        if draw:
                            self.G.add_edge(source, target, weight=1.)
                            if COLORVOCAB[block] not in self.colordistribution:
                                self.colordistribution[COLORVOCAB[block]] = {}
                            if COLORVOCAB[nodes[target]] not in self.colordistribution[COLORVOCAB[block]]:
                                self.colordistribution[COLORVOCAB[block]][COLORVOCAB[nodes[target]]] = 0
                            self.colordistribution[COLORVOCAB[block]][COLORVOCAB[nodes[target]]] += 1
                self.saveGraph(fn)
                self.saveCSV(fn)
                self.saveColorDistribution()
            self.saveCSV(fn)
            print(nx.info(self.G))
            self.data = nx.adjacency_matrix(self.G)
################################################################################
### Hypothesis
################################################################################
def file_exists(rg,fn):
    fn = os.path.join(rg.path,fn)
    return os.path.exists(fn)

def load_matrix(rg,fn):
    fn = os.path.join(rg.path,fn)
    return csr_matrix(io.mmread(fn))

def save_matrix(m,rg,fn):
    fn = os.path.join(rg.path,fn)
    io.mmwrite(fn, m)

def build_hypothesis(rg, criteriafn, selfloops=False):
    fn = '{}.mtx'.format(criteriafn.__name__)
    if file_exists(rg,fn):
        m = load_matrix(rg,fn)
        print('sum hypothesis {}: {}'.format(criteriafn.__name__,m.sum()))
    else:
        nnodes = nx.number_of_nodes(rg.G)
        m = lil_matrix((nnodes,nnodes))
        for n1,d1 in rg.G.nodes_iter(data=True):
            for n2,d2 in rg.G.nodes_iter(data=True):
                i1 = rg.G.nodes().index(n1)
                i2 = rg.G.nodes().index(n2)
                if i1 == i2 and not selfloops:
                    continue
                if i2 > i1:
                    if LINKSONLY:
                        if rg.G.has_edge(n1,n2) or rg.G.has_edge(n2,n1):
                            value = criteriafn(d1,d2)
                            m[i1,i2] = value
                            if nx.is_directed(rg.G):
                                value = criteriafn(d2,d1)
                                m[i2,i1] = value
                    else:
                        value = criteriafn(d1,d2)
                        m[i1,i2] = value
                        if nx.is_directed(rg.G):
                            value = criteriafn(d2,d1)
                            m[i2,i1] = value
        save_matrix(m,rg,fn)
        print('sum hypothesis {}: {}'.format(criteriafn.__name__,m.sum()))
    return m
def homophily(datanode1, datanode2):
    return HIGH if datanode1['color'] == datanode2['color'] else LOW

def heterophily(datanode1, datanode2):
    return LOW if datanode1['color'] == datanode2['color'] else HIGH
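
# Added note: with HIGH = 0.8 and LOW = 0.2 the two hypotheses are mirror
# images of one another, e.g.
#     homophily( {'color': 'red'}, {'color': 'red'} )     # -> 0.8 (HIGH)
#     heterophily( {'color': 'red'}, {'color': 'blue'} )  # -> 0.8 (HIGH)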
def plot_adjacency(rg, matrix,name,**kwargs):
    m = lil_matrix(rg.data.shape)
    row = 0
    for n1 in rg.nodes_sorted:
        col = 0
        v1 = n1[0]
        for n2 in rg.nodes_sorted:
            v2 = n2[0]
            m[row,col] = matrix[v1,v2]
            col += 1
        row += 1

    grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
    f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws, figsize=kwargs['figsize'])
    ax = sns.heatmap(m.toarray(), ax=ax,
                     # annot=True,
                     cbar_ax=cbar_ax,
                     cbar_kws={"orientation": "horizontal"},
                     xticklabels=rg.labels,
                     yticklabels=rg.labels)
    ax.set_xlabel('target nodes')
    ax.set_ylabel('source nodes')
    ax.xaxis.tick_top()
    ax.yaxis.tick_right()
    ax.tick_params(axis='x', colors='grey')
    ax.tick_params(axis='y', colors='grey')
    plt.setp( ax.xaxis.get_majorticklabels(), horizontalalignment='center' )
    plt.setp( ax.yaxis.get_majorticklabels(), rotation=270, horizontalalignment='center', x=1.02 )
    cbar_ax.set_title('cardinality (no. of edges)')
    fn = os.path.join(rg.path,'{}-adjacency-matrix.pdf'.format(name))
    plt.savefig(fn, dpi=1200, bbox_inches='tight')
    print('- plot adjacency done!')
    plt.close()
def run_janus(rg,isdirected,isweighted,ismultigraph,dependency,algorithm,path,kmax,klogscale,krank,tocsv,**hypotheses):
    graph = DataMatrix(isdirected,isweighted,ismultigraph,dependency,algorithm,path)
    graph.dataoriginal = rg.data.copy()
    graph.nnodes = rg.data.shape[0]
    graph.nedges = rg.data.sum() / (1. if isdirected else 2.)
    graph.saveData()

    start_time = time.time()
    janus = JANUS(graph, path)
    janus.createHypothesis('data')
    janus.createHypothesis('uniform')
    janus.createHypothesis('selfloop')
    for k,v in hypotheses.items():
        janus.createHypothesis(k,v)
    janus.generateEvidences(kmax,klogscale)
    print("--- %s seconds ---" % (time.time() - start_time))

    janus.showRank(krank)
    janus.saveEvidencesToFile()
    janus.plotEvidences(krank,figsize=(9, 5),bboxx=0.8,bboxy=0.63,fontsize='x-small')
    janus.plotBayesFactors(krank,figsize=(9, 5),bboxx=0.8,bboxy=0.63,fontsize='x-small')
    janus.saveReadme()

    # ### Saving CSV (dense matrix)
    if tocsv:
        save_csv(graph.dataoriginal,rg,'{}_data.csv'.format(algorithm))
        for h,m in hypotheses.items():
            save_csv(m,rg,'{}_{}.csv'.format(algorithm,h))
        save_csv(np.zeros((graph.nnodes,graph.nnodes)),rg,'{}_uniform.csv'.format(algorithm))
        save_csv(np.diagflat(np.zeros(graph.nnodes)+1),rg,'{}_selfloop.csv'.format(algorithm))

def save_csv(sparsematrix,rg,name):
    fn = os.path.join(rg.path,name)
    np.savetxt(fn, sparsematrix.toarray(), delimiter=",", fmt='%.5f')
    print('{} CSV saved!'.format(fn))
################################################################################
### MAIN
################################################################################
selfloops = False
isdirected = False
isweighted = False
ismultigraph = True
dependency = c.LOCAL
algorithm = 'colorgraph'
kmax = 10
klogscale = False
krank = 10
tocsv = False
nnodes = int(sys.argv[1])
walks = 2 * nnodes
output = '../resources/{}-{}-{}-{}nodes-kmax{}-{}walks'.format(algorithm,dependency,'logscale' if klogscale else 'intscale',nnodes,kmax,walks)
if not os.path.exists(output):
    os.makedirs(output)

rg = RandomWalkGraph(nnodes=nnodes,
                     walks=walks,
                     colors=COLORDIST,
                     probabilities=COLORPROB,
                     selfloops=selfloops,
                     isdirected=isdirected,
                     isweighted=isweighted,
                     ismultigraph=ismultigraph,
                     path=output,
                     name='data')
rg.createGraph()
h1 = build_hypothesis(rg,homophily,selfloops)
h2 = build_hypothesis(rg,heterophily,selfloops)
run_janus(rg,isdirected,isweighted,ismultigraph,dependency,algorithm,output,kmax,klogscale,krank,tocsv,
          homophily=h1,
          heterophily=h2)
rg.plot_adjacency(figsize=FIGSIZE)
rg.plot_color_distribution()
rg.plot_degree_rank()
plot_adjacency(rg,h1,'homophily',figsize=FIGSIZE)
plot_adjacency(rg,h2,'heterophily',figsize=FIGSIZE)
|
{
"content_hash": "210121525705cf0022a5d7c71f6b19b2",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 199,
"avg_line_length": 35.05145413870246,
"alnum_prop": 0.5345928006127139,
"repo_name": "lisette-espin/JANUS",
"id": "44e7b6fe0e30800e2c45afd6cd75a232d9c29b2a",
"size": "15668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-code/colorgraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "1154837"
},
{
"name": "Python",
"bytes": "104993"
}
],
"symlink_target": ""
}
|
from BitBoard import *
import time
from collections import deque

alpha = float('inf')
beta = float('-inf')


class MoveTree(object):
    """base to create a movement tree"""

    def __init__(self, bitboard, move=None, isMin=False, depth=0):
        if move != None:
            self.move = move
        self.isMin = isMin
        self.bitboard = bitboard
        self.children = []
        self.depth = depth

    def build_children_withdepth(self, depth=0):
        if self.isMin:
            nextMoves = self.bitboard.generate(-self.bitboard.my_team)
        else:
            nextMoves = self.bitboard.generate(self.bitboard.my_team)
        for m in nextMoves:
            node = MoveTree(m.apply(self.bitboard), m, not self.isMin, depth-1)
            node.build_children_withdepth(depth-1)
            self.children.append(node)
        return self.children

    def build_children(self):
        if self.isMin:
            nextMoves = self.bitboard.generate(-self.bitboard.my_team)
        else:
            nextMoves = self.bitboard.generate(self.bitboard.my_team)
        for m in nextMoves:
            node = MoveTree(m.apply(self.bitboard), m, not self.isMin)
            self.children.append(node)
        return self.children

    def get_heuristic(self):
        return self.bitboard.heuristic()

    def compute_alpha_beta(self):
        global alpha
        global beta
        if not self.children:
            return self.get_heuristic()
        if self.isMin:
            v = float('inf')
            for child in self.children:
                v = min(v, child.compute_alpha_beta())
                alpha = min(alpha, v)
                if alpha >= beta:
                    break
            return v
        else:
            v = float('-inf')
            for child in self.children:
                v = max(v, child.compute_alpha_beta())
                beta = max(beta, v)
                if alpha >= beta:
                    break
            return v

    def compute_alpha_beta_incr(self, prev_score):
        global alpha
        global beta
        delta = self.move.compute_delta(self.bitboard)
        score = prev_score + delta
        if not self.children:
            return score
        if self.isMin:
            v = float('inf')
            for child in self.children:
                v = min(v, child.compute_alpha_beta_incr(score))
                alpha = min(alpha, v)
                if alpha >= beta:
                    break
            return v
        else:
            v = float('-inf')
            for child in self.children:
                v = max(v, child.compute_alpha_beta_incr(score))
                beta = max(beta, v)
                if alpha >= beta:
                    break
            return v

    def get_best_move(self):
        global alpha
        alpha = float('inf')
        global beta
        beta = float('-inf')
        best_move = None
        for n in self.children:
            val = n.compute_alpha_beta()
            if best_move == None or best_val < val:
                best_move = n
                best_val = val
        return best_move

    def root_build_children(self, time_zero):
        t_zero = time_zero
        queue = deque()
        # look for leaves
        # breadth-first search
        nodesToExplore = deque()
        nodesToExplore.append(self)
        while len(nodesToExplore) != 0:
            node = nodesToExplore.popleft()
            if node.children == []:
                queue.append(node)
            else:
                nodesToExplore.extend(node.children)
        # build children, still BFS order
        while(time.time() < t_zero + 4.):
            node = queue.popleft()
            children = node.build_children()
            queue.extend(children)

    def get_right_child(self, bitboard):
        for child in self.children:
            if bitboard.equals(child.bitboard):
                return child
        return None
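
# Added usage sketch (illustrative; `current_bitboard` stands for a BitBoard
# instance from this repo):
#
#     root = MoveTree(current_bitboard)
#     root.build_children()                  # expand one ply
#     root.root_build_children(time.time())  # keep expanding for ~4 seconds
#     best = root.get_best_move()            # MoveTree node; best.move is the move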
|
{
"content_hash": "228ae09fb147313e4b1c9b82e52c64f4",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 70,
"avg_line_length": 24.1953125,
"alnum_prop": 0.6667742977074589,
"repo_name": "fabchiffre/Bob",
"id": "fa72827fd206f38487d22ecf2b27830bdc456c3c",
"size": "3097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoveTree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29641"
},
{
"name": "Shell",
"bytes": "241"
}
],
"symlink_target": ""
}
|
from xi_plugin import start_plugin, Plugin, edit

MATCHES = {"{": "}", "[": "]", "(": ")"}


class BracketCloser(Plugin):
    """Naively closes opened brackets, parens, & braces."""

    def update(self, view, author, rev, start, end,
               new_len, edit_type, text=None):
        resp = 0
        close_char = MATCHES.get(text)
        if close_char:
            # compute a delta from params:
            new_cursor = end + new_len
            # we set 'after_cursor' because we want the edit to appear to the right
            # of the active cursor. we set priority=HIGH because we want this edit
            # applied after concurrent edits.
            resp = self.new_edit(rev, (new_cursor, new_cursor), close_char,
                                 after_cursor=True, priority=edit.EDIT_PRIORITY_HIGH)
        return resp


def main():
    start_plugin(BracketCloser())


if __name__ == "__main__":
    main()
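
# Added walk-through (hypothetical values): typing "(" at offset 5 arrives as
# end=5, new_len=1, text="(", so new_cursor = end + new_len = 6 and the
# closing ")" is requested at (6, 6) -- just right of the cursor thanks to
# after_cursor=True, and applied after concurrent edits because of
# priority=EDIT_PRIORITY_HIGH.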
|
{
"content_hash": "4a8e63d9cb07f369d9ed4fd38b4993eb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 31.93103448275862,
"alnum_prop": 0.5658747300215983,
"repo_name": "modelorganism/xi-editor",
"id": "1a78b0f23b2e737da2a5e940dc32cb0c69bb0d5a",
"size": "1546",
"binary": false,
"copies": "2",
"ref": "refs/heads/master2",
"path": "python/bracket_example.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3963"
},
{
"name": "Makefile",
"bytes": "91"
},
{
"name": "Python",
"bytes": "46520"
},
{
"name": "Rust",
"bytes": "754958"
}
],
"symlink_target": ""
}
|
import allure
import pytest

from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status


@allure.issue("140")
@allure.feature("Integration")
@pytest.mark.parametrize("countdown", [2, 4])
def test_pytest_rerunfailures(allured_testdir, countdown):
    allured_testdir.testdir.makepyfile("""
        import threading
        import pytest

        back_to_normal = threading.local()

        @pytest.mark.flaky(reruns={countdown})
        def test_pytest_rerunfailures_example():
            countdown = getattr(back_to_normal, "countdown", 3)
            back_to_normal.countdown = countdown - 1
            assert not countdown > 0
    """.format(countdown=countdown))

    allured_testdir.run_with_allure()

    assert_that(allured_testdir.allure_report,
                has_test_case("test_pytest_rerunfailures_example",
                              with_status("failed" if countdown == 2 else "passed"))
                )
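
# Added note on the expected statuses: pytest-rerunfailures allows
# reruns + 1 attempts in total, and the inner test fails while its
# thread-local countdown (starting at 3) is still positive -- so reruns=2
# exhausts all 3 attempts (failed), while reruns=4 reaches an attempt with
# countdown 0 and passes.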
|
{
"content_hash": "7e52ca8ae8bd82eb424987f73780922a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 32.38709677419355,
"alnum_prop": 0.6563745019920318,
"repo_name": "allure-framework/allure-python",
"id": "d6be7db1f390a51fdd57938002341d9dd7c0d584",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allure-pytest/test/integration/pytest_rerunfailures/pytest_rerunfailures_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "35653"
},
{
"name": "Python",
"bytes": "335323"
},
{
"name": "RobotFramework",
"bytes": "14420"
}
],
"symlink_target": ""
}
|
import re
import struct
from typing import Dict, List, Callable, Sequence, Tuple, Type, Union, Optional
import logging
from mrcrowbar.common import BytesReadType
from typing_extensions import Literal
logger = logging.getLogger( __name__ )
NumberType = Union[Type[int], Type[float]]
Number = Union[int, float]
SignedEncoding = Literal["signed", "unsigned"]
EndianEncoding = Literal["big", "little", None]
NumberEncoding = Tuple[NumberType, int, SignedEncoding, EndianEncoding]
# Python doesn't provide a programmatic way of fetching the supported codec list.
# The below list is taken from the 3.7 manual.
CODECS = [
"ascii",
"big5",
"big5hkscs",
"cp037",
"cp273",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"cp1006",
"cp1026",
"cp1125",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"euc_jp",
"euc_jis_2004",
"euc_jisx0213",
"euc_kr",
"gb2312",
"gbk",
"gb18030",
"hz",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"latin_1",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"johab",
"koi8_r",
"koi8_t",
"koi8_u",
"kz1048",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"ptcp154",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"utf_32",
"utf_32_be",
"utf_32_le",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
"utf_8_sig",
]
REGEX_CHARS = """()[]{}?*+-|^$\\.&~#="""
byte_escape: Callable[[int], bytes] = lambda char: f"\\x{char:02x}".encode( "utf8" )
def regex_pattern_to_bytes(
    pattern: str,
    encoding: str = "utf8",
    fixed_string: bool = False,
    hex_format: bool = False,
) -> bytes:
    result = bytearray()

    # for hex format mode, strip out all whitespace characters first
    if hex_format:
        pattern = (
            pattern.replace( " ", "" )
            .replace( "\t", "" )
            .replace( "\n", "" )
            .replace( "\r", "" )
        )

    # strip out the automatic byte-order mark
    encoding_test = (
        encoding.lower().replace( " ", "" ).replace( "-", "" ).replace( "_", "" )
    )
    if encoding_test == "utf16":
        encoding = "utf-16-le"
    elif encoding_test == "utf32":
        encoding = "utf-32-le"

    pointer = 0
    repeat_block = False
    while pointer < len( pattern ):
        if pattern[pointer] == "\\" and not hex_format and not fixed_string:
            # an escaped character!
            if re.match( r"\\x[0-9A-Fa-f]{2}", pattern[pointer : pointer + 4] ):
                # escaped hex byte
                result.extend(
                    byte_escape(
                        bytes.fromhex( pattern[pointer + 2 : pointer + 4] )[0]
                    )
                )
                pointer += 4
            elif re.match( r'\\[\\\'"abfnrtv]', pattern[pointer : pointer + 2] ):
                # escaped single character
                char_id, char_raw = "\\'\"abfnrtv", "\\'\"\a\b\f\n\r\t\v"
                char_map = {
                    char_id[i]: ord( char_raw[i] ) for i in range( len( char_id ) )
                }
                result.extend( byte_escape( char_map[pattern[pointer + 1]] ) )
                pointer += 2
            elif pattern[pointer + 1] in REGEX_CHARS:
                # escaped character that's also a regex char
                result.extend( byte_escape( ord( pattern[pointer + 1] ) ) )
                pointer += 2
            else:
                raise ValueError( f"Unknown escape sequence \\{pattern[pointer + 1]}" )
        elif pattern[pointer] in REGEX_CHARS and not fixed_string:
            # a regex special character! inject it into the output unchanged
            if pattern[pointer] == "{":
                repeat_block = True
            elif pattern[pointer] == "}":
                repeat_block = False
            result.extend( pattern[pointer].encode( "utf8" ) )
            pointer += 1
        elif repeat_block:
            # inside a repeat block, don't encode anything
            result.extend( pattern[pointer].encode( "utf8" ) )
            pointer += 1
        elif hex_format:
            # we're in hex string mode; treat as raw hexadecimal
            if not re.match( r"[0-9A-Fa-f]{2}", pattern[pointer : pointer + 2] ):
                raise ValueError(
                    f"Sequence {pattern[pointer:pointer + 2]} is not valid hexadecimal"
                )
            result.extend( byte_escape( int( pattern[pointer : pointer + 2], 16 ) ) )
            pointer += 2
        else:
            # a normal character! encode as bytes, and inject escaped digits into the output
            for char in pattern[pointer].encode( encoding ):
                result.extend( byte_escape( char ) )
            pointer += 1
    return bytes( result )
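
# Illustrative examples (added, not part of the module): every input byte is
# emitted in escaped \xNN form, so a plain string and its hex spelling
# produce the same bytes-level pattern.
#
#     regex_pattern_to_bytes( "AB" )                     == b"\\x41\\x42"
#     regex_pattern_to_bytes( "4142", hex_format=True )  == b"\\x41\\x42"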
def regex_unknown_encoding_match(
    source: str, char_size: int = 1
) -> Tuple[Dict[str, int], bytes]:
    match_map: Dict[str, int] = {}
    pattern = bytearray()
    for char in source:
        if char not in match_map:
            match_id = len( match_map )
            match_group = f"?P<p{match_id}>.".encode( "utf8" )
            if char_size != 1:
                match_group += b"{" + f"{char_size}".encode( "utf8" ) + b"}"
            if len( pattern ) == 0:
                pattern += b"(" + match_group + b")"
            else:
                pattern += b"(" + match_group + b"(?<!"
                pattern += b"|".join(
                    [
                        f"(?P=p{match_map[c]})".encode( "utf8" )
                        for c in match_map
                        if c != char
                    ]
                )
                pattern += b"))"
            match_map[char] = match_id
        else:
            pattern += f"(?P=p{match_map[char]})".encode( "utf8" )
    if len( source ) == len( match_map ):
        logger.warning(
            "Input has no repeated characters! This can make an enormous number of false matches, and is likely not what you want"
        )
    return match_map, bytes( pattern )
RAW_TYPE_NAME: Dict[NumberEncoding, str] = {
    (int, 1, "signed", "little"): "int8",
    (int, 1, "unsigned", "little"): "uint8",
    (int, 1, "signed", "big"): "int8",
    (int, 1, "unsigned", "big"): "uint8",
    (int, 1, "signed", None): "int8",
    (int, 1, "unsigned", None): "uint8",
    (int, 2, "signed", "little"): "int16_le",
    (int, 3, "signed", "little"): "int24_le",
    (int, 4, "signed", "little"): "int32_le",
    (int, 8, "signed", "little"): "int64_le",
    (int, 2, "unsigned", "little"): "uint16_le",
    (int, 3, "unsigned", "little"): "uint24_le",
    (int, 4, "unsigned", "little"): "uint32_le",
    (int, 8, "unsigned", "little"): "uint64_le",
    (float, 4, "signed", "little"): "float32_le",
    (float, 8, "signed", "little"): "float64_le",
    (int, 2, "signed", "big"): "int16_be",
    (int, 3, "signed", "big"): "int24_be",
    (int, 4, "signed", "big"): "int32_be",
    (int, 8, "signed", "big"): "int64_be",
    (int, 2, "unsigned", "big"): "uint16_be",
    (int, 3, "unsigned", "big"): "uint24_be",
    (int, 4, "unsigned", "big"): "uint32_be",
    (int, 8, "unsigned", "big"): "uint64_be",
    (float, 4, "signed", "big"): "float32_be",
    (float, 8, "signed", "big"): "float64_be",
}

RAW_TYPE_NAME_REVERSE = {v: k for k, v in RAW_TYPE_NAME.items()}

RAW_TYPE_STRUCT: Dict[Tuple[NumberType, int, SignedEncoding], str] = {
    (int, 1, "unsigned"): "B",
    (int, 1, "signed"): "b",
    (int, 2, "unsigned"): "H",
    (int, 2, "signed"): "h",
    (int, 4, "unsigned"): "I",
    (int, 4, "signed"): "i",
    (int, 8, "unsigned"): "Q",
    (int, 8, "signed"): "q",
    (float, 4, "signed"): "f",
    (float, 8, "signed"): "d",
}
FROM_RAW_TYPE: Dict[NumberEncoding, Callable[[BytesReadType], Number]] = {}
TO_RAW_TYPE: Dict[NumberEncoding, Callable[[Number], bytes]] = {}
FROM_RAW_TYPE_ARRAY: Dict[NumberEncoding, Callable[[BytesReadType], List[Number]]] = {}
TO_RAW_TYPE_ARRAY: Dict[NumberEncoding, Callable[[Sequence[Number]], bytes]] = {}
def get_raw_type_struct(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
    count: Optional[int] = None,
) -> str:
    endianness = ">" if endian == "big" else "<"
    count_str = count if count is not None else ""
    return f"{endianness}{count_str}{RAW_TYPE_STRUCT[(format_type, field_size, signedness)]}"


def get_raw_type_description(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
) -> Tuple[str, str]:
    TYPE_NAMES: Dict[NumberType, str] = {
        int: "integer",
        float: "floating-point number",
    }
    type_name = TYPE_NAMES[format_type]
    prefix = (
        ("signed " if signedness == "signed" else "unsigned ")
        if format_type == int
        else ""
    )
    suffix = f" ({endian}-endian)" if field_size > 1 else ""
    return f"{prefix}{field_size * 8}-bit {type_name}{suffix}", type_name


def _from_raw_type(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
) -> Callable[[BytesReadType], Number]:
    result: Callable[[BytesReadType], Number] = lambda buffer: struct.unpack(
        get_raw_type_struct( format_type, field_size, signedness, endian ), buffer
    )[0]
    result.__doc__ = "Convert a {0} byte string to a Python {1}.".format(
        *get_raw_type_description( format_type, field_size, signedness, endian )
    )
    return result


def _to_raw_type(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
) -> Callable[[Number], bytes]:
    result: Callable[[Number], bytes] = lambda value: struct.pack(
        get_raw_type_struct( format_type, field_size, signedness, endian ), value
    )
    result.__doc__ = "Convert a Python {1} to a {0} byte string.".format(
        *get_raw_type_description( format_type, field_size, signedness, endian )
    )
    return result


def _from_raw_type_array(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
) -> Callable[[BytesReadType], List[Number]]:
    result: Callable[[BytesReadType], List[Number]] = lambda buffer: list(
        struct.unpack(
            get_raw_type_struct(
                format_type,
                field_size,
                signedness,
                endian,
                count=len( buffer ) // field_size,
            ),
            buffer,
        )
    )
    result.__doc__ = "Convert a {0} byte string to a Python list of {1}s.".format(
        *get_raw_type_description( format_type, field_size, signedness, endian )
    )
    return result


def _to_raw_type_array(
    format_type: NumberType,
    field_size: int,
    signedness: SignedEncoding,
    endian: EndianEncoding,
) -> Callable[[Sequence[Number]], bytes]:
    result: Callable[[Sequence[Number]], bytes] = lambda value_list: struct.pack(
        get_raw_type_struct(
            format_type, field_size, signedness, endian, count=len( value_list )
        ),
        *value_list,
    )
    result.__doc__ = "Convert a Python list of {1}s to a {0} byte string.".format(
        *get_raw_type_description( format_type, field_size, signedness, endian )
    )
    return result


def _from_generic_array(
    type_id: NumberEncoding, from_raw: Callable[[BytesReadType], Number]
):
    result: Callable[[BytesReadType], List[Number]] = lambda buffer: [
        from_raw( buffer[i : i + type_id[1]] )
        for i in range( 0, len( buffer ), type_id[1] )
    ]
    result.__doc__ = "Convert a {0} byte string to a Python list of {1}s.".format(
        *get_raw_type_description( *type_id )
    )
    return result


def _to_generic_array( type_id: NumberEncoding, to_raw: Callable[[Number], bytes] ):
    result: Callable[[Sequence[Number]], bytes] = lambda value_list: b"".join(
        [to_raw( value ) for value in value_list]
    )
    result.__doc__ = "Convert a Python list of {1}s to a {0} byte string.".format(
        *get_raw_type_description( *type_id )
    )
    return result
# autogenerate conversion methods based on struct
for format_type, field_size, signedness in RAW_TYPE_STRUCT:
    endian_choices: List[EndianEncoding] = (
        [None, "little", "big"] if field_size == 1 else ["little", "big"]
    )
    endian: EndianEncoding
    for endian in endian_choices:
        type_id = (format_type, field_size, signedness, endian)
        FROM_RAW_TYPE[type_id] = _from_raw_type( *type_id )
        TO_RAW_TYPE[type_id] = _to_raw_type( *type_id )
        FROM_RAW_TYPE_ARRAY[type_id] = _from_raw_type_array( *type_id )
        TO_RAW_TYPE_ARRAY[type_id] = _to_raw_type_array( *type_id )

# 24-bit types
RAW_24 = ["int24_le", "uint24_le", "int24_be", "uint24_be"]


def _from_raw_24( type_id: NumberEncoding ):
    signedness: SignedEncoding
    endian: EndianEncoding
    format_type, field_size, signedness, endian = type_id
    assert format_type == int
    assert field_size == 3
    assert endian in ("little", "big")
    assert signedness in ("signed", "unsigned")

    def result( buffer: BytesReadType ):
        if endian == "little":
            buffer = bytes( buffer ) + (
                b"\xff" if (signedness == "signed" and buffer[2] >= 0x80) else b"\x00"
            )
        elif endian == "big":
            buffer = (
                b"\xff" if (signedness == "signed" and buffer[0] >= 0x80) else b"\x00"
            ) + bytes(buffer)
        return FROM_RAW_TYPE[(format_type, 4, signedness, endian)]( buffer )

    result.__doc__ = "Convert a {0} byte string to a Python {1}.".format(
        *get_raw_type_description( *type_id )
    )
    return result


def _to_raw_24( type_id: NumberEncoding ):
    signedness: SignedEncoding
    endian: EndianEncoding
    format_type, field_size, signedness, endian = type_id
    assert format_type == int
    assert field_size == 3
    assert endian in ("little", "big")
    assert signedness in ("signed", "unsigned")

    def result( value: Number ):
        if signedness == "signed":
            assert value in range( -1 << 23, 1 << 23 )
        else:
            assert value in range( 0, 1 << 24 )
        output = TO_RAW_TYPE[(format_type, 4, signedness, endian)]( value )
        if endian == "little":
            output = output[:3]
        elif endian == "big":
            output = output[1:]
        return output

    result.__doc__ = "Convert a Python {1} to a {0} byte string.".format(
        *get_raw_type_description( *type_id )
    )
    return result


for code in RAW_24:
    type_id = RAW_TYPE_NAME_REVERSE[code]
    FROM_RAW_TYPE[type_id] = _from_raw_24( type_id )
    TO_RAW_TYPE[type_id] = _to_raw_24( type_id )
    FROM_RAW_TYPE_ARRAY[type_id] = _from_generic_array(
        type_id, FROM_RAW_TYPE[type_id]
    )
    TO_RAW_TYPE_ARRAY[type_id] = _to_generic_array( type_id, TO_RAW_TYPE[type_id] )


def unpack( type_id: NumberEncoding, value: bytes ):
    if isinstance( type_id, str ):
        type_id = RAW_TYPE_NAME_REVERSE[type_id]
    return FROM_RAW_TYPE[type_id]( value )


def pack( type_id: NumberEncoding, value: Number ):
    if isinstance( type_id, str ):
        type_id = RAW_TYPE_NAME_REVERSE[type_id]
    return TO_RAW_TYPE[type_id]( value )


def unpack_array( type_id: NumberEncoding, values: bytes ):
    if isinstance( type_id, str ):
        type_id = RAW_TYPE_NAME_REVERSE[type_id]
    return FROM_RAW_TYPE_ARRAY[type_id]( values )


def pack_array( type_id: NumberEncoding, values: List[Number] ):
    if isinstance( type_id, str ):
        type_id = RAW_TYPE_NAME_REVERSE[type_id]
    return TO_RAW_TYPE_ARRAY[type_id]( values )
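
# A small illustrative check (added sketch, not part of the module): the
# public helpers also accept the string names from RAW_TYPE_NAME in place of
# a NumberEncoding tuple.
if __name__ == "__main__":
    assert pack( "uint16_le", 513 ) == b"\x01\x02"
    assert unpack( "uint16_le", b"\x01\x02" ) == 513
    assert pack_array( "int24_be", [1, -1] ) == b"\x00\x00\x01\xff\xff\xff"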
|
{
"content_hash": "759ab7865616d039c9cdc631b59eab3a",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 130,
"avg_line_length": 31.464491362763916,
"alnum_prop": 0.5553589946928567,
"repo_name": "moralrecordings/mrcrowbar",
"id": "c86f7784bc22aa743dad04fa6a2f6799916bf14a",
"size": "16393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mrcrowbar/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "563354"
}
],
"symlink_target": ""
}
|
from urllib.parse import urlencode
from urllib.request import urlopen
from django.apps import apps as django_apps
from django.conf import settings
from django.core import paginator
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch, reverse
from django.utils import translation
PING_URL = "https://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
    pass


def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True):
    """
    Alert Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urls.reverse().
    """
    sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https)
    params = urlencode({'sitemap': sitemap_full_url})
    urlopen('%s?%s' % (ping_url, params))
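
# Added usage sketch (not part of Django): typically invoked from a deploy
# hook or a model's save() once sitemap contents change.
#
#     from django.contrib.sitemaps import ping_google
#     ping_google('/sitemap.xml')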
def _get_sitemap_full_url(sitemap_url, sitemap_uses_https=True):
    if not django_apps.is_installed('django.contrib.sites'):
        raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")

    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = reverse('django.contrib.sitemaps.views.index')
        except NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = reverse('django.contrib.sitemaps.views.sitemap')
            except NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    Site = django_apps.get_model('sites.Site')
    current_site = Site.objects.get_current()
    scheme = 'https' if sitemap_uses_https else 'http'
    return '%s://%s%s' % (scheme, current_site.domain, sitemap_url)


class Sitemap:
    # This limit is defined by Google. See the index documentation at
    # https://www.sitemaps.org/protocol.html#index.
    limit = 50000

    # If protocol is None, the URLs in the sitemap will use the protocol
    # with which the sitemap was requested.
    protocol = None

    # Enables generating URLs for all languages.
    i18n = False

    # Override list of languages to use.
    languages = None

    # Enables generating alternate/hreflang links.
    alternates = False

    # Add an alternate/hreflang link with value 'x-default'.
    x_default = False

    def _get(self, name, item, default=None):
        try:
            attr = getattr(self, name)
        except AttributeError:
            return default
        if callable(attr):
            if self.i18n:
                # Split the (item, lang_code) tuples again for the location,
                # priority, lastmod and changefreq method calls.
                item, lang_code = item
            return attr(item)
        return attr

    def _languages(self):
        if self.languages is not None:
            return self.languages
        return [lang_code for lang_code, _ in settings.LANGUAGES]

    def _items(self):
        if self.i18n:
            # Create (item, lang_code) tuples for all items and languages.
            # This is necessary to paginate with all languages already considered.
            items = [
                (item, lang_code)
                for lang_code in self._languages()
                for item in self.items()
            ]
            return items
        return self.items()

    def _location(self, item, force_lang_code=None):
        if self.i18n:
            obj, lang_code = item
            # Activate language from item-tuple or forced one before calling location.
            with translation.override(force_lang_code or lang_code):
                return self._get('location', item)
        return self._get('location', item)

    @property
    def paginator(self):
        return paginator.Paginator(self._items(), self.limit)

    def items(self):
        return []

    def location(self, item):
        return item.get_absolute_url()

    def get_protocol(self, protocol=None):
        # Determine protocol
        return self.protocol or protocol or 'http'

    def get_domain(self, site=None):
        # Determine domain
        if site is None:
            if django_apps.is_installed('django.contrib.sites'):
                Site = django_apps.get_model('sites.Site')
                try:
                    site = Site.objects.get_current()
                except Site.DoesNotExist:
                    pass
            if site is None:
                raise ImproperlyConfigured(
                    "To use sitemaps, either enable the sites framework or pass "
                    "a Site/RequestSite object in your view."
                )
        return site.domain

    def get_urls(self, page=1, site=None, protocol=None):
        protocol = self.get_protocol(protocol)
        domain = self.get_domain(site)
        return self._urls(page, protocol, domain)

    def _urls(self, page, protocol, domain):
        urls = []
        latest_lastmod = None
        all_items_lastmod = True  # track if all items have a lastmod

        paginator_page = self.paginator.page(page)
        for item in paginator_page.object_list:
            loc = f'{protocol}://{domain}{self._location(item)}'
            priority = self._get('priority', item)
            lastmod = self._get('lastmod', item)

            if all_items_lastmod:
                all_items_lastmod = lastmod is not None
                if (all_items_lastmod and
                        (latest_lastmod is None or lastmod > latest_lastmod)):
                    latest_lastmod = lastmod

            url_info = {
                'item': item,
                'location': loc,
                'lastmod': lastmod,
                'changefreq': self._get('changefreq', item),
                'priority': str(priority if priority is not None else ''),
            }

            if self.i18n and self.alternates:
                alternates = []
                for lang_code in self._languages():
                    loc = f'{protocol}://{domain}{self._location(item, lang_code)}'
                    alternates.append({
                        'location': loc,
                        'lang_code': lang_code,
                    })
                if self.x_default:
                    lang_code = settings.LANGUAGE_CODE
                    loc = f'{protocol}://{domain}{self._location(item, lang_code)}'
                    loc = loc.replace(f'/{lang_code}/', '/', 1)
                    alternates.append({
                        'location': loc,
                        'lang_code': 'x-default',
                    })
                url_info['alternates'] = alternates

            urls.append(url_info)

        if all_items_lastmod and latest_lastmod:
            self.latest_lastmod = latest_lastmod

        return urls


class GenericSitemap(Sitemap):
    priority = None
    changefreq = None

    def __init__(self, info_dict, priority=None, changefreq=None, protocol=None):
        self.queryset = info_dict['queryset']
        self.date_field = info_dict.get('date_field')
        self.priority = self.priority or priority
        self.changefreq = self.changefreq or changefreq
        self.protocol = self.protocol or protocol

    def items(self):
        # Make sure to return a clone; we don't want premature evaluation.
        return self.queryset.filter()

    def lastmod(self, item):
        if self.date_field is not None:
            return getattr(item, self.date_field)
        return None
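
# Added usage sketch (the `Blog` model and its `pub_date` field are
# hypothetical): GenericSitemap wraps any queryset whose model implements
# get_absolute_url().
#
#     from django.contrib.sitemaps import GenericSitemap
#     info_dict = {'queryset': Blog.objects.all(), 'date_field': 'pub_date'}
#     sitemap = GenericSitemap(info_dict, priority=0.6, changefreq='weekly')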
|
{
"content_hash": "95863102f9f67cb9a1824b604828ecc3",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 113,
"avg_line_length": 35.62672811059908,
"alnum_prop": 0.5881515974647523,
"repo_name": "elena/django",
"id": "b13507a11ede2f4566e63d9ba2992d04e6f6cf9d",
"size": "7731",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "django/contrib/sitemaps/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43253"
},
{
"name": "HTML",
"bytes": "171768"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11016010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import os.path
TESTDATA_ROOT = os.path.abspath(os.path.dirname(__file__))
TESTFILE_PATH = os.path.join(TESTDATA_ROOT, 'example.data')
TESTIMAGE_PATH = os.path.join(TESTDATA_ROOT, 'example.jpeg')
|
{
"content_hash": "99073cd9bfff6e2c58d329616bc8eb0a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 60,
"avg_line_length": 39.2,
"alnum_prop": 0.7397959183673469,
"repo_name": "FactoryBoy/factory_boy",
"id": "b0de791a903a347c0b849d3a946a495894ebd037",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testdata/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3337"
},
{
"name": "Python",
"bytes": "330382"
}
],
"symlink_target": ""
}
|
"""
Make a big data store and see what we learn
from playing with it.
"""
import os
import py.test
import tiddlyweb.stores.text
from tiddlyweb.store import NoBagError
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.recipe import Recipe
from tiddlyweb import control
from fixtures import reset_textstore, muchdata, _teststore
def setup_module(module):
    reset_textstore()
    module.store = _teststore()
    if type(module.store.storage) != tiddlyweb.stores.text.Store:
        py.test.skip('skipping this test for non-text store')
    muchdata(module.store)


def test_many_bags_and_tiddlers():
    """
    Create a bunch of bags and tiddlers.
    """
    assert len(os.listdir('store/bags')) == 30, '30 bags created'
    assert len(os.listdir('store/bags/bag0/tiddlers')) == 10, '10 tiddlers created in a bag'


def test_long_recipe():
    """
    Check muchdata() stored a recipe
    """
    assert os.path.exists('store/recipes/long'), 'long recipe put to disk'


def test_construct_from_recipe():
    """
    Make sure the tiddlywiki that results from
    a recipe has the right stuff in it.
    """
    recipe = Recipe('long')
    recipe = store.get(recipe)
    serializer = Serializer('html')
    serializer.object = recipe
    html_text = serializer.to_string()
    assert 'filter:select=title:tiddler8' in html_text


def test_get_tiddlers_from_bag():
    """
    Make sure a bag comes to life as expected.
    """
    bag = Bag('bag0')
    bag = store.get(bag)
    tiddlers = list(control.get_tiddlers_from_bag(bag))
    assert len(tiddlers) == 10, 'there are 10 tiddlers in bag0'
    text = ''
    for tiddler in tiddlers:
        store.get(tiddler)
        text += tiddler.text
    assert 'i am tiddler 4' in text
def test_filter_tiddlers_from_bag():
    """
    Make sure a bag comes to life and filters as expected.
    """
    bag = Bag('bag0')
    bag = store.get(bag)
    tiddlers = list(control._filter_tiddlers_from_bag(bag, 'select=tag:tagfour',
        environ={'tiddlyweb.store': store}))
    assert len(tiddlers) == 3
|
{
"content_hash": "4ef9212677f36f4ff5fd0a5f2cebbe78",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 92,
"avg_line_length": 26.195121951219512,
"alnum_prop": 0.6717877094972067,
"repo_name": "funkyeah/tiddlyweb",
"id": "94d78531bb7351b78f6d9e56a5187b7f24cf24fd",
"size": "2148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_muchdata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the testing decoders based on
parsed table representations.
"""
# This file generates testing code for our class decoder. The decoder
# tables are specifically written to minimize the number of decoder
# classes needed to parse valid ARM instructions. For testing, this is
# a problem. We can't (easily) tell if the intended instruction rules
# of ARM are being met, since there is not a one-to-one mapping from
# class decoders to rules.
#
# For example, consider the following two rows (from armv7.table):
#
# | 0011x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
#            Rsb_Rule_144_A1_P288
#            cccc0000011snnnnddddssss0tt1mmmm
#            RegsNotPc
# | 0100x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
#            Add_Rule_7_A1_P26
#            cccc0000100snnnnddddssss0tt1mmmm
#            RegsNotPc
#
# Both rows state to return a Binary4RegisterShiftedOp class decoder.
# The sequence of four symbols corresponds to (in order presented):
#
#    baseline - The name of the class decoder that should be used for testing.
#    actual - The name of the class decoder to use in sel_ldr
#    rule - A unique name identifying the rule from the manual that
#           defines what the selected class decoder is to decode.
#    pattern - The sequence of bits defined by the rule (above)
#    constraints - Any additional constraints assumed by the rule.
#
# All but the baseline is optional. The remaining fields provide
# additional documentation and information for testing (which is used
# by this file). If the actual is not specified (prefixed by '=>')
# then it is assumed to have the same value as the baseline.
#
# If these two rows had a mergable bit pattern (which they do not),
# these rows would still not be mergable since the actions are
# different. However, for sel_ldr, they both state to use a
# Binary4RegisterShiftedOp. The remaining identifiers are added data
# for testing only.
#
# We fix this by defining a notion of "action_filter" where one can
# choose to keep only those fields that are applicable. For sel_ldr,
# it's only 'actual'. For testing, it will include other fields,
# depending on the context.
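#
# For example (an illustrative sketch; the variable names are
# hypothetical), filtering the actions of the two rows shown earlier,
#
#   rsb_action.action_filter(['actual'])  # keeps Defs12To15RdRnRsRmNotPc
#   add_action.action_filter(['actual'])  # keeps Defs12To15RdRnRsRmNotPc
#
# yields identical actions, so the rows become mergeable for sel_ldr
# even though their testing rules (Rsb vs Add) differ.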
#
# Note: The current ARM instruction table has both new and old
# actions. Old actions only define the 'InstClass' entry. If the
# remaining fields are omitted, the corresponding testing for those
# entries is omitted.
#
# Note: See dgen_decoder_output.py for more details on how we build a
# decoder for sel_ldr.
#
# For testing, we would like to know the specific instruction rule
# that was being tested. Further, we would like to know what
# instruction rule was chosen for each decoder class selection made by
# the parse tables. To do this, we do two levels of wrapping.
#
# This file generates a set of wrapper classes, each a subclass of
# NamedClassDecoder. One is generated for each InstClass needed by
# sel_ldr (i.e. only the 'actual' field). These named classes correspond
# to what sel_ldr will select.
#
# The named version of each named InstClass is:
#
# class NamedInstClass : public NamedClassDecoder {
# public:
# NamedInstClass()
# : NamedClassDecoder(decoder_, "InstClass")
# {}
#
# private:
# Binary3RegisterShiftedTest decoder_;
# NACL_DISALLOW_COPY_AND_ASSIGN(NamedInstClass);
#};
#
# This makes sure that each decoder class can be identified using a
# separate class decoder. For rows without rules, the corresponding
# named class 'NamedInstClass' will be used. If a row also has
# a rule, the 'NamedInstClass' is converted to 'NamedRuleInstClass' where
# 'Rule' is the name of the rule.
#
# The base class for NamedClassDecoder is specified in
# "named_class_decoder.h". This file defines a class that takes a
# ClassDecoder (reference) C and a print name NAME, and builds a
# corresponding ClassDecoder that acts like C, but will print out
# NAME. The behaviour of C is maintained by dispatching each virtual
# on the NamedClassDecoder to the corresponding virtual on C.
#
# We then define the class decoder Decoder, by defining a derived
# instance of DecoderState as follows:
#
# class NamedDecoder : DecoderState {
# public:
# explicit NamedDecoder();
# const NamedClassDecoder& decode_named(const Instruction) const;
# virtual const ClassDecoder& decode(const Instruction) const;
# ...
# };
#
# The method decode is the expected API for the NamedDecoder, which is
# an instance of DecoderState (defined in decode.h). The method
# decode_named is the same, but returns NamedClassDecoder's so that
# good error messages can be generated by the test harnesses for
# ClassDecoder's (see decoder_tester.h for more details on
# ClassDecoder test harnesses).
#
# To the NamedDecoder, we add a constant field NamedClassDecoder for
# each possible class decoder method decode_named could return, or
# that we could use in automatically generated tests. These fields
# allow us to only create the corresponding decoder classes once
# (during constructor initialization).
#
# Finally, we add a method corresponding to each defined decoder
# table. The forms of these decoders is:
#
# inline const NamedClassDecoder& decode_TABLE(
# const nacl_arm_dec::Instruction inst) const;
#
# Each of these methods are defined as inline methods so that they can
# be optimized away in the corresponding top level methods (i.e.
# decode_named and decode).
#
# For testing, there are four files generated:
#
# decoder_named_classes.h
# decoder_named_decoder.h
# decoder_named.cc
# decoder_tests.cc
#
# File decoder_named_classes.h defines the class declarations for the
# generated Rule classes, and named class decoder classes. File
# decoder_named_decoder.h defines the decoder class NamedDecoder
# (discussed above). decoder_named.cc contains the corresponding
# implementations of the constructors and methods of these classes.
#
# decoder_tests.cc generates an automatic test harness executable,
# that will test each instruction Rule. Each test generates all
# possible matches of the corresponding Pattern of the table rule,
# and calls the corresponding tester associated with the class decoder
# of that row. By default, the tester is presumed to be named.
#
# InstClassTester
#
# If the row defines a Constraints identifier, then the tester
#
# InstClassTesterConstraints
#
# is used instead.
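#
# For example (a hypothetical instance), a row whose baseline decoder is
# Binary4RegisterShiftedOp and whose constraints column holds RegsNotPc
# would be exercised by a tester named:
#
#   Binary4RegisterShiftedOpTesterRegsNotPc
#
# rather than the unconstrained Binary4RegisterShiftedOpTester.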
import dgen_core
import dgen_opt
import dgen_output
import dgen_decoder
import dgen_actuals
import dgen_baselines
"""The current command line arguments to use"""
_cl_args = {}
# The following defines naming conventions used for identifiers.
# Note: DECODER will be replaced by 'actual' and 'baseline', defining
# how both types of symbols are generated.
CLASS = '%(DECODER)s_%(rule)s'
NAMED_CLASS = 'Named%(DECODER)s_%(rule)s'
INSTANCE = '%(DECODER_class)s_instance_'
BASE_TESTER='%(decoder_base)sTester%(base_test_case)s'
BASE_BASE_TESTER='%(decoder_base)sTester%(qualifier)s'
DECODER_TESTER='%(baseline)sTester_%(test_case)s'
def _safety_to_check(safety):
return [s for s in safety if not isinstance(s, str)]
def _interesting_patterns(patterns):
""" Filters out non-interesting patterns."""
# Only include rows not corresponding to rule pattern,
# and not always true.
return [ p for p in patterns if (
(not p.column or p.column.name() != '$pattern')
and not p.matches_any())]
def _install_action(decoder, action, values):
"""Install common names needed to generate code for the given action,
and adds it to the values map.
"""
# This code is somewhat inefficient in that, in most cases, most of the
# added strings are not needed. On the other hand, by having a
# single routine that generates all action specific names at one
# spot, it is much easier to change definitions.
values['baseline'] = action.baseline()
values['actual'] = action.actual()
values['decoder_base'] = decoder.base_class(values['baseline'])
values['rule'] = action.rule()
values['qualifier'] = ''.join([s for s in action.safety()
if isinstance(s, str)])
if action.constraints():
values['qualifier'] += (action.constraints().other
if action.constraints().other else '')
else:
values['qualifier'] = ''
values['pattern'] = action.pattern()
# Add dummies for row cases, in case not set up. See
# function _install_row_cases) for more details on these fields.
for field in [ 'base_test_case', 'test_case', 'test_pattern' ]:
if not values.get(field):
values[field] = ''
values['baseline_class'] = _decoder_replace(CLASS, 'baseline') % values
values['actual_class'] = _decoder_replace(CLASS, 'actual') % values
_install_baseline_and_actuals('named_DECODER_class', NAMED_CLASS, values)
_install_baseline_and_actuals('DECODER_instance', INSTANCE, values)
values['base_tester'] = BASE_TESTER % values
values['base_base_tester'] = BASE_BASE_TESTER % values
values['decoder_tester'] = DECODER_TESTER % values
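# Illustrative expansion (hypothetical action): for a row with baseline
# Binary4RegisterShiftedOp, actual Defs12To15RdRnRsRmNotPc, and rule
# Add_Rule_7_A1_P26, the templates above produce, among others:
#
#   values['baseline_class'] = 'Binary4RegisterShiftedOp_Add_Rule_7_A1_P26'
#   values['actual_class'] = 'Defs12To15RdRnRsRmNotPc_Add_Rule_7_A1_P26'
#   values['named_baseline_class'] = 'NamedBinary4RegisterShiftedOp_Add_Rule_7_A1_P26'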
def _decoder_replace(string, basis):
return string.replace('DECODER', basis)
def _install_key_pattern(key, pattern, basis, values):
# Replace DECODER in key and pattern with basis, then
# install into values.
values[_decoder_replace(key, basis)] = (
_decoder_replace(pattern, basis) % values)
def _install_baseline_and_actuals(key, pattern, values):
# Replace DECODER with 'baseline' and 'actual', apply it
# to the key and pattern, and then install into values.
for basis in ['baseline', 'actual']:
_install_key_pattern(key, pattern, basis, values)
def _generate_baseline_and_actual(code, symbol, decoder,
values, out, actions=['rule']):
""" Generates code to define the given symbol. Does so for both
baseline and actual decoders, filtering using actions.
code - The code to generate.
symbol - The symbol being defined.
decoder - The decoder (tables) to use.
values - The name map to use to generate code.
out - The output stream to write the generated code to.
actions - The fields to keep when generating code.
"""
generated_symbols = set()
# Generate one for each type of baseline decoder.
baseline_actions = actions[:]
baseline_actions.insert(0, 'baseline')
baseline_code = _decoder_replace(code, 'baseline')
baseline_symbol = _decoder_replace(symbol, 'baseline')
for d in decoder.action_filter(baseline_actions).decoders():
_install_action(decoder, d, values)
sym_name = (baseline_symbol % values)
if sym_name not in generated_symbols:
out.write(baseline_code % values)
generated_symbols.add(sym_name)
# Generate one for each actual type that is different than the
# baseline.
actual_actions = actions[:]
actual_actions.insert(0, 'actual-not-baseline')
actual_code = _decoder_replace(code, 'actual')
actual_symbol = _decoder_replace(symbol, 'actual')
for d in decoder.action_filter(actual_actions).decoders():
# Note: 'actual-not-baseline' sets actual to None if same as baseline.
if d.actual():
_install_action(decoder, d, values)
sym_name = (actual_symbol % values)
if sym_name not in generated_symbols:
out.write(actual_code % values)
generated_symbols.add(sym_name)
# Defines the header for decoder_bases.h
NAMED_BASES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_baselines.h"
namespace nacl_arm_test {
"""
GENERATED_BASELINE_HEADER="""
/*
* Define named class decoders for each automatically generated baseline
* decoder.
*/
"""
NAMED_GEN_BASE_DECLARE="""class Named%(gen_base)s
: public NamedClassDecoder {
public:
Named%(gen_base)s()
: NamedClassDecoder(decoder_, "%(gen_base)s")
{}
private:
nacl_arm_dec::%(gen_base)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(Named%(gen_base)s);
};
"""
NAMED_BASES_H_FOOTER="""
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
NAMED_BASES_H_SUFFIX = '_named_bases.h'
def generate_named_bases_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for testing generated baselines.
Args:
decoder: Decoder (set of tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith(NAMED_BASES_H_SUFFIX)
_cl_args = cl_args
decoder = dgen_baselines.AddBaselinesToDecoder(decoder)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len(NAMED_BASES_H_SUFFIX)],
'decoder_name': decoder_name,
}
out.write(NAMED_BASES_H_HEADER % values)
_generate_generated_baseline(decoder, out)
out.write(NAMED_BASES_H_FOOTER % values)
def _generate_generated_baseline(decoder, out):
""" Generates code to define the given symbol. Does so for
the generated baseline decoders, filtering using actions.
"""
generated_symbols = set()
values = {}
out.write(GENERATED_BASELINE_HEADER % values)
for d in decoder.action_filter(['generated_baseline']).decoders():
gen_base = d.find('generated_baseline')
if gen_base and gen_base not in generated_symbols:
values['gen_base'] = gen_base
out.write(NAMED_GEN_BASE_DECLARE % values)
generated_symbols.add(gen_base)
# Defines the header for decoder_named_classes.h
NAMED_CLASSES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_actuals.h"
#include "%(FILENAME_BASE)s_named_bases.h"
"""
RULE_CLASSES_HEADER="""
/*
* Define rule decoder classes.
*/
namespace nacl_arm_dec {
"""
RULE_CLASS="""class %(DECODER_class)s
: public %(DECODER)s {
};
"""
RULE_CLASS_SYM="%(DECODER_class)s"
NAMED_DECODERS_HEADER="""} // nacl_arm_dec
namespace nacl_arm_test {
/*
* Define named class decoders for each class decoder.
* The main purpose of these classes is to introduce
* instances that are named specifically to the class decoder
* and/or rule that was used to parse them. This makes testing
* much easier in that error messages use these named classes
* to clarify what row in the corresponding table was used
* to select this decoder. Without these names, debugging the
* output of the test code would be nearly impossible.
*/
"""
NAMED_CLASS_DECLARE="""class %(named_DECODER_class)s
: public NamedClassDecoder {
public:
%(named_DECODER_class)s()
: NamedClassDecoder(decoder_, "%(DECODER)s %(rule)s")
{}
private:
nacl_arm_dec::%(DECODER_class)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(%(named_DECODER_class)s);
};
"""
NAMED_CLASS_DECLARE_SYM="%(named_DECODER_class)s"
NAMED_CLASSES_H_FOOTER="""
// Defines the default parse action if the table doesn't define
// an action.
class NotImplementedNamed : public NamedClassDecoder {
public:
NotImplementedNamed()
: NamedClassDecoder(decoder_, "not implemented")
{}
private:
nacl_arm_dec::NotImplemented decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(NotImplementedNamed);
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_classes_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for decoder testing.
Args:
decoder: Decoder (set of tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_classes.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_classes.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_CLASSES_H_HEADER % values)
out.write(RULE_CLASSES_HEADER)
_generate_baseline_and_actual(RULE_CLASS, RULE_CLASS_SYM,
decoder, values, out)
out.write(NAMED_DECODERS_HEADER)
_generate_baseline_and_actual(NAMED_CLASS_DECLARE, NAMED_CLASS_DECLARE_SYM,
decoder, values, out)
out.write(NAMED_CLASSES_H_FOOTER % values)
NAMED_DECODER_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_named_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
namespace nacl_arm_test {
// Defines a (named) decoder class selector for instructions
class Named%(decoder_name)s : nacl_arm_dec::DecoderState {
public:
explicit Named%(decoder_name)s();
// Parses the given instruction, returning the named class
// decoder to use.
const NamedClassDecoder& decode_named(
const nacl_arm_dec::Instruction) const;
// Parses the given instruction, returning the class decoder
// to use.
virtual const nacl_arm_dec::ClassDecoder& decode(
const nacl_arm_dec::Instruction) const;
// The following fields define the set of class decoders
// that can be returned by the API function "decode_named". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need only be built once (and reused
// for each call to "decode_named")."""
DECODER_STATE_FIELD="""
const %(named_DECODER_class)s %(DECODER_instance)s;"""
DECODER_STATE_FIELD_NAME="%(named_DECODER_class)s"
DECODER_STATE_DECODER_COMMENTS="""
private:
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction."""
DECODER_STATE_DECODER="""
inline const NamedClassDecoder& decode_%(table)s(
const nacl_arm_dec::Instruction inst) const;"""
NAMED_DECODER_H_FOOTER="""
// Defines default action if parse tables don't define what action
// to take.
const NotImplementedNamed not_implemented_;
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_decoder_h(decoder, decoder_name, filename, out, cl_args):
"""Generates the named decoder for testing.
Args:
decoder: Decoder (set of tables) to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_decoder.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_decoder.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_DECODER_H_HEADER % values)
_generate_baseline_and_actual(DECODER_STATE_FIELD, DECODER_STATE_FIELD_NAME,
decoder, values, out)
out.write(DECODER_STATE_DECODER_COMMENTS)
for table in decoder.tables():
values['table'] = table.name
out.write(DECODER_STATE_DECODER % values)
out.write(NAMED_DECODER_H_FOOTER % values)
# Defines the source for DECODER_named.cc
NAMED_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "%(FILENAME_BASE)s_decoder.h"
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Instruction;
namespace nacl_arm_test {
Named%(decoder_name)s::Named%(decoder_name)s()
{}
"""
PARSE_TABLE_METHOD_HEADER="""
/*
* Implementation of table %(table_name)s.
* Specified by: %(citation)s
*/
const NamedClassDecoder& Named%(decoder_name)s::decode_%(table_name)s(
const nacl_arm_dec::Instruction inst) const {
"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
PARSE_TABLE_METHOD_ROW="""
return %(action)s;
"""
METHOD_DISPATCH_CLOSE=""" }
"""
PARSE_TABLE_METHOD_FOOTER="""
// Catch any attempt to fall through...
return not_implemented_;
}
"""
NAMED_CC_FOOTER="""
const NamedClassDecoder& Named%(decoder_name)s::
decode_named(const nacl_arm_dec::Instruction inst) const {
return decode_%(entry_table_name)s(inst);
}
const nacl_arm_dec::ClassDecoder& Named%(decoder_name)s::
decode(const nacl_arm_dec::Instruction inst) const {
return decode_named(inst).named_decoder();
}
} // namespace nacl_arm_test
"""
def generate_named_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the test decoder in .cc file
Args:
tables: list of Table objects to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('.cc')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'FILENAME_BASE' : filename[:-len('.cc')],
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(NAMED_CC_HEADER % values)
_generate_decoder_method_bodies(decoder, values, out)
out.write(NAMED_CC_FOOTER % values)
def _generate_decoder_method_bodies(decoder, values, out):
global _cl_args
for table in decoder.tables():
# Add the default row as the last of the optimized rows, so that
# it applies only if no other row matches.
opt_rows = sorted(
dgen_opt.optimize_rows(
table.action_filter(['baseline', 'rule']).rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
values['citation'] = table.citation
out.write(PARSE_TABLE_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Emit UNREFERENCED_PARAMETER to suppress compiler warnings if this
# table doesn't require subtables to select a class decoder.
if not table.methods():
out.write(" UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
if row.action.__class__.__name__ == 'DecoderAction':
_install_action(decoder, row.action, values)
action = '%(baseline_instance)s' % values
elif row.action.__class__.__name__ == 'DecoderMethod':
action = 'decode_%s(inst)' % row.action.name
else:
raise Exception('Bad table action: %s' % row.action)
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
# Each instruction is masked to extract the relevant bits, which are
# then compared against the corresponding expected bits. Hence, the
# above example is converted to:
#
# ((inst & 0x0F000000) == 0x0A000000) &&
# ((inst & 0x0000000F) == 0x00000005)
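#
# (Illustrative derivation: each fixed bit of a pattern contributes a 1
# to the mask and its value to the expected constant, while each 'x'
# contributes 0 to both; so 'xxxx1010...' yields mask 0x0F000000 and
# expected value 0x0A000000.)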
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
values['action'] = action
out.write(PARSE_TABLE_METHOD_ROW % values)
out.write(METHOD_DISPATCH_CLOSE)
out.write(PARSE_TABLE_METHOD_FOOTER % values)
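# For reference, the templates above assemble dispatch methods of roughly
# this shape (an illustrative sketch; table, pattern, and instance names
# are hypothetical):
#
#   const NamedClassDecoder& NamedDecoder::decode_branch_ops(
#       const nacl_arm_dec::Instruction inst) const {
#     if ((inst & 0x0F000000) == 0x0A000000) {
#       return BranchImmediate24_B_Rule_16_A1_P44_instance_;
#     }
#     // Catch any attempt to fall through...
#     return not_implemented_;
#   }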
# Define the source for DECODER_tests.cc
TEST_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "gtest/gtest.h"
#include "native_client/src/trusted/validator_arm/actual_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/baseline_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/inst_classes_testers.h"
#include "native_client/src/trusted/validator_arm/arm_helpers.h"
#include "native_client/src/trusted/validator_arm/gen/arm32_decode_named_bases.h"
using nacl_arm_dec::Instruction;
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Register;
using nacl_arm_dec::RegisterList;
namespace nacl_arm_test {
// The following classes are derived class decoder testers that
// add row pattern constraints and decoder restrictions to each tester.
// This is done so that it can be used to make sure that the
// corresponding pattern is not tested for cases that would be excluded
// due to row checks, or restrictions specified by the row restrictions.
"""
CONSTRAINT_TESTER_CLASS_HEADER="""
// %(row_comment)s
class %(base_tester)s
: public %(base_base_tester)s {
public:
%(base_tester)s(const NamedClassDecoder& decoder)
: %(base_base_tester)s(decoder) {}"""
CONSTRAINT_TESTER_RESTRICTIONS_HEADER="""
virtual bool PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_SANITY_HEADER="""
virtual bool ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_CLASS_CLOSE="""
};
"""
CONSTRAINT_TESTER_PARSE_HEADER="""
bool %(base_tester)s
::PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {"""
ROW_CONSTRAINTS_HEADER="""
// Check that row patterns apply to pattern being checked."""
PATTERN_CONSTRAINT_RESTRICTIONS_HEADER="""
// Check pattern restrictions of row."""
CONSTRAINT_CHECK="""
// %(comment)s
if (%(code)s) return false;"""
CONSTRAINT_TESTER_CLASS_FOOTER="""
// Check other preconditions defined for the base decoder.
return %(base_base_tester)s::
PassesParsePreconditions(inst, decoder);
}
"""
SAFETY_TESTER_HEADER="""
bool %(base_tester)s
::ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {
NC_PRECOND(%(base_base_tester)s::
ApplySanityChecks(inst, decoder));"""
SAFETY_TESTER_CHECK="""
// safety: %(comment)s
EXPECT_TRUE(%(code)s);"""
DEFS_SAFETY_CHECK="""
// defs: %(comment)s;
EXPECT_TRUE(decoder.defs(inst).IsSame(%(code)s));"""
SAFETY_TESTER_FOOTER="""
return true;
}
"""
TESTER_CLASS_HEADER="""
// The following are derived class decoder testers for decoder actions
// associated with a pattern of an action. These derived classes introduce
// a default constructor that automatically initializes the expected decoder
// to the corresponding instance in the generated DecoderState.
"""
TESTER_CLASS="""
// %(row_comment)s
class %(decoder_tester)s
: public %(base_tester)s {
public:
%(decoder_tester)s()
: %(base_tester)s(
state_.%(baseline_instance)s)
{}
};
"""
TEST_HARNESS="""
// Defines a gtest testing harness for tests.
class %(decoder_name)sTests : public ::testing::Test {
protected:
%(decoder_name)sTests() {}
};
// The following functions test each pattern specified in parse
// decoder tables.
"""
TEST_FUNCTION_ACTUAL_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s baseline_tester;
%(named_actual_class)s actual;
ActualVsBaselineTester a_vs_b_tester(actual, baseline_tester);
a_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s tester;
tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
BvB_%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s old_baseline_tester;
Named%(gen_decoder)s gen_baseline;
BaselineVsBaselineTester b_vs_b_tester(gen_baseline, old_baseline_tester);
b_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_CC_FOOTER="""
} // namespace nacl_arm_test
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
"""
def generate_tests_cc(decoder, decoder_name, out, cl_args, tables):
"""Generates pattern tests for the rows in the given list of tables
in the given decoder."""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
decoder = dgen_baselines.AddBaselinesToDecoder(decoder, tables)
baselines = cl_args.get('test-base')
if not baselines: baselines = []
decoder = _decoder_restricted_to_tables(decoder, tables)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'decoder_name': decoder_name,
}
out.write(TEST_CC_HEADER % values)
_generate_constraint_testers(decoder, values, out)
_generate_rule_testers(decoder, values, out)
out.write(TEST_HARNESS % values)
_generate_test_patterns_with_baseline_tests(decoder, values, out, baselines)
out.write(TEST_CC_FOOTER % values)
def _filter_test_action(action, with_patterns, with_rules):
"""Filters the actions to pull out relavant entries, based on whether we
want to include patterns and rules.
"""
action_fields = ['actual', 'baseline', 'generated_baseline',
'constraints'] + dgen_decoder.METHODS
if with_patterns:
action_fields += ['pattern' ]
if with_rules:
action_fields += ['rule']
return action.action_filter(action_fields)
def _filter_test_row(row, with_patterns=False, with_rules=True):
"""Filters a row t pulll out actions with relavant entries, based on
whether we want to include patterns and rules.
"""
return row.copy_with_action(
_filter_test_action(row.action, with_patterns, with_rules))
def _install_row_cases(row, values):
"""Installs row case names, based on values entries."""
# First define base testers that add row constraints and safety checks.
constraint_rows_map = values.get('constraint_rows')
if constraint_rows_map:
base_row = _filter_test_row(row, with_rules=False)
values['base_test_case'] = (
'Case%s' % constraint_rows_map[dgen_core.neutral_repr(base_row)])
else:
values['base_test_case'] = ''
# Add test decoders associated with the row in the table.
decoder_rows_map = values.get('decoder_rows')
if decoder_rows_map:
decoder_row = _filter_test_row(row)
values['test_case'] = (
'Case%s' % decoder_rows_map[dgen_core.neutral_repr(decoder_row)])
else:
values['test_case'] = ''
# Incorporate patterns with each row.
pattern_rows_map = values.get('test_rows')
if pattern_rows_map:
pattern_row = _filter_test_row(row, with_patterns=True)
values['test_pattern'] = (
'Case%s' % pattern_rows_map[dgen_core.neutral_repr(pattern_row)])
else:
values['test_pattern'] = ''
def _install_test_row(row, decoder, values,
with_patterns=False, with_rules=True):
"""Installs data associated with the given row into the values map.
Installs the baseline class, rule name, and constraints associated
with the row. If with_patterns is specified, then pattern information and
actual class information is also inserted.
"""
action = _filter_test_action(row.action, with_patterns, with_rules)
values['row_comment'] = dgen_output.commented_string(
repr(row.copy_with_action(action)))
_install_action(decoder, action, values)
return action
def _rows_to_test(decoder, values, with_patterns=False, with_rules=True):
"""Returns the rows of the decoder that define enough information
that testing can be done.
"""
generated_names = set()
rows = []
for table in decoder.tables():
for row in table.rows():
if (isinstance(row.action, dgen_core.DecoderAction) and
row.action.pattern()):
new_row = row.copy_with_action(
_install_test_row(row, decoder, values, with_patterns, with_rules))
constraint_tester = dgen_core.neutral_repr(new_row)
if constraint_tester not in generated_names:
generated_names.add(constraint_tester)
rows.append(new_row)
return sorted(rows)
def _row_filter_interesting_patterns(row):
"""Builds a copy of the row, removing uninteresting column patterns."""
return row.copy_with_patterns(_interesting_patterns(row.patterns))
def _generate_constraint_testers(decoder, values, out):
"""Generates the testers needed to implement the constraints
associated with each row having a pattern.
"""
rows = _rows_to_test(decoder, values, with_rules=False)
values['constraint_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values)
safety_to_check = _safety_to_check(action.safety())
defs_to_check = action.defs()
out.write(CONSTRAINT_TESTER_CLASS_HEADER % values)
if row.patterns or action.constraints().restrictions:
out.write(CONSTRAINT_TESTER_RESTRICTIONS_HEADER % values)
if safety_to_check or defs_to_check:
out.write(CONSTRAINT_TESTER_SANITY_HEADER % values)
out.write(CONSTRAINT_TESTER_CLASS_CLOSE % values)
if row.patterns or action.constraints().restrictions:
out.write(CONSTRAINT_TESTER_PARSE_HEADER % values)
if row.patterns:
out.write(ROW_CONSTRAINTS_HEADER % values)
for p in row.patterns:
not_p = p.negate()
values['comment'] = dgen_output.commented_string(repr(not_p), ' ')
values['code'] = not_p.to_bool()
out.write(CONSTRAINT_CHECK % values)
if action.constraints().restrictions:
out.write(PATTERN_CONSTRAINT_RESTRICTIONS_HEADER)
for c in action.constraints().restrictions:
not_c = c.negate()
values['comment'] = dgen_output.commented_string(repr(not_c), ' ')
values['code'] = not_c.to_bool()
out.write(CONSTRAINT_CHECK % values)
out.write(CONSTRAINT_TESTER_CLASS_FOOTER % values)
if safety_to_check or defs_to_check:
out.write(SAFETY_TESTER_HEADER % values)
for check in safety_to_check:
values['comment'] = dgen_output.commented_string(
repr(check), ' ')
values['code'] = check.to_bool()
out.write(SAFETY_TESTER_CHECK % values)
if defs_to_check:
values['comment'] = dgen_output.commented_string(
repr(defs_to_check), ' ')
values['code'] = defs_to_check.to_register_list()
out.write(DEFS_SAFETY_CHECK % values)
out.write(SAFETY_TESTER_FOOTER % values)
def _generate_rule_testers(decoder, values, out):
"""Generates the testers that tests the rule associated with
each row having a pattern.
"""
out.write(TESTER_CLASS_HEADER % values)
rows = _rows_to_test(decoder, values)
values['decoder_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
_install_test_row(row, decoder, values)
out.write(TESTER_CLASS % values)
def _decoder_restricted_to_tables(decoder, tables):
"""Returns a copy of the decoder, with only the given table names (
or all tables if no names are specified.
"""
if not tables:
return decoder
new_decoder = dgen_core.Decoder()
for tbl in [tbl for tbl in decoder.tables() if tbl.name in tables]:
new_decoder.add(tbl)
new_decoder.set_class_defs(decoder.get_class_defs())
return new_decoder
def _generate_test_patterns_with_baseline_tests(
decoder, values, out, baseline_test_tables):
_generate_test_patterns(decoder, values, out, False)
_generate_test_patterns(
_decoder_restricted_to_tables(decoder, baseline_test_tables),
values, out, True)
def _generate_test_patterns(decoder, values, out, add_baseline_tests):
"""Generates a test function for each row having a pattern associated
with the table row.
"""
rows = _rows_to_test(decoder, values, with_patterns=True)
values['test_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values, with_patterns=True)
if add_baseline_tests:
if action.find('generated_baseline'):
values['gen_decoder'] = action.find('generated_baseline')
out.write(TEST_FUNCTION_BASELINE_VS_BASELINE % values)
elif action.actual() == action.baseline():
out.write(TEST_FUNCTION_BASELINE % values)
else:
out.write(TEST_FUNCTION_ACTUAL_VS_BASELINE % values)
def _index_neutral_map(values):
"""Returns a dictionary from each neutral_repr(value) in list
values, to its corresponding index. This is done to reduce the
number of compares to find the index, speeding up code
generation.
"""
lookup_map = {}
index = 0
for v in values:
lookup_map[dgen_core.neutral_repr(v)] = index
index += 1
return lookup_map
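# Illustrative behavior (hypothetical values): _index_neutral_map([a, b])
# returns {neutral_repr(a): 0, neutral_repr(b): 1}, so each later lookup
# is a single dictionary access rather than a scan over the list.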
|
{
"content_hash": "0a3d9bef14029a45b6d0cde3a737ac70",
"timestamp": "",
"source": "github",
"line_count": 1147,
"max_line_length": 81,
"avg_line_length": 35.08805579773322,
"alnum_prop": 0.6947025791382995,
"repo_name": "nacl-webkit/native_client",
"id": "0a9015701ea8eccd074845ae0801846bbf889e5f",
"size": "40246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/trusted/validator_arm/dgen_test_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "164092"
},
{
"name": "C",
"bytes": "11922241"
},
{
"name": "C++",
"bytes": "7639856"
},
{
"name": "DOT",
"bytes": "6961"
},
{
"name": "JavaScript",
"bytes": "5956"
},
{
"name": "Logos",
"bytes": "6928"
},
{
"name": "Objective-C",
"bytes": "7466"
},
{
"name": "Python",
"bytes": "1548425"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ragel in Ruby Host",
"bytes": "87993"
},
{
"name": "Shell",
"bytes": "342591"
}
],
"symlink_target": ""
}
|
import pytest
from pandas import DataFrame
from bio_hansel.const import SCHEME_FASTAS
from bio_hansel.qc.const import QC
from bio_hansel.subtype import Subtype
from bio_hansel.subtyper import subtype_contigs
from . import check_subtype_attrs, check_df_fasta_cols
genome_name = 'test'
scheme_heidelberg = 'heidelberg'
scheme_enteritidis = 'enteritidis'
fasta_heidelberg_pass = 'tests/data/SRR1002850_SMALL.fasta'
fasta_gz_heidelberg_pass = 'tests/data/SRR1002850_SMALL.fasta.gz'
# input contigs that should give an unconfident result and a QC fail
fasta_enteritidis_unconfident = 'tests/data/fail-qc-unconfident-subtype.fasta'
fasta_gz_enteritidis_unconfident = 'tests/data/fail-qc-unconfident-subtype.fasta.gz'
# input contigs that should give a QC fail
fasta_enteritidis_fail = 'tests/data/fail-qc-unconfident-subtype.fasta'
fasta_gz_enteritidis_fail = 'tests/data/fail-qc-unconfident-subtype.fasta.gz'
@pytest.fixture()
def subtype_heidelberg_pass():
return Subtype(scheme=scheme_heidelberg,
scheme_version=SCHEME_FASTAS[scheme_heidelberg]['version'],
sample=genome_name,
file_path=fasta_heidelberg_pass,
subtype='2.2.2.2.1.4',
are_subtypes_consistent=True,
inconsistent_subtypes=None,
n_kmers_matching_all=202,
n_kmers_matching_all_expected='202',
n_kmers_matching_positive=17,
n_kmers_matching_positive_expected='17',
n_kmers_matching_subtype=3,
n_kmers_matching_subtype_expected='3',
qc_status=QC.PASS)
@pytest.fixture()
def subtype_enteritidis_fail_unconfident():
return Subtype(scheme=scheme_enteritidis,
scheme_version=SCHEME_FASTAS[scheme_enteritidis]['version'],
sample=genome_name,
subtype='2.1.1',
file_path=fasta_enteritidis_unconfident,
are_subtypes_consistent=True,
n_kmers_matching_all=154,
n_kmers_matching_all_expected='224',
n_kmers_matching_positive=9,
n_kmers_matching_positive_expected='9',
n_kmers_matching_subtype=1,
n_kmers_matching_subtype_expected='1',
qc_status=QC.FAIL)
@pytest.fixture()
def subtype_enteritidis_fail():
return Subtype(scheme=scheme_enteritidis,
scheme_version=SCHEME_FASTAS[scheme_enteritidis]['version'],
sample=genome_name,
subtype='2.1.1',
file_path=fasta_enteritidis_fail,
are_subtypes_consistent=True,
n_kmers_matching_all=154,
n_kmers_matching_all_expected='224',
n_kmers_matching_positive=9,
n_kmers_matching_positive_expected='9',
n_kmers_matching_subtype=1,
n_kmers_matching_subtype_expected='1',
qc_status=QC.FAIL)
def test_heidelberg_fasta_ac(subtype_heidelberg_pass):
st, df = subtype_contigs(fasta_path=fasta_heidelberg_pass,
genome_name=genome_name,
scheme=scheme_heidelberg)
stgz, dfgz = subtype_contigs(fasta_path=fasta_gz_heidelberg_pass,
genome_name=genome_name,
scheme=scheme_heidelberg)
assert isinstance(st, Subtype)
assert isinstance(df, DataFrame)
assert isinstance(stgz, Subtype)
assert isinstance(dfgz, DataFrame)
check_subtype_attrs(st, stgz, subtype_heidelberg_pass)
check_df_fasta_cols(df)
check_df_fasta_cols(dfgz)
def test_enteritidis_scheme_vs_qc_failing_contigs_unconfident_ac(subtype_enteritidis_fail_unconfident):
st, df = subtype_contigs(fasta_path=fasta_enteritidis_unconfident,
genome_name=genome_name,
scheme=scheme_enteritidis)
stgz, dfgz = subtype_contigs(fasta_path=fasta_gz_enteritidis_unconfident,
genome_name=genome_name,
scheme=scheme_enteritidis)
assert isinstance(st, Subtype)
assert isinstance(df, DataFrame)
assert isinstance(stgz, Subtype)
assert isinstance(dfgz, DataFrame)
check_subtype_attrs(st, stgz, subtype_enteritidis_fail_unconfident)
assert 'Unconfident Results Error 4' in st.qc_message
assert 'Unconfident Results Error 4' in stgz.qc_message
check_df_fasta_cols(df)
check_df_fasta_cols(dfgz)
def test_ac_vs_bad_contigs(subtype_enteritidis_fail):
st, df = subtype_contigs(fasta_path=fasta_enteritidis_fail,
genome_name=genome_name,
scheme=scheme_enteritidis)
stgz, dfgz = subtype_contigs(fasta_path=fasta_gz_enteritidis_fail,
scheme=scheme_enteritidis,
genome_name=genome_name)
assert isinstance(st, Subtype)
assert isinstance(df, DataFrame)
assert isinstance(stgz, Subtype)
assert isinstance(dfgz, DataFrame)
check_subtype_attrs(st, stgz, subtype_enteritidis_fail)
check_df_fasta_cols(df)
check_df_fasta_cols(dfgz)
|
{
"content_hash": "b68624e3dbd1b316c9bbe0b8199d8b76",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 103,
"avg_line_length": 43.07258064516129,
"alnum_prop": 0.617112900205954,
"repo_name": "phac-nml/bio_hansel",
"id": "bc09262044f2839dd3fff4bf5f78aa744e4ccc63",
"size": "5341",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "tests/test_subtyping_contigs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81880"
}
],
"symlink_target": ""
}
|
# [START notebooks_v1_generated_NotebookService_GetExecution_async]
from google.cloud import notebooks_v1
async def sample_get_execution():
# Create a client
client = notebooks_v1.NotebookServiceAsyncClient()
# Initialize request argument(s)
request = notebooks_v1.GetExecutionRequest(
name="name_value",
)
# Make the request
response = await client.get_execution(request=request)
# Handle the response
print(response)
# [END notebooks_v1_generated_NotebookService_GetExecution_async]
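# To drive the coroutine from synchronous code, something along these
# lines would work (illustrative; not part of the generated sample):
#
#   import asyncio
#   asyncio.run(sample_get_execution())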
|
{
"content_hash": "fb88ca833efd11e6929c4ea3743caf33",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 24.57894736842105,
"alnum_prop": 0.715203426124197,
"repo_name": "googleapis/python-notebooks",
"id": "370609582297d99ed01cac5b8752f0614e4aeeb6",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/notebooks_v1_generated_notebook_service_get_execution_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1752787"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth.views import logout
from .views import login, UserCRUDL
logout_url = getattr(settings, 'LOGOUT_REDIRECT_URL', None)
urlpatterns = patterns('',
url(r'^login/$', login, dict(template_name='smartmin/users/login.html'), name="users.user_login"),
url(r'^logout/$', logout, dict(redirect_field_name='go', next_page=logout_url), name="users.user_logout"),
)
urlpatterns += UserCRUDL().as_urlpatterns()
|
{
"content_hash": "302b7dad2a9f591af797454707e776d8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 118,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.7248201438848921,
"repo_name": "caktus/smartmin",
"id": "4a1fdab01e016a07fe2f1861152f58a3df4b589f",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartmin/users/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19950"
},
{
"name": "HTML",
"bytes": "25575"
},
{
"name": "JavaScript",
"bytes": "54172"
},
{
"name": "Python",
"bytes": "191040"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from etsdevtools.developer.tools.envisage_browser.plugin_definition_adapter import *
|
{
"content_hash": "87cefc3adc071aab2926a3c363801dac",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 84,
"avg_line_length": 62,
"alnum_prop": 0.8306451612903226,
"repo_name": "enthought/etsproxy",
"id": "5ce21ab48781dd90836762c83037f52fa3cfe441",
"size": "139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/developer/tools/envisage_browser/plugin_definition_adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"p6b6raj5z$v717ao=!_ao(@tutr@db5!kncl8g0a_0*_9j)&7d"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
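# e.g. a project directory named 'icecream' yields ROOT_URLCONF = 'icecream.urls'.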
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'registration',
'flavours',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION
|
{
"content_hash": "005f0b05e6627e9f9cca338e699950f1",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 100,
"avg_line_length": 29.724409448818896,
"alnum_prop": 0.683841059602649,
"repo_name": "haku86/icecream",
"id": "6b12173578727371f8268369580ac653172cfb4d",
"size": "7550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icecream/icecream/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "967"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "29609"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from modules import IndRNN
torch.manual_seed(1111)
# Hyper Parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='../data/',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# RNN Model (Many-to-One)
class RNNModel(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes,
bias=True, grad_clip=None):
super(RNNModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.rnn = IndRNN(input_size, hidden_size, num_layers=num_layers,
bias=bias, return_sequences=False, grad_clip=grad_clip)
self.fc = nn.Linear(hidden_size, num_classes, bias=bias)
def forward(self, x):
# Set initial states
initial_states = [Variable(torch.zeros(x.size(0), self.hidden_size)) for _ in range(self.num_layers)]
# Forward propagate RNN
out = self.rnn(x, initial_states)
# Decode hidden state of last time step
out = self.fc(out)
return out
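# Shape note (illustrative): each 28x28 MNIST image is fed as a sequence
# of 28 rows of 28 pixels, so x is (batch, 28, 28); because the IndRNN is
# built with return_sequences=False, self.rnn returns only the last hidden
# state of shape (batch, hidden_size), which self.fc maps to num_classes.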
rnn = RNNModel(input_size, hidden_size, num_layers, num_classes, bias=True, grad_clip=10)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.view(-1, sequence_length, input_size))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = rnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.view(-1, sequence_length, input_size))
outputs = rnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(rnn.state_dict(), 'rnn.pkl')
|
{
"content_hash": "9e4a951bb52618f5a7a0c6d3f7768031",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 109,
"avg_line_length": 32.34653465346535,
"alnum_prop": 0.5876951331496786,
"repo_name": "DingKe/pytorch_workplace",
"id": "b739114763a7274ab6d43163d188ef58f4bea829",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rnn/test_indrnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "680"
},
{
"name": "C++",
"bytes": "159"
},
{
"name": "Python",
"bytes": "119448"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
_BASE = __name__.split(".", 1)[0]
# Add a BLATHER level, this matches the multiprocessing utils.py module (and
# kazoo and others) that declares a similar level, this level is for
# information that is even lower level than regular DEBUG and gives out so
# much runtime information that it is only useful to low-level/certain users...
BLATHER = 5
# Copy over *select* attributes to make it easy to use this module.
CRITICAL = logging.CRITICAL
DEBUG = logging.DEBUG
ERROR = logging.ERROR
FATAL = logging.FATAL
INFO = logging.INFO
NOTSET = logging.NOTSET
WARN = logging.WARN
WARNING = logging.WARNING
class _BlatherLoggerAdapter(logging.LoggerAdapter):
def blather(self, msg, *args, **kwargs):
"""Delegate a blather call to the underlying logger."""
self.log(BLATHER, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
"""Delegate a warning call to the underlying logger."""
self.warning(msg, *args, **kwargs)
def getLogger(name=_BASE, extra=None):
logger = logging.getLogger(name)
if not logger.handlers:
logger.addHandler(logging.NullHandler())
return _BlatherLoggerAdapter(logger, extra=extra)
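# Example usage (an illustrative sketch):
#
#   LOG = getLogger(__name__)
#   LOG.blather("very low-level detail: %s", payload)  # emitted at level 5
#   LOG.warn("recoverable problem: %s", job_id)  # delegates to warning()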
|
{
"content_hash": "768d57a02bc5173b5f9c988292b9aa6f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 31.487179487179485,
"alnum_prop": 0.7133550488599348,
"repo_name": "pombredanne/taskflow-1",
"id": "823f8b0cac1e6b55e6edb9d008ce2a125ced0025",
"size": "1885",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taskflow/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1484277"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
}
|
import os, sys
from jadsh.prompt import Prompt
import jadsh.constants as constants
class Screen:
def __init__(self, stdin = sys.stdin, stdout = sys.stdout, stderr = sys.stderr):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.screen = ""
self.cursor = 0
self.terminal_length = 0
self.getTerminalSize()
def __call__(self, *argv):
self.draw(*argv)
def updateCursor(self, position):
self.cursor = self.cursor + int(position)
def getCursor(self, text):
if self.cursor < 0:
self.cursor = 0
elif self.cursor > len(text):
self.cursor = len(text)
return self.cursor
def saveCursor(self):
if self.stdout.isatty():
self.write("\x1b7")
def resetCursor(self):
if self.stdout.isatty():
self.write("\x1b8")
def getTerminalSize(self):
if self.stdout.isatty():
stty = os.popen('stty size', 'r')
self.rows, self.columns = stty.read().split()
stty.close()
else:
self.rows, self.columns = (-1, -1)
return (self.rows, self.columns)
def draw(self, *argv):
"""
Draw the terminal so the user can see it
"""
self.terminal_length = 0
if self.stdout.isatty():
# Reset cursor to previous position
# TODO: Handle edge case when current line is at the bottom of the terminal
self.screenAppend("\x1b8")
# Hide the cursor
self.screenAppend("\x1b[?25l");
# Clear everything after the current line
self.screenAppend("\x1b[J\r")
# Save the current cursor position
self.screenAppend("\x1b7")
# Output all args to the terminal
for arg in argv:
self.terminal_length += len(arg)
self.screenAppend(arg)
if self.stdout.isatty():
self.screenAppend("\x1b[?25h") # Show cursor
# Calculate cursor position and place at correct spot
# TODO: Handle edge case when user input goes to more than 1 line
position = len(argv[-1]) - self.getCursor(argv[-1])
if position > 0:
# Move cursor backwards
self.screenAppend("\x1b[%sD" % str(position))
if not self.stdout.isatty():
self.screenAppend("\n")
# Output everything to the screen
self.write(self.screen)
self.screen = ""
def title(self, title = "jadsh"):
if self.stdout.isatty():
self.write("\x1b]2;%s\x07", title)
def screenAppend(self, text):
"""
Add new text to the screen
"""
self.screen += str(text)
def message(self, title, message, status = False):
"""
Display message in the terminal
"""
self.write(self.hilite("%s: " % str(title), status), flush = False)
self.write("%s\n" % str(message))
self.saveCursor()
def write(self, prepared_string = "", items = None, flush = True):
"""
Output prepared string to terminal
"""
if items:
self.stdout.write(str(prepared_string) % items)
else:
self.stdout.write(str(prepared_string))
if flush: self.flush()
def flush(self):
self.stdout.flush()
def hilite(self, string, status = False, bold = False):
"""
Generate bold, or colored string using ANSI escape codes
@return String
"""
if not self.stdout.isatty(): return string
attr = []
if status:
# green
attr.append('32')
else:
# red
attr.append('31')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
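# Illustrative usage sketch (not part of jadsh itself): render a prompt plus a
# line of user input, with the cursor placed at the end of the input.
if __name__ == '__main__':
    screen = Screen()
    buf = 'echo hello'
    screen.updateCursor(len(buf))
    screen.saveCursor()  # draw() restores from this saved position on redraw
    screen.draw(screen.hilite('jadsh> ', status=True), buf)
    screen.message('status', 'command completed', status=True)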
|
{
"content_hash": "1a21708423930f06dcd50de28ec516f6",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 81,
"avg_line_length": 23.394160583941606,
"alnum_prop": 0.656786271450858,
"repo_name": "BoringCode/jadsh",
"id": "e57f75d3f22a57d8704ffd36042a94b98842afb4",
"size": "3205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jadsh/screen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50060"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from thrift.protocol import fastproto, TBinaryProtocol, TCompactProtocol
from thrift.transport import TTransport
import timeit
import gc
from multiprocessing import Process, Queue
import os
import psutil
try:
    from guppy import hpy
except ImportError:
    # guppy is optional; the memory-usage pass is skipped when it is missing.
    hpy = None
from FastProto.ttypes import AStruct, OneOfEach
ooe = OneOfEach()
ooe.aBool = True
ooe.aByte = 1
ooe.anInteger16 = 234
ooe.anInteger32 = 2345678
ooe.anInteger64 = 23456789012345
ooe.aString = "This is my rifle" * 100
ooe.aDouble = 2.3456789012
ooe.aFloat = 12345.678
ooe.aList = [12, 34, 56, 78, 90, 100, 123, 456, 789]
ooe.aSet = set(["This", "is", "my", "rifle"])
ooe.aMap = {"What": 4, "a": 1, "wonderful": 9, "day": 3, "!": 1}
ooe.aStruct = AStruct(aString="isn't it?", anInteger=999)
trans = TTransport.TMemoryBuffer()
proto = TBinaryProtocol.TBinaryProtocol(trans)
ooe.write(proto)
binary_buf = trans.getvalue()
trans = TTransport.TMemoryBuffer()
proto = TCompactProtocol.TCompactProtocol(trans)
ooe.write(proto)
compact_buf = trans.getvalue()
class TDevNullTransport(TTransport.TTransportBase):
def __init__(self):
pass
def isOpen(self):
return True
iters = 1000000
def benchmark_fastproto():
setup_write = """
from __main__ import ooe, TDevNullTransport
from FastProto.ttypes import OneOfEach
from thrift.protocol import fastproto
trans = TDevNullTransport()
def doWrite():
buf = fastproto.encode(ooe, [OneOfEach, OneOfEach.thrift_spec, False],
utf8strings=0, protoid={0})
trans.write(buf)
"""
print("Fastproto binary write = {}".format(
timeit.Timer('doWrite()', setup_write.format(0))
.timeit(number=iters)))
print("Fastproto compact write = {}".format(
timeit.Timer('doWrite()', setup_write.format(2))
.timeit(number=iters)))
setup_read = """
from __main__ import binary_buf, compact_buf
from FastProto.ttypes import OneOfEach
from thrift.protocol import fastproto
from thrift.transport import TTransport
def doReadBinary():
trans = TTransport.TMemoryBuffer(binary_buf)
ooe = OneOfEach()
fastproto.decode(ooe, trans, [OneOfEach, OneOfEach.thrift_spec, False],
utf8strings=0, protoid=0)
def doReadCompact():
trans = TTransport.TMemoryBuffer(compact_buf)
ooe = OneOfEach()
fastproto.decode(ooe, trans, [OneOfEach, OneOfEach.thrift_spec, False],
utf8strings=0, protoid=2)
"""
print("Fastproto binary read = {}".format(
timeit.Timer("doReadBinary()", setup_read).timeit(number=iters)))
print("Fastproto compact read = {}".format(
timeit.Timer("doReadCompact()", setup_read).timeit(number=iters)))
def fastproto_encode(q, protoid):
hp = hpy()
trans = TDevNullTransport()
p = psutil.Process(os.getpid())
global ooe
before = hp.heap()
for i in range(iters):
buf = fastproto.encode(
ooe,
[OneOfEach, OneOfEach.thrift_spec, False],
utf8strings=0,
protoid=protoid)
trans.write(buf)
if (i + 1) % 100000 == 0:
q.put((i + 1, p.memory_info()))
gc.collect()
after = hp.heap()
leftover = after - before
q.put("Memory leftover in Python after {} times: {}".format(
iters, leftover))
def fastproto_decode(q, protoid):
hp = hpy()
p = psutil.Process(os.getpid())
before = hp.heap()
for i in range(iters):
trans = TTransport.TMemoryBuffer(
binary_buf if protoid == 0 else compact_buf)
ooe_local = OneOfEach()
fastproto.decode(
ooe_local,
trans,
[OneOfEach, OneOfEach.thrift_spec, False],
utf8strings=0,
protoid=protoid)
if (i + 1) % 100000 == 0:
q.put((i + 1, p.memory_info()))
gc.collect()
after = hp.heap()
leftover = after - before
q.put("Memory leftover in Python after {} times: {}".format(
iters, leftover))
def memory_usage_fastproto():
q = Queue()
for method in (fastproto_encode, fastproto_decode):
print("Memory usage with {}:".format(method.__name__))
for protoid in (0, 2):
print("Binary" if protoid == 0 else "Compact")
p = Process(target=method, args=(q, protoid))
p.start()
while True:
ret = q.get()
if isinstance(ret, tuple):
print("Memory info after {} times: {}".format(
ret[0], ret[1]))
else:
print(ret)
p.join()
break
if __name__ == "__main__":
print("Starting Benchmarks")
benchmark_fastproto()
if hpy is not None:
memory_usage_fastproto()
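    # Optional round-trip sanity check (a sketch, not part of the original
    # benchmark): decode binary_buf with the plain TBinaryProtocol reader and
    # confirm the struct survives unchanged.
    check_trans = TTransport.TMemoryBuffer(binary_buf)
    check_proto = TBinaryProtocol.TBinaryProtocol(check_trans)
    ooe_check = OneOfEach()
    ooe_check.read(check_proto)
    assert ooe_check == ooe, "binary round-trip mismatch"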
|
{
"content_hash": "f498f561943ae5e106bdc793a4e3fb52",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 75,
"avg_line_length": 29.596385542168676,
"alnum_prop": 0.6163240382658254,
"repo_name": "getyourguide/fbthrift",
"id": "e7ce69d5346a52736cf65468dd0ac9d3e26ccda4",
"size": "4913",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "thrift/test/py/FastprotoBenchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "154349"
},
{
"name": "C#",
"bytes": "28929"
},
{
"name": "C++",
"bytes": "17798156"
},
{
"name": "CMake",
"bytes": "33182"
},
{
"name": "D",
"bytes": "669764"
},
{
"name": "Emacs Lisp",
"bytes": "5154"
},
{
"name": "Erlang",
"bytes": "23039"
},
{
"name": "Go",
"bytes": "375816"
},
{
"name": "HTML",
"bytes": "404999"
},
{
"name": "Hack",
"bytes": "768869"
},
{
"name": "Haskell",
"bytes": "305707"
},
{
"name": "Java",
"bytes": "2408919"
},
{
"name": "JavaScript",
"bytes": "6018"
},
{
"name": "Lex",
"bytes": "11934"
},
{
"name": "M4",
"bytes": "99563"
},
{
"name": "Makefile",
"bytes": "53670"
},
{
"name": "OCaml",
"bytes": "32043"
},
{
"name": "Objective-C",
"bytes": "152361"
},
{
"name": "PHP",
"bytes": "322092"
},
{
"name": "Perl",
"bytes": "70682"
},
{
"name": "Protocol Buffer",
"bytes": "585"
},
{
"name": "Python",
"bytes": "2413275"
},
{
"name": "Ruby",
"bytes": "328584"
},
{
"name": "Shell",
"bytes": "32559"
},
{
"name": "Smalltalk",
"bytes": "22812"
},
{
"name": "TeX",
"bytes": "48707"
},
{
"name": "Thrift",
"bytes": "259661"
},
{
"name": "Vim script",
"bytes": "2837"
},
{
"name": "Yacc",
"bytes": "36158"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User,Group,Permission
from django.contrib.contenttypes.models import ContentType
import unittest
from annotatetext.models import Annotation
from actstream.models import Action
from tagging.models import Tag, TaggedItem
from laws.models import Bill
from mks.models import Member, Knesset
from links.models import LinkType
from models import Committee, CommitteeMeeting, Topic
from models import TOPIC_REJECTED
just_id = lambda x: x.id
APP = 'committees'
class CommitteeMeetingDetailViewTest(TestCase):
def setUp(self):
self.knesset = Knesset.objects.create(number=1,
start_date=datetime.today()-timedelta(days=1))
self.committee_1 = Committee.objects.create(name='c1')
self.committee_2 = Committee.objects.create(name='c2')
self.meeting_1 = self.committee_1.meetings.create(date=datetime.now(),
topics = "django",
protocol_text='''jacob:
I am a perfectionist
adrian:
I have a deadline''')
self.meeting_1.create_protocol_parts()
self.meeting_2 = self.committee_1.meetings.create(date=datetime.now(),
topics = "python",
protocol_text='m2')
self.meeting_2.create_protocol_parts()
self.jacob = User.objects.create_user('jacob', 'jacob@example.com',
'JKM')
self.adrian = User.objects.create_user('adrian', 'adrian@example.com',
'ADRIAN')
(self.group, created) = Group.objects.get_or_create(name='Valid Email')
if created:
self.group.save()
self.group.permissions.add(Permission.objects.get(name='Can add annotation'))
self.jacob.groups.add(self.group)
ct = ContentType.objects.get_for_model(Tag)
self.adrian.user_permissions.add(Permission.objects.get(codename='add_tag', content_type=ct))
self.bill_1 = Bill.objects.create(stage='1', title='bill 1')
self.mk_1 = Member.objects.create(name='mk 1')
self.topic = self.committee_1.topic_set.create(creator=self.jacob,
title="hello", description="hello world")
self.tag_1 = Tag.objects.create(name='tag1')
self.meeting_1.mks_attended.add(self.mk_1)
def testProtocolPart(self):
parts_list = self.meeting_1.parts.list()
self.assertEqual(parts_list.count(), 2)
self.assertEqual(parts_list[0].header, u'jacob')
self.assertEqual(parts_list[0].body, 'I am a perfectionist')
self.assertEqual(parts_list[1].header, u'adrian')
self.assertEqual(parts_list[1].body, 'I have a deadline')
def testPartAnnotation(self):
        '''This is more about testing the annotatetext app.'''
self.assertTrue(self.client.login(username='jacob', password='JKM'))
part = self.meeting_1.parts.list()[0]
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 7,
'selection_end': 14,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'just perfect',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
self.assertEqual(res.status_code, 302)
annotation = Annotation.objects.get(object_id=part.id,
content_type=ContentType.objects.get_for_model(part).id)
self.assertEqual(annotation.selection, 'perfect')
# ensure the activity has been recorded
stream = Action.objects.stream_for_actor(self.jacob)
self.assertEqual(stream.count(), 2)
self.assertEqual(stream[0].verb, 'started following')
self.assertEqual(stream[0].target.id, self.meeting_1.id)
self.assertEqual(stream[1].verb, 'annotated')
self.assertEqual(stream[1].target.id, annotation.id)
# ensure we will see it on the committee page
annotations = self.committee_1.annotations
self.assertEqual(annotations.count(), 1)
self.assertEqual(annotations[0].comment, 'just perfect')
# test the deletion of an annotation
annotation.delete()
stream = Action.objects.stream_for_actor(self.jacob)
self.assertEqual(stream.count(), 1)
def testTwoAnnotations(self):
'''create two annotations on same part, and delete them'''
self.assertTrue(self.client.login(username='jacob', password='JKM'))
part = self.meeting_1.parts.list()[0]
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 7,
'selection_end': 14,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'just perfect',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
self.assertEqual(res.status_code, 302)
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 8,
'selection_end': 15,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'not quite',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
self.assertEqual(res.status_code, 302)
annotations = Annotation.objects.filter(object_id=part.id,
content_type=ContentType.objects.get_for_model(part).id)
self.assertEqual(annotations.count(), 2)
# ensure we will see it on the committee page
c_annotations = self.committee_1.annotations
self.assertEqual(c_annotations.count(), 2)
self.assertEqual(c_annotations[0].comment, 'just perfect')
self.assertEqual(c_annotations[1].comment, 'not quite')
# test the deletion of an annotation
annotations[0].delete()
c_annotations = self.committee_1.annotations
self.assertEqual(c_annotations.count(), 1)
def testAnnotationForbidden(self):
self.jacob.groups.clear() # invalidate this user's email
self.assertTrue(self.client.login(username='jacob', password='JKM'))
part = self.meeting_1.parts.list()[0]
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 7,
'selection_end': 14,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'just perfect',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
        self.assertEqual(res.status_code, 403)  # Forbidden; a 302 here would mean the unverified user was allowed to post an annotation.
def testCommitteeList(self):
res = self.client.get(reverse('committee-list'))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'committees/committee_list.html')
committees = res.context['committees']
self.assertEqual(map(just_id, committees),
[ self.committee_1.id, self.committee_2.id, ])
self.assertQuerysetEqual(res.context['topics'],
["<Topic: hello>"])
def testCommitteeMeetings(self):
res = self.client.get(self.committee_1.get_absolute_url())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'committees/committee_detail.html')
object_list = res.context['meetings_list']
self.assertEqual(map(just_id, object_list),
[self.meeting_1.id, self.meeting_2.id, ],
'object_list has wrong objects: %s' % object_list)
def test_committee_meeting(self):
res = self.client.get(self.meeting_1.get_absolute_url())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'committees/committeemeeting_detail.html')
members = res.context['members']
self.assertEqual(map(just_id, members),
[self.mk_1.id],
'members has wrong objects: %s' % members)
def testLoginRequired(self):
res = self.client.post(reverse('committee-meeting',
kwargs={'pk': self.meeting_1.id}))
self.assertFalse(self.bill_1 in self.meeting_1.bills_first.all())
self.assertEqual(res.status_code, 302)
self.assertTrue(res['location'].startswith('%s%s' %
('http://testserver', settings.LOGIN_URL)))
def testConnectToMK(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
res = self.client.post(reverse('committee-meeting',
kwargs={'pk': self.meeting_1.id}),
{'user_input_type': 'mk',
'mk_name': self.mk_1.name})
self.assertEqual(res.status_code, 302)
self.assertTrue(self.meeting_1 in self.mk_1.committee_meetings.all())
self.client.logout()
def testConnectToBill(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
res = self.client.post(reverse('committee-meeting',
kwargs={'pk':
self.meeting_1.id}),
{'user_input_type': 'bill',
'bill_id': self.bill_1.id})
self.assertEqual(res.status_code, 302)
self.assertTrue(self.bill_1 in self.meeting_1.bills_first.all())
self.client.logout()
def test_add_tag_login_required(self):
url = reverse('add-tag-to-object',
kwargs={'app':APP,
'object_type':'committeemeeting',
'object_id': self.meeting_1.id})
res = self.client.post(url, {'tag_id':self.tag_1})
self.assertRedirects(res, "%s?next=%s" % (settings.LOGIN_URL, url),
status_code=302)
def test_add_tag(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
url = reverse('add-tag-to-object',
kwargs={'app':APP,
'object_type': 'committeemeeting',
'object_id': self.meeting_1.id})
res = self.client.post(url, {'tag_id':self.tag_1.id})
self.assertEqual(res.status_code, 200)
self.assertIn(self.tag_1, self.meeting_1.tags)
@unittest.skip("creating tags currently disabled")
def test_create_tag_permission_required(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
url = reverse('create-tag',
kwargs={'app':APP,
'object_type': 'committeemeeting',
'object_id': self.meeting_1.id})
res = self.client.post(url, {'tag':'new tag'})
self.assertRedirects(res, "%s?next=%s" % (settings.LOGIN_URL, url),
status_code=302)
@unittest.skip("creating tags currently disabled")
def test_create_tag(self):
self.assertTrue(self.client.login(username='adrian',
password='ADRIAN'))
url = reverse('create-tag',
kwargs={'app':APP,
'object_type': 'committeemeeting',
'object_id': self.meeting_1.id})
res = self.client.post(url, {'tag':'new tag'})
self.assertEqual(res.status_code, 200)
self.new_tag = Tag.objects.get(name='new tag')
self.assertIn(self.new_tag, self.meeting_1.tags)
def test_committeemeeting_by_tag(self):
res = self.client.get('%s?tagged=false' % reverse('committee-all-meetings'))
self.assertQuerysetEqual(res.context['object_list'],
['<CommitteeMeeting: c1 - python>',
'<CommitteeMeeting: c1 - django>'],
)
self.ti = TaggedItem._default_manager.create(
tag=self.tag_1,
content_type=ContentType.objects.get_for_model(CommitteeMeeting),
object_id=self.meeting_1.id)
res = self.client.get(reverse('committeemeeting-tag', args=[self.tag_1.name]))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'committees/committeemeeting_list_by_tag.html')
tag = res.context['tag']
self.assertEqual(tag, self.tag_1)
self.assertQuerysetEqual(res.context['object_list'],
['<CommitteeMeeting: c1 - django>'])
res = self.client.get('%s?tagged=false' % reverse('committee-all-meetings'))
self.assertQuerysetEqual(res.context['object_list'],
['<CommitteeMeeting: c1 - python>'])
# cleanup
self.ti.delete()
def tearDown(self):
self.meeting_1.delete()
self.meeting_2.delete()
self.committee_1.delete()
self.committee_2.delete()
self.jacob.delete()
self.group.delete()
self.bill_1.delete()
self.mk_1.delete()
self.topic.delete()
class TopicsTest(TestCase):
def setUp(self):
self.committee_1 = Committee.objects.create(name='c1')
self.committee_2 = Committee.objects.create(name='c2')
self.meeting_1 = self.committee_1.meetings.create(date=datetime.now(),
protocol_text='''jacob:
I am a perfectionist
adrian:
I have a deadline''')
self.meeting_1.create_protocol_parts()
self.meeting_2 = self.committee_1.meetings.create(date=datetime.now(),
protocol_text='m2')
self.meeting_2.create_protocol_parts()
self.jacob = User.objects.create_user('jacob', 'jacob@example.com',
'JKM')
self.ofri = User.objects.create_user('ofri', 'ofri@example.com',
'ofri')
(self.group, created) = Group.objects.get_or_create(name='Valid Email')
if created:
self.group.save()
self.group.permissions.add(Permission.objects.get(name='Can add Topic'))
self.jacob.groups.add(self.group)
self.mk_1 = Member.objects.create(name='mk 1')
self.topic = self.committee_1.topic_set.create(creator=self.jacob,
title="hello", description="hello world")
self.topic2 = self.committee_1.topic_set.create(creator=self.ofri,
title="bye", description="goodbye")
self.linktype = LinkType.objects.create(title='default')
def testBasic(self):
self.topic2.set_status(TOPIC_REJECTED, "just because")
self.assertEqual(self.committee_1.topic_set.get_public().count(), 1)
self.assertEqual(Topic.objects.get_public().count(), 1)
self.topic.set_status(TOPIC_REJECTED, "because I feel like it")
self.assertEqual(self.committee_1.topic_set.get_public().count(), 0)
def testPermissions(self):
self.assertTrue(self.topic.can_edit(self.jacob))
self.assertFalse(self.topic.can_edit(self.ofri))
self.topic.editors.add(self.ofri)
self.assertTrue(self.topic.can_edit(self.ofri))
self.topic.editors.remove(self.ofri)
def test_edit_topic_form(self):
res = self.client.get(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id,
'topic_id': self.topic.id}))
self.assertEqual(res.status_code, 302) # login required
self.assertTrue(self.client.login(username='ofri',
password='ofri'))
res = self.client.get(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id,
'topic_id': self.topic.id}))
self.assertEqual(res.status_code, 403) # user is not an editor
self.assertTrue(self.client.login(username='jacob',
password='JKM'))
res = self.client.get(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id,
'topic_id': self.topic.id}))
self.assertEqual(res.status_code, 200) # user is an editor
self.assertTemplateUsed(res, 'committees/edit_topic.html')
def test_edit_topic_logged_required(self):
res = self.client.post(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id,
'topic_id': self.topic.id}),
{'title':'test topic title',
'description': 'test topic description',
'committees':self.committee_1.id,
'form-INITIAL_FORMS':0,
'form-MAX_NUM_FORMS':'',
'form-TOTAL_FORMS':3})
self.assertEqual(res.status_code, 302) # redirect to login
self.assertTrue(res['location'].startswith('%s%s' %
('http://testserver', settings.LOGIN_URL)))
def test_edit_topic(self):
self.assertTrue(self.client.login(username='jacob',
password='JKM'))
res = self.client.post(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id,
'topic_id': self.topic.id}),
{'title':'test topic title',
'description': 'test topic description',
'committees':self.committee_1.id,
'form-INITIAL_FORMS':0,
'form-MAX_NUM_FORMS':'',
'form-TOTAL_FORMS':3})
self.assertEqual(res.status_code, 302) # redirect after POST
t = Topic.objects.get(pk=self.topic.id)
self.assertEqual(t.title, 'test topic title')
self.assertEqual(t.description, 'test topic description')
self.assertEqual(Topic.objects.count(), 2) # make sure we didn't create
# a new topic
def test_add_topic(self):
self.assertTrue(self.client.login(username='jacob',
password='JKM'))
res = self.client.post(reverse('edit-committee-topic',
kwargs={'committee_id': self.committee_1.id}),
{'title':'test topic title',
'description': 'test topic description',
'committees':self.committee_1.id,
'form-INITIAL_FORMS':0,
'form-MAX_NUM_FORMS':'',
'form-TOTAL_FORMS':3})
self.assertEqual(res.status_code, 302) # redirect after POST
topic_id = res['location'].split('/')[-2] # id of the new topic
t = Topic.objects.get(pk=topic_id)
self.assertEqual(t.title, 'test topic title')
self.assertEqual(t.description, 'test topic description')
self.assertEqual(Topic.objects.count(), 3) # make sure we created
# a new topic
# cleanup
t.delete()
def testListView (self):
res = self.client.get(reverse('topic-list'))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'committees/topic_list.html')
self.assertQuerysetEqual(res.context['topics'].order_by('pk'),
["<Topic: hello>", "<Topic: bye>"])
def testRanking(self):
self.assertQuerysetEqual(Topic.objects.order_by('pk'),
["<Topic: hello>", "<Topic: bye>"])
self.topic2.rating.add(score=4, user=self.ofri, ip_address="127.0.0.1")
self.assertQuerysetEqual(Topic.objects.by_rank(),
["<Topic: bye>", "<Topic: hello>"])
def tearDown(self):
self.meeting_1.delete()
self.meeting_2.delete()
self.committee_1.delete()
self.committee_2.delete()
self.jacob.delete()
self.group.delete()
self.mk_1.delete()
self.topic.delete()
|
{
"content_hash": "93063889d84847cd63831ed624bb2475",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 128,
"avg_line_length": 50.43418013856813,
"alnum_prop": 0.5397930213389505,
"repo_name": "habeanf/Open-Knesset",
"id": "a8917e219a3259c4813df0bad04693336244bd48",
"size": "21838",
"binary": false,
"copies": "8",
"ref": "refs/heads/upmaster",
"path": "committees/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "346134"
},
{
"name": "HTML",
"bytes": "689299"
},
{
"name": "JavaScript",
"bytes": "214741"
},
{
"name": "Python",
"bytes": "3990592"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
}
|
"""
Spanakopita markup tool. The markup is loosely based on a combination of
http://txt2tags.sf.net with Python's concept of significant whitespace.
Blank lines and indentation are significant. More indentation than the previous
line will cause block indentation. In a list, indentation creates sublists. I
strongly suggest avoiding tabs and using only spaces (Soft Tabs, in
TextMate-speak), although in theory the code should work so long as you don't
mix them up.
Comments and special:
## At the beginning of a line is for special stuff.
## TABLE_OF_CONTENTS
Headers:
___ Level 1 __________________________________________________________
______ Level 2 _______________________________________________________
_________ Level 3 ____________________________________________________
Any number of trailing underscores is permitted.
Escaping:
Backslash generally acts as an escape-character.
Beautifiers:
//italics//
**bold**
__underline__
--strikeout--
Beautifiers can only be used **inline**.
Code:
Code can be designated using { and }, either {inline} or in a block:
{
void aCodeBlock() {
}
}
Links are still supported within code but all other markup is
disabled.
Lists:
Lists begin with "-":
- This
- Is
- A
- List
    Only numbered lists are supported: each "-" item becomes part of an
    ordered list.
Tables:
Place || at the beginning and end of a row. Use | to separate
cells within a row. Cells can either be defined inline:
|| r1 c1 | r1 c2 | r1 c3 ||
        || r2 c1 | r2 c2 | r2 c3 ||
Or using indentation:
||
r1 c1
|
r1 c2
|
r1 c3
||
You can also mix and match on a per-cell basis. The advantage
of using indentation is that it supports nested tables.
Meta-discussion about links:
In the examples that follow, a link may either be an absolute URL
or a relative path. Relative paths may include spaces etc and we will
do our best to apply URL escaping in all cases.
Images:
    [link] includes an image at the path ``link``. The image will be
    linked to its source in such a way that clicking it will cause the
    source to be "opened".
Links:
[[Some Text]] links to "Some Text.sp" with the link text "Some Text".
[[http://an.absolute.url/]] should work as well.
    [[Some Text @ link]] links to ``link`` with the link text "Some Text".
    [[[link] @ link]] links to ``link`` with an image.
"""
import sys, re, urllib
DEBUG = False
# ___ Ast Classes ______________________________________________________
class Ast(object):
# Abstract.
def __init__(self, pos):
self.pos = pos
pass
@property
def tag(self):
return self.__class__.__name__
def __str__(self):
return "%s()" % (self.tag)
class LeafAst(Ast):
def __init__(self, pos):
super(LeafAst, self).__init__(pos)
self.children_a = ()
def dump(self, indent, out):
out.write("%s%s:\n" % (indent, self))
class ParentAst(Ast):
def __init__(self, pos):
super(ParentAst, self).__init__(pos)
self.children_a = []
def append_child(self, a_node):
self.children_a.append(a_node)
def rstrip_text(self):
if self.children_a and isinstance(self.children_a[-1], Text):
u_text = self.children_a[-1].u_text.rstrip()
if not u_text:
self.children_a[-1:] = [] # Remove last entry
else:
self.children_a[-1].u_text = u_text
def append_text(self, pos, u_text):
if self.children_a and isinstance(self.children_a[-1], Text):
self.children_a[-1].u_text += u_text
else:
self.append_child(Text(pos, u_text))
def to_html(self, out):
out.write('<%s>\n' % self.html)
for c in self.children_a: c.to_html(out)
out.write('</%s>\n' % self.html)
def dump(self, indent, out):
out.write("%s%s:\n" % (indent, self))
cindent = indent + " "
for c in self.children_a:
c.dump(cindent, out)
def escape_text(u_text):
unescaped = re.compile(ur"[a-zA-Z0-9./,;: (){}\[\]*+-]")
def filter(u_char):
if unescaped.match(u_char):
return u_char.encode('ASCII')
return "&#x%04x;" % ord(u_char)
return "".join(filter(u) for u in u_text)
class Html(ParentAst):
html = "html"
class Body(ParentAst):
html = "body"
class Text(LeafAst):
def __init__(self, pos, u_text):
super(Text, self).__init__(pos)
self.u_text = u_text
def to_html(self, out):
out.write(escape_text(self.u_text))
def __str__(self):
return "%s(text=%s)" % (self.tag, self.u_text)
class Para(LeafAst):
def to_html(self, out):
out.write("<p>")
class Indented(ParentAst):
html = "blockquote"
class Code(ParentAst):
html = "pre"
class Header(ParentAst):
def __init__(self, pos, level):
super(Header, self).__init__(pos)
self.level = level
def to_html(self, out):
out.write('<h%d>' % self.level)
for c in self.children_a: c.to_html(out)
out.write('</h%d>' % self.level)
class Beautified(ParentAst):
# Abstract.
def to_html(self, out):
out.write('<%s>' % self.html)
for c in self.children_a: c.to_html(out)
out.write('</%s>' % self.html)
class Italicized(Beautified):
html = "i"
class Bolded(Beautified):
html = "b"
class Monospaced(Beautified):
html = "tt"
class Underlined(Beautified):
html = "u"
class Struckout(Beautified):
html = "strike"
class ListItem(ParentAst):
def to_html(self, out):
out.write('\n<li> ')
for c in self.children_a: c.to_html(out)
class List(ParentAst):
# Abstract. Children: list items.
pass
class OrderedList(List):
html = "ol"
class Table(ParentAst):
# Children: table rows.
html = "table"
def to_html(self, out):
out.write('<table border="1">\n')
for c in self.children_a: c.to_html(out)
out.write('</table>\n')
class TableRow(ParentAst):
# Children: table columns.
html = "tr"
class TableCell(ParentAst):
# Children: misc elems.
html = "td"
class Image(LeafAst):
def __init__(self, pos):
super(Image, self).__init__(pos)
self.url = None # must be set at some point
def to_html(self, out):
out.write('<img src="%s">' % self.url)
#out.write('<object data="%s"></object>' % self.url)
def __str__(self):
return "%s(url=%s)" % (self.tag, self.url)
class Link(ParentAst):
def __init__(self, pos):
super(Link, self).__init__(pos)
self.url = None # must be set at some point
def to_html(self, out):
out.write('<a href="%s">' % self.url)
for c in self.children_a: c.to_html(out)
out.write('</a>')
def __str__(self):
return "%s(url=%s)" % (self.tag, self.url)
# ___ Lexer ____________________________________________________________
#
# Newlines and whitespace are handled as follows:
#
# Ignorable whitespace generates the token SPACE. This
# includes a single newline which does not change the indentation level.
#
# Two or more consecutive newlines generates the token BLANK_LINE.
#
# In addition, whenever a newline is followed by a new level of
# indentation, one or more INDENT or UNDENT tokens are produced.
# Applied within lines (order is significant):
WITHIN_REGULAR_EXPRESSIONS = [
('AT', re.compile(r'@')),
('EMDASH', re.compile(r'---')),
('ITAL', re.compile(r'//')),
('BOLD', re.compile(r'\*\*')),
('UNDER', re.compile(r'__')),
('STRIKE', re.compile(r'--')),
('TABLE_ROW', re.compile(r'\|\|')),
('TABLE_CELL', re.compile(r'\|')),
('L_CURLY', re.compile(r'{')),
('R_CURLY', re.compile(r'}')),
('L_SQUARE_SQUARE', re.compile(r'\[\s*\[')),
('R_SQUARE_SQUARE', re.compile(r'\]\s*\]')),
('L_SQUARE', re.compile(r'\[')),
('R_SQUARE', re.compile(r'\]'))
]
# Checked only at the start of a line:
START_REGULAR_EXPRESSIONS = [
('HEADER', re.compile(r'___+')),
('BULLET', re.compile(r'-(?!-)')),
]
class Token(object):
def __init__(self, tag, u_text):
self.tag = tag
self.u_text = u_text
def __str__(self):
return "%s(%s)" % (self.tag, self.u_text.encode("utf-8"))
class Lexer(object):
def __init__(self, u_text):
self.u_text = u_text
self.eol = True
self.pos = 0
self.indent = 0 # Indent of current line.
self.indents = [0] # Indents for which we have issued tokens.
self.token = None
self.verbatim_mode = 0
self.next()
def skip_space(self):
newlines = 0
len_text = len(self.u_text)
p = self.pos
while p < len_text and self.u_text[p].isspace():
if self.u_text[p] == u"\n":
self.indent = 0
newlines += 1
elif newlines:
self.indent += 1
p += 1
if newlines >= 2: # At least one blank line.
tag = 'BLANK_LINE'
else:
tag = 'SPACE'
self.token = Token(tag, self.u_text[self.pos:p])
self.pos = p
self.eol = (newlines >= 1)
return self.token
def check_regexp(self, tag, regexp):
mo = regexp.match(self.u_text, self.pos)
if mo:
self.token = Token(tag, mo.group(0))
self.pos += len(mo.group(0))
return True
return False
def skip_to_eol(self):
# Advances pos to the end of the line and loads next token. Returns skipped text.
p = self.pos
while p < len(self.u_text) and self.u_text[p] != u'\n':
p += 1
u_result = self.u_text[self.pos:p]
self.pos = p
return u_result
def is_word(self, c):
return not (c.isspace() or c in u"_-*/\\{}[]|\n")
def next(self):
self._next()
if DEBUG: sys.stderr.write("%s\n" % (self.token,))
return self.token
def push_indentation_level(self, rel_amnt):
"""
Pushes an indentation level to rel_amnt more than it is now.
When the indentation drops lower, an UNDENT token will be generated.
Used for bullet lists.
"""
self.indent += rel_amnt
self.indents.append(self.indent)
def start_verbatim_mode(self):
assert not self.verbatim_mode
self.verbatim_mode = 1
def _next(self):
if self.pos >= len(self.u_text):
if len(self.indents) > 1:
self.indents.pop()
self.token = Token('UNDENT', u"")
else:
self.token = Token('EOF', u"")
return
u_chr = self.u_text[self.pos]
# Whitespace:
if self.u_text[self.pos].isspace():
self.skip_space()
return
# Adjust indentation:
if self.indent != self.indents[-1]:
if self.indent > self.indents[-1]:
self.token = Token('INDENT', u"")
self.indents.append(self.indent)
return
elif self.indent < self.indents[-1]:
self.indents.pop()
self.token = Token('UNDENT', u"")
return
if self.verbatim_mode:
self.verbatim_next()
else:
self.non_verbatim_next()
def verbatim_next(self):
u_char = self.u_text[self.pos]
# Check for escape characters:
if u_char == u"\\" and self.pos + 1 < len(self.u_text):
self.pos += 1
# Escaped braces and backslashes are just text:
u_next_char = self.u_text[self.pos]
if u_next_char in [u"{", u"}", u"\\"]:
self.pos += 1
self.token = Token('TEXT', u_next_char)
return
# But otherwise escaped characters are markup:
self.non_verbatim_next()
return
# Count matching braces:
if u_char == u"{":
self.pos += 1
self.verbatim_mode += 1
self.token = Token('TEXT', u_char)
return
if u_char == u"}":
self.pos += 1
self.verbatim_mode -= 1
if self.verbatim_mode:
self.token = Token('TEXT', u_char)
else:
self.token = Token('R_CURLY', u_char)
return
# Otherwise just return as text:
self.next_word()
def non_verbatim_next(self):
# Check for escape character:
if self.u_text[self.pos] == u"\\" and self.pos + 1 < len(self.u_text):
self.token = Token('TEXT', self.u_text[self.pos + 1])
self.pos += 2
return
# Check for characters only significant at the start of the line:
if self.eol:
for (tag, regexp) in START_REGULAR_EXPRESSIONS:
if self.check_regexp(tag, regexp):
return
self.eol = False
# Check for all special characters:
for (tag, regexp) in WITHIN_REGULAR_EXPRESSIONS:
if self.check_regexp(tag, regexp):
return
# Just accumulate one word of text:
self.next_word()
def next_word(self):
p = self.pos + 1
while p < len(self.u_text) and self.is_word(self.u_text[p]):
p += 1
self.token = Token('TEXT', self.u_text[self.pos:p])
self.pos = p
return
@property
def cur_line_number(self):
return self.u_text[:self.pos].count('\n') + 1
@property
def cur_column(self):
if self.cur_line_number == 1:
return self.pos
return (self.pos - self.u_text[:self.pos].rindex('\n')) + 1
def require(self, tags):
if not self.token.tag in tags:
raise ParseError(self, tags)
def is_a(self, tag):
return self.token.tag == tag
# ___ Parser (Recursive Descent) _______________________________________
def parse(inputstream):
""" Returns an Ast or else throws a ParseError. """
bytes = inputstream.read()
u_text = bytes.decode("utf-8") # XXX Search for an encoding string a la emacs
lexer = Lexer(u_text)
a_html = Html(lexer.pos)
a_body = Body(lexer.pos)
a_html.append_child(a_body)
elems(lexer, a_body, [])
if not lexer.is_a('EOF'):
raise ParseError(lexer, 'EOF')
return a_html
class ParseError(Exception):
def __init__(self, lexer, expected):
Exception.__init__(self)
self.token = lexer.token
self.line_number = lexer.cur_line_number
self.column = lexer.cur_column
self.pos = lexer.pos
self.expected = expected
def __str__(self):
base = "Unexpected token %s at %s:%d" % (
self.token.tag, self.line_number, self.column)
if not self.expected:
return base
return base + ", expected one of " + str(self.expected)
LIST_TOKENS = {
'BULLET': OrderedList,
}
BEAUTIFIER_TOKENS = {
'ITAL': (Italicized, 'ITAL'),
'BOLD': (Bolded, 'BOLD'),
'UNDER': (Underlined, 'UNDER'),
'STRIKE': (Struckout, 'STRIKE'),
}
def add_elem(lexer, a_parent):
if lexer.is_a('SPACE'):
a_parent.append_text(lexer.pos, u' ')
lexer.next()
elif lexer.is_a('BLANK_LINE'):
a_parent.append_child(Para(lexer.pos))
lexer.next()
elif lexer.is_a('HEADER'):
a_parent.append_child(header(lexer))
elif lexer.is_a('EMDASH'):
a_parent.append_text(lexer.pos, unichr(8212))
lexer.next()
elif lexer.token.tag in LIST_TOKENS:
a_parent.append_child(any_list(lexer))
elif lexer.is_a('TABLE_ROW'):
a_parent.append_child(table(lexer))
elif lexer.is_a('L_SQUARE_SQUARE'):
a_parent.append_child(link(lexer))
elif lexer.is_a('L_SQUARE'):
a_parent.append_child(image(lexer))
elif lexer.is_a('INDENT'):
a_parent.append_child(indented(lexer))
    elif lexer.is_a('TEXT'):
        a_parent.append_text(lexer.pos, lexer.token.u_text)
        lexer.next()
    elif lexer.is_a('L_CURLY'):
        a_parent.append_child(code(lexer))
    elif lexer.token.tag in BEAUTIFIER_TOKENS:
        a_parent.append_child(beautifier(lexer))
    else:
        return False
    return True
def elems(lexer, a_parent, stop_on_tags):
while True:
if lexer.token.tag in stop_on_tags:
return a_parent
if not add_elem(lexer, a_parent):
return a_parent
def header(lexer):
hlen = len(lexer.token.u_text) / 3
a_hdr = Header(lexer.pos, hlen)
u_text = lexer.skip_to_eol()
u_text = u_text.strip()
while u_text and u_text[-1] == '_': u_text = u_text[:-1]
u_text = u_text.strip()
a_hdr.append_text(lexer.pos, u_text)
lexer.next()
return a_hdr
def code(lexer):
lexer.start_verbatim_mode()
lexer.next() # Consume the '{'
# if followed by an indent, begins an indented code block:
# XXX Move this "unindenting white space" logic into lexer
# so that this code is less special.
if lexer.is_a('SPACE'):
u_ignored_space = lexer.token.u_text
lexer.next()
if lexer.is_a('INDENT'):
a_code = Code(lexer.pos)
skip = u"\n" + (u" " * lexer.indent)
indent_counter = 1
lexer.next()
while lexer.token.tag not in ['R_CURLY', 'EOF']:
#if DEBUG: sys.stderr.write("Indent Counter: %d\n" % indent_counter)
if lexer.is_a('INDENT'):
indent_counter += 1
lexer.next()
elif lexer.is_a('UNDENT'):
indent_counter -= 1
if indent_counter < 0: # Cannot unindent without exiting code mode:
raise ParseError(lexer, ['R_CURLY'])
lexer.next()
elif lexer.token.tag in ['SPACE', 'BLANK_LINE']:
u_text = lexer.token.u_text.replace(skip, u"\n")
#if DEBUG: sys.stderr.write("Space: '%s'\n" % u_text)
a_code.append_text(lexer.pos, u_text)
lexer.next()
else:
if not add_elem(lexer, a_code):
raise ParseError(lexer, [])
lexer.require(['R_CURLY'])
if indent_counter: # Indentation must have returned to where we started:
raise ParseError(lexer, ['UNDENT'])
lexer.next()
return a_code
else:
u_ignored_space = u""
# otherwise, inline code block:
a_code = Monospaced(lexer.pos)
if u_ignored_space:
a_code.append_text(lexer.pos, u_ignored_space)
elems(lexer, a_code, ['R_CURLY', 'BLANK_LINE'])
lexer.require(['R_CURLY'])
lexer.next()
return a_code
def beautifier(lexer):
(ast_cls, end_tag) = BEAUTIFIER_TOKENS[lexer.token.tag]
a_node = ast_cls(lexer.pos)
lexer.next()
elems(lexer, a_node, [end_tag, 'BLANK_LINE'])
lexer.require([end_tag])
lexer.next()
return a_node
def list_item(lexer):
a_item = ListItem(lexer.pos)
    # Adjust indentation level. This way, if user writes:
# - A
# B
# - C
# D
# then B is considered to be at the current indentation
# level but lines C and D are not.
new_indent = len(lexer.token.u_text) + 1
lexer.push_indentation_level(new_indent)
lexer.next()
elems(lexer, a_item, ['UNDENT'])
lexer.require(['UNDENT'])
lexer.next()
return a_item
def any_list(lexer):
#sys.stderr.write("any_list(%s)\n" % lexer.token)
list_tag = lexer.token.tag
ast_cls = LIST_TOKENS[list_tag]
a_list = ast_cls(lexer.pos)
while lexer.token.tag == list_tag:
a_item = list_item(lexer)
a_list.append_child(a_item)
return a_list
def elems_until_undent(lexer, a_parent):
elems(lexer, a_parent, ())
lexer.require(['UNDENT'])
lexer.next()
return a_parent
def indented(lexer):
lexer.next() # consume INDENT
if lexer.token.tag in LIST_TOKENS:
# An indented list is just a list.
a_result = any_list(lexer)
lexer.require(['UNDENT'])
lexer.next()
return a_result
else:
a_result = Indented(lexer.pos)
return elems_until_undent(lexer, a_result)
def table_row(lexer):
a_row = TableRow(lexer.pos)
while True:
# Consume a CELL:
a_cell = TableCell(lexer.pos)
a_row.append_child(a_cell)
lexer.next()
skip_space(lexer)
# A cell followed by a newline and an indent
# reads its content until the undent. Otherwise,
# we read until a blank line, row, or cell:
if lexer.is_a('INDENT'):
lexer.next()
elems_until_undent(lexer, a_cell)
else:
elems(lexer, a_cell, ['TABLE_ROW', 'TABLE_CELL'])
# If we found a cell separator, consume another CELL:
if lexer.token.tag == 'TABLE_CELL':
continue
# Otherwise, we expect another ROW indicator to end the row:
        lexer.require(['TABLE_ROW'])
lexer.next()
return a_row
def table(lexer):
a_table = Table(lexer.pos)
while lexer.token.tag == 'TABLE_ROW':
a_table.append_child(table_row(lexer))
skip_space(lexer)
return a_table
def skip_space(lexer):
while lexer.token.tag == 'SPACE':
lexer.next()
scheme = re.compile(ur"[a-zA-Z0-9+.-]+:")
def path_to_url(u_path):
if scheme.match(u_path):
i = u_path.index(u":")+1
else:
i = 0
return u_path[:i].encode('ASCII') + urllib.quote(u_path[i:].encode('UTF-8'))
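# Behavior sketch: an explicit scheme is preserved as-is and only the rest of
# the path is percent-escaped, so both of these hold:
#   path_to_url(u"Some Text.sp") == "Some%20Text.sp"
#   path_to_url(u"http://an.absolute.url/a b") == "http://an.absolute.url/a%20b"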
def url(lexer, a_node, term):
# Only accept text and whitespace until ']':
u_path = u""
skip_space(lexer)
# XXX Rewrite to use verbatim_mode of lexer?
VERB_TAGS = ['TEXT', 'SPACE'] + BEAUTIFIER_TOKENS.keys()
while True:
if lexer.token.tag in VERB_TAGS:
u_path += lexer.token.u_text
lexer.next()
elif lexer.token.tag == term:
a_node.url = path_to_url(u_path)
return
else:
raise ParseError(lexer, [term])
ext = re.compile(ur"\.[a-zA-Z0-9-]+$")
def default_text(url):
u_text = urllib.unquote(url).decode("UTF-8")
if not re.match(r"[a-zA-Z0-9+.-]+:", url):
# Relative: strip extension if any
mo = ext.search(u_text)
if mo:
u_text = u_text[:-len(mo.group(0))]
return u_text
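# Behavior sketch: relative targets drop their extension for display text,
# while absolute URLs are left intact:
#   default_text("Some%20Text.sp") == u"Some Text"
#   default_text("http://an.absolute.url/") == u"http://an.absolute.url/"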
def link(lexer):
# Saw [[
a_link = Link(lexer.pos)
lexer.next()
elems(lexer, a_link, ['AT'])
    lexer.require(['AT'])
a_link.rstrip_text()
lexer.next()
url(lexer, a_link, 'R_SQUARE_SQUARE')
lexer.require(['R_SQUARE_SQUARE'])
lexer.next()
if not a_link.children_a:
# Insert default text from URL.
a_link.append_text(lexer.pos, default_text(a_link.url))
return a_link
def image(lexer):
# Saw '[':
a_img = Image(lexer.pos)
lexer.next()
url(lexer, a_img, 'R_SQUARE')
lexer.require(['R_SQUARE'])
lexer.next()
return a_img
# ___ Main _____________________________________________________________
def main(argv):
dump = (argv and argv[0] == "-d")
try:
ast = parse(sys.stdin)
if dump:
ast.dump("", sys.stdout)
else:
ast.to_html(sys.stdout)
except ParseError, e:
print "<html><body>Error: %s</body></html>" % e
if __name__ == "__main__":
main(sys.argv[1:])
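# End-to-end sketch (illustrative): feed a small document through the parser
# and emit HTML; StringIO stands in for the usual stdin stream.
#
#   from StringIO import StringIO
#   a_html = parse(StringIO("___ Title ___\n\nSome **bold** text."))
#   a_html.to_html(sys.stdout)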
|
{
"content_hash": "df46b50d0aa9c3e224c111286bd21114",
"timestamp": "",
"source": "github",
"line_count": 826,
"max_line_length": 90,
"avg_line_length": 29.9636803874092,
"alnum_prop": 0.5258181818181819,
"repo_name": "nikomatsakis/SpanakopitaPlugin",
"id": "f4052bd7999152984ffb8c4b4838d2ac79d21930",
"size": "24824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Spanakopita/filter/filter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "26817"
},
{
"name": "Python",
"bytes": "24824"
}
],
"symlink_target": ""
}
|
from random import randint
from .ValueFuzz import StringFuzzer, IntegerFuzzer, FuzzableInteger
################################################################################
# Classes
################################################################################
class XmlFuzzableAttribute(object):
"""
XmlFuzzableAttribute is a description of an element attribute which
is fuzzable for an element like this:
'<element attribute="value" ... >'
Not expected to be exported, only used to preserve state between
:func:`find_xmlattrs` and :func:`fuzz_xmlattrs`.
name: the attribute
value: the value, either str or FuzzableInteger
fileid: the xml file identifier (used to map from attributes -> files)
start: the file position of the start of 'value'
end: the file position of the end of 'value'
"""
def __init__(self, name, value, element, fileid, start, end):
self.name = name
self.value = value
self.element = element
self.fileid = fileid
self.start = start
self.end = end
def fuzzed_value(self, strfuzz, intfuzz):
"""fuzz and return the attribute as a string"""
if isinstance(self.value, str):
return strfuzz.fuzz_value(self.value)
else:
return intfuzz.fuzz_value(self.value)
################################################################################
# Functions
################################################################################
def find_xmlattrs(files):
"""
This function scans a set of XML documents and identifies all of
the element attributes for later fuzzing. The XML documents are
given in a list of dicts, each having a ``data`` field and any
number of other user defined fields to identify the file.
The result is a list of opaque objects identifying the fuzzable
attributes, which should be used as input to :func:`fuzz_xmlattrs`.
"""
attributes = []
for fileid, f in enumerate(files):
file_data = f["data"]
if file_data.startswith("<?"):
off = file_data.find("?>") + 2
else:
off = 0
try:
while True:
# find the next element starting with '<' (st) and ending with '>' (en)
st = file_data.index("<", off)
en = file_data.index(">", st)
off = en + 1
# find a space within the element
sp = file_data.find(" ", st, en)
if sp == -1:
continue
# extract element name
# (don't really need this, don't bother for now)
element = file_data[st+1:sp]
sp += 1
# find all the attributes within this element
while True:
# find the start and end of the value
try:
eq = file_data.index("=\"", sp, en)
name = file_data[sp:eq] # extract the attribute name
stq = eq + 2 # first char in the value (start quote)
enq = file_data.index("\"", stq, en) # end quote pos
except ValueError:
break
val = file_data[stq:enq] # extract the attribute value
sp = enq + 2 # move sp past the end of the end quote
# skip xml namespace attributes
if name.startswith("xmlns"):
continue
# cascade through the try blocks to find what kind of value this is
try:
val = FuzzableInteger(val)
except ValueError:
# leave val as a string if we can't create a FuzzableInteger from it
pass
# got everything, add to the result list
attributes.append(XmlFuzzableAttribute(name, val, element, fileid, stq, enq))
except ValueError:
pass
return attributes
def fuzz_xmlattrs(files, attributes, aggression, strfuzzer=None, intfuzzer=None):
"""
This method fuzzes the set of attributes found by
:func:`~find_xmlattrs`. The *files* parameter should be the same
one given in :func:`~find_xmlattrs`, and the *attributes* parameter
should be the list returned by the same.
    A positive *aggression* is the inverse of the fraction of attributes
    to mutate (i.e. num_attrs/aggression == number of attrs to mutate);
    a negative *aggression* requests exactly that many attributes. Note
    that positive aggression is statistical (targets are drawn with
    replacement), so even an *aggression* of 1 will not hit every
    attribute in the input.
*strfuzzer* and *intfuzzer* are optional instances of
:class:`~StringFuzzer` and :class:`~IntegerFuzzer` to be used on
their respective types. If not specified, one of each will be
instantiated using the default parameters for each.
The result is a mutated copy of *files*.
"""
if not aggression:
return files
if strfuzzer is None:
strfuzzer = StringFuzzer()
if intfuzzer is None:
intfuzzer = IntegerFuzzer()
result = []
nattributes = len(attributes)
if aggression > 0:
        fuzzable = nattributes // aggression  # integer count of attrs to fuzz
else:
fuzzable = -aggression
fuzzes = sorted(set(randint(0, nattributes-1) for _ in range(fuzzable)))
fuzzes.reverse()
try:
tofuzz = attributes[fuzzes.pop()]
except (IndexError, AttributeError):
tofuzz = None
for i, cf in enumerate(files):
if tofuzz is None or tofuzz.fileid != i:
result.append(cf)
else:
off = 0
fileparts = []
try:
while tofuzz.fileid == i:
fileparts.append(cf["data"][off:tofuzz.start])
off = tofuzz.end
fileparts.append(tofuzz.fuzzed_value(strfuzzer, intfuzzer))
tofuzz = attributes[fuzzes.pop()]
except IndexError:
tofuzz = None
fileparts.append(cf["data"][off:])
newf = dict(cf)
newf["data"] = "".join(str(f) for f in fileparts)
result.append(newf)
return result
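# Usage sketch (illustrative data; extra dict keys besides "data" are carried
# through unchanged): scan one in-memory document, then mutate exactly two
# attributes by passing a negative aggression.
#
#   files = [{"name": "sample.xml",
#             "data": '<root><item id="42" label="hello"/></root>'}]
#   attrs = find_xmlattrs(files)
#   mutated = fuzz_xmlattrs(files, attrs, aggression=-2)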
|
{
"content_hash": "b64caa88475cc2ae9d2b852b13d72a97",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 97,
"avg_line_length": 37.964071856287426,
"alnum_prop": 0.5403785488958991,
"repo_name": "blackberry/ALF",
"id": "f97d4664d4b23a64c943a24bb0ead7ef583a3378",
"size": "7162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alf/fuzz/XmlAttributeFuzz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "97347"
},
{
"name": "Python",
"bytes": "280755"
},
{
"name": "Shell",
"bytes": "4796"
}
],
"symlink_target": ""
}
|
from frappe.model.document import Document
class DocTypeLayoutField(Document):
pass
|
{
"content_hash": "b64ef1034f4ff02d4f0c4cbee528d968",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 21.5,
"alnum_prop": 0.8372093023255814,
"repo_name": "mhbu50/frappe",
"id": "3f8487b6593bc32922ea2a749ae413a6e27ea4d5",
"size": "214",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/custom/doctype/doctype_layout_field/doctype_layout_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|
"""Config flow for the MELCloud platform."""
import asyncio
import logging
from typing import Optional
from aiohttp import ClientError, ClientResponseError
from async_timeout import timeout
import pymelcloud
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME, HTTP_FORBIDDEN
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class FlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def _create_entry(self, username: str, token: str):
"""Register new entry."""
await self.async_set_unique_id(username)
self._abort_if_unique_id_configured({CONF_TOKEN: token})
return self.async_create_entry(
title=username, data={CONF_USERNAME: username, CONF_TOKEN: token}
)
async def _create_client(
self,
username: str,
*,
password: Optional[str] = None,
token: Optional[str] = None,
):
"""Create client."""
if password is None and token is None:
raise ValueError(
"Invalid internal state. Called without either password or token"
)
try:
with timeout(10):
acquired_token = token
if acquired_token is None:
acquired_token = await pymelcloud.login(
username,
password,
self.hass.helpers.aiohttp_client.async_get_clientsession(),
)
await pymelcloud.get_devices(
acquired_token,
self.hass.helpers.aiohttp_client.async_get_clientsession(),
)
except ClientResponseError as err:
if err.status == 401 or err.status == HTTP_FORBIDDEN:
return self.async_abort(reason="invalid_auth")
return self.async_abort(reason="cannot_connect")
except (asyncio.TimeoutError, ClientError):
return self.async_abort(reason="cannot_connect")
return await self._create_entry(username, acquired_token)
async def async_step_user(self, user_input=None):
"""User initiated config flow."""
if user_input is None:
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
),
)
username = user_input[CONF_USERNAME]
return await self._create_client(username, password=user_input[CONF_PASSWORD])
async def async_step_import(self, user_input):
"""Import a config entry."""
return await self._create_client(
user_input[CONF_USERNAME], token=user_input[CONF_TOKEN]
)
|
{
"content_hash": "de39fc2c1d95bcb8185dd819e6522966",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 88,
"avg_line_length": 35.69047619047619,
"alnum_prop": 0.6020680453635757,
"repo_name": "titilambert/home-assistant",
"id": "ed6fc31c41482dbcf6cf7bce78cc4ef2912bfc6f",
"size": "2998",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/melcloud/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
"""Check the generated site."""
from collections import defaultdict
import os
import re
import sys
import time
import logbook
try:
from urllib import unquote
from urlparse import urlparse, urljoin, urldefrag
except ImportError:
from urllib.parse import unquote, urlparse, urljoin, urldefrag # NOQA
from doit.loader import generate_tasks
import lxml.html
import requests
from nikola.plugin_categories import Command
from nikola.utils import get_logger
def _call_nikola_list(site, cache=None):
if cache is not None:
if 'files' in cache and 'deps' in cache:
return cache['files'], cache['deps']
files = []
deps = defaultdict(list)
for task in generate_tasks('render_site', site.gen_tasks('render_site', "Task", '')):
files.extend(task.targets)
for target in task.targets:
deps[target].extend(task.file_dep)
for task in generate_tasks('post_render', site.gen_tasks('render_site', "LateTask", '')):
files.extend(task.targets)
for target in task.targets:
deps[target].extend(task.file_dep)
if cache is not None:
cache['files'] = files
cache['deps'] = deps
return files, deps
def real_scan_files(site, cache=None):
"""Scan for files."""
task_fnames = set([])
real_fnames = set([])
output_folder = site.config['OUTPUT_FOLDER']
# First check that all targets are generated in the right places
for fname in _call_nikola_list(site, cache)[0]:
fname = fname.strip()
if fname.startswith(output_folder):
task_fnames.add(fname)
# And now check that there are no non-target files
for root, dirs, files in os.walk(output_folder, followlinks=True):
for src_name in files:
fname = os.path.join(root, src_name)
real_fnames.add(fname)
only_on_output = list(real_fnames - task_fnames)
only_on_input = list(task_fnames - real_fnames)
return (only_on_output, only_on_input)
def fs_relpath_from_url_path(url_path):
"""Create a filesystem relative path from an URL path."""
# Expects as input an urlparse(s).path
url_path = unquote(url_path)
# in windows relative paths don't begin with os.sep
if sys.platform == 'win32' and len(url_path):
url_path = url_path.replace('/', '\\')
return url_path
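# Behavior sketch: percent-escapes are decoded, and on Windows URL slashes
# additionally become backslashes; on POSIX, for example:
#   fs_relpath_from_url_path('foo%20bar/baz.html') == 'foo bar/baz.html'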
class CommandCheck(Command):
"""Check the generated site."""
name = "check"
logger = None
doc_usage = "[-v] (-l [--find-sources] [-r] | -f [--clean-files])"
doc_purpose = "check links and files in the generated site"
cmd_options = [
{
'name': 'links',
'short': 'l',
'long': 'check-links',
'type': bool,
'default': False,
'help': 'Check for dangling links',
},
{
'name': 'files',
'short': 'f',
'long': 'check-files',
'type': bool,
'default': False,
'help': 'Check for unknown (orphaned and not generated) files',
},
{
'name': 'clean',
'long': 'clean-files',
'type': bool,
'default': False,
'help': 'Remove all unknown files, use with caution',
},
{
'name': 'find_sources',
'long': 'find-sources',
'type': bool,
'default': False,
'help': 'List possible source files for files with broken links.',
},
{
'name': 'verbose',
'long': 'verbose',
'short': 'v',
'type': bool,
'default': False,
'help': 'Be more verbose.',
},
{
'name': 'remote',
'long': 'remote',
'short': 'r',
'type': bool,
'default': False,
'help': 'Check that remote links work.',
},
]
def _execute(self, options, args):
"""Check the generated site."""
self.logger = get_logger('check')
if not options['links'] and not options['files'] and not options['clean']:
print(self.help())
return False
if options['verbose']:
self.logger.level = logbook.DEBUG
else:
self.logger.level = logbook.NOTICE
failure = False
if options['links']:
failure |= self.scan_links(options['find_sources'], options['remote'])
if options['files']:
failure |= self.scan_files()
if options['clean']:
failure |= self.clean_files()
if failure:
return 1
existing_targets = set([])
checked_remote_targets = {}
cache = {}
def analyze(self, fname, find_sources=False, check_remote=False):
"""Analyze links on a page."""
rv = False
self.whitelist = [re.compile(x) for x in self.site.config['LINK_CHECK_WHITELIST']]
self.internal_redirects = [urljoin('/', _[0]) for _ in self.site.config['REDIRECTIONS']]
base_url = urlparse(self.site.config['BASE_URL'])
self.existing_targets.add(self.site.config['SITE_URL'])
self.existing_targets.add(self.site.config['BASE_URL'])
url_type = self.site.config['URL_TYPE']
deps = {}
if find_sources:
deps = _call_nikola_list(self.site, self.cache)[1]
if url_type in ('absolute', 'full_path'):
url_netloc_to_root = urlparse(self.site.config['BASE_URL']).path
try:
filename = fname
if filename.startswith(self.site.config['CACHE_FOLDER']):
# Do not look at links in the cache, which are not parsed by
# anyone and may result in false positives. Problems arise
# with galleries, for example. Full rationale: (Issue #1447)
self.logger.notice("Ignoring {0} (in cache, links may be incorrect)".format(filename))
return False
if not os.path.exists(fname):
# Quietly ignore files that don’t exist; use `nikola check -f` instead (Issue #1831)
return False
if '.html' == fname[-5:]:
with open(filename, 'rb') as inf:
d = lxml.html.fromstring(inf.read())
extra_objs = lxml.html.fromstring('<html/>')
# Turn elements with a srcset attribute into individual img elements with src attributes
for obj in list(d.xpath('(*//img|*//source)')):
if 'srcset' in obj.attrib:
for srcset_item in obj.attrib['srcset'].split(','):
extra_objs.append(lxml.etree.Element('img', src=srcset_item.strip().split(' ')[0]))
link_elements = list(d.iterlinks()) + list(extra_objs.iterlinks())
# Extract links from XML formats to minimal HTML, allowing those to go through the link checks
elif '.atom' == filename[-5:]:
d = lxml.etree.parse(filename)
link_elements = lxml.html.fromstring('<html/>')
for elm in d.findall('*//{http://www.w3.org/2005/Atom}link'):
feed_link = elm.attrib['href'].split('?')[0].strip() # strip FEED_LINKS_APPEND_QUERY
link_elements.append(lxml.etree.Element('a', href=feed_link))
link_elements = list(link_elements.iterlinks())
elif filename.endswith('sitemap.xml') or filename.endswith('sitemapindex.xml'):
d = lxml.etree.parse(filename)
link_elements = lxml.html.fromstring('<html/>')
for elm in d.getroot().findall("*//{http://www.sitemaps.org/schemas/sitemap/0.9}loc"):
link_elements.append(lxml.etree.Element('a', href=elm.text.strip()))
link_elements = list(link_elements.iterlinks())
else: # unsupported file type
return False
for l in link_elements:
target = l[2]
if target == "#":
continue
target = urldefrag(target)[0]
if any([urlparse(target).netloc.endswith(_) for _ in ['example.com', 'example.net', 'example.org']]):
self.logger.debug("Not testing example address \"{0}\".".format(target))
continue
# absolute URL to root-relative
if target.startswith(base_url.geturl()):
target = target.replace(base_url.geturl(), '/')
parsed = urlparse(target)
# Warn about links from https to http (mixed-security)
if base_url.netloc == parsed.netloc and base_url.scheme == "https" and parsed.scheme == "http":
self.logger.warn("Mixed-content security for link in {0}: {1}".format(filename, target))
# Link to an internal REDIRECTIONS page
if target in self.internal_redirects:
redir_status_code = 301
redir_target = [_dest for _target, _dest in self.site.config['REDIRECTIONS'] if urljoin('/', _target) == target][0]
self.logger.warn("Remote link moved PERMANENTLY to \"{0}\" and should be updated in {1}: {2} [HTTP: 301]".format(redir_target, filename, target))
# Absolute links to other domains, skip
# Absolute links when using only paths, skip.
if ((parsed.scheme or target.startswith('//')) and parsed.netloc != base_url.netloc) or \
((parsed.scheme or target.startswith('//')) and url_type in ('rel_path', 'full_path')):
if not check_remote or parsed.scheme not in ["http", "https"]:
continue
if target in self.checked_remote_targets: # already checked this exact target
if self.checked_remote_targets[target] in [301, 308]:
self.logger.warn("Remote link PERMANENTLY redirected in {0}: {1} [Error {2}]".format(filename, target, self.checked_remote_targets[target]))
elif self.checked_remote_targets[target] in [302, 307]:
self.logger.debug("Remote link temporarily redirected in {0}: {1} [HTTP: {2}]".format(filename, target, self.checked_remote_targets[target]))
elif self.checked_remote_targets[target] > 399:
self.logger.error("Broken link in {0}: {1} [Error {2}]".format(filename, target, self.checked_remote_targets[target]))
continue
# Skip whitelisted targets
if any(re.search(_, target) for _ in self.whitelist):
continue
# Check the remote link works
req_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0 (Nikola)'} # I’m a real boy!
resp = requests.head(target, headers=req_headers, allow_redirects=False)
# Retry client errors (4xx) as GET requests because many servers are broken
if resp.status_code >= 400 and resp.status_code <= 499:
time.sleep(0.5)
resp = requests.get(target, headers=req_headers, allow_redirects=False)
# Follow redirects and see where they lead, redirects to errors will be reported twice
if resp.status_code in [301, 302, 307, 308]:
redir_status_code = resp.status_code
time.sleep(0.5)
# Known redirects are retested using GET because IIS servers otherwise get HEADaches
resp = requests.get(target, headers=req_headers, allow_redirects=True)
# Permanent redirects should be updated
if redir_status_code in [301, 308]:
self.logger.warn("Remote link moved PERMANENTLY to \"{0}\" and should be updated in {1}: {2} [HTTP: {3}]".format(resp.url, filename, target, redir_status_code))
if redir_status_code in [302, 307]:
self.logger.debug("Remote link temporarily redirected to \"{0}\" in {1}: {2} [HTTP: {3}]".format(resp.url, filename, target, redir_status_code))
self.checked_remote_targets[resp.url] = resp.status_code
self.checked_remote_targets[target] = redir_status_code
else:
self.checked_remote_targets[target] = resp.status_code
if resp.status_code > 399: # Error
self.logger.error("Broken link in {0}: {1} [Error {2}]".format(filename, target, resp.status_code))
continue
elif resp.status_code <= 399: # The address leads *somewhere* that is not an error
self.logger.debug("Successfully checked remote link in {0}: {1} [HTTP: {2}]".format(filename, target, resp.status_code))
continue
self.logger.warn("Could not check remote link in {0}: {1} [Unknown problem]".format(filename, target))
continue
if url_type == 'rel_path':
if target.startswith('/'):
target_filename = os.path.abspath(
os.path.join(self.site.config['OUTPUT_FOLDER'], unquote(target.lstrip('/'))))
else: # Relative path
unquoted_target = unquote(target).encode('utf-8')
target_filename = os.path.abspath(
os.path.join(os.path.dirname(filename).encode('utf-8'), unquoted_target))
elif url_type in ('full_path', 'absolute'):
if url_type == 'absolute':
# convert to 'full_path' case, ie url relative to root
url_rel_path = parsed.path[len(url_netloc_to_root):]
else:
# convert to relative to base path
url_rel_path = target[len(url_netloc_to_root):]
if url_rel_path == '' or url_rel_path.endswith('/'):
url_rel_path = urljoin(url_rel_path, self.site.config['INDEX_FILE'])
fs_rel_path = fs_relpath_from_url_path(url_rel_path)
target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], fs_rel_path)
if any(re.search(x, target_filename) for x in self.whitelist):
continue
elif target_filename not in self.existing_targets:
if os.path.exists(target_filename):
self.logger.info("Good link {0} => {1}".format(target, target_filename))
self.existing_targets.add(target_filename)
else:
rv = True
self.logger.warn("Broken link in {0}: {1}".format(filename, target))
if find_sources:
self.logger.warn("Possible sources:")
self.logger.warn("\n".join(deps[filename]))
self.logger.warn("===============================\n")
except Exception as exc:
self.logger.error(u"Error with: {0} {1}".format(filename, exc))
return rv
def scan_links(self, find_sources=False, check_remote=False):
"""Check links on the site."""
self.logger.debug("Checking Links:")
self.logger.debug("===============\n")
self.logger.debug("{0} mode".format(self.site.config['URL_TYPE']))
failure = False
# Maybe we should just examine all HTML files
output_folder = self.site.config['OUTPUT_FOLDER']
if urlparse(self.site.config['BASE_URL']).netloc == 'example.com':
self.logger.error("You've not changed the SITE_URL (or BASE_URL) setting from \"example.com\"!")
for fname in _call_nikola_list(self.site, self.cache)[0]:
if fname.startswith(output_folder):
if '.html' == fname[-5:]:
if self.analyze(fname, find_sources, check_remote):
failure = True
if '.atom' == fname[-5:]:
if self.analyze(fname, find_sources, False):
failure = True
if fname.endswith('sitemap.xml') or fname.endswith('sitemapindex.xml'):
if self.analyze(fname, find_sources, False):
failure = True
if not failure:
self.logger.debug("All links checked.")
return failure
def scan_files(self):
"""Check files in the site, find missing and orphaned files."""
failure = False
self.logger.debug("Checking Files:")
self.logger.debug("===============\n")
only_on_output, only_on_input = real_scan_files(self.site, self.cache)
# Ignore folders
only_on_output = [p for p in only_on_output if not os.path.isdir(p)]
only_on_input = [p for p in only_on_input if not os.path.isdir(p)]
if only_on_output:
only_on_output.sort()
self.logger.warn("Files from unknown origins (orphans):")
for f in only_on_output:
self.logger.warn(f)
failure = True
if only_on_input:
only_on_input.sort()
self.logger.warn("Files not generated:")
for f in only_on_input:
self.logger.warn(f)
if not failure:
self.logger.debug("All files checked.")
return failure
def clean_files(self):
"""Remove orphaned files."""
only_on_output, _ = real_scan_files(self.site, self.cache)
for f in only_on_output:
self.logger.debug('removed: {0}'.format(f))
os.unlink(f)
warn_flag = bool(only_on_output)
# Find empty directories and remove them
output_folder = self.site.config['OUTPUT_FOLDER']
all_dirs = []
for root, dirs, files in os.walk(output_folder, followlinks=True):
all_dirs.append(root)
all_dirs.sort(key=len, reverse=True)
for d in all_dirs:
try:
os.rmdir(d)
self.logger.debug('removed: {0}/'.format(d))
warn_flag = True
except OSError:
pass
if warn_flag:
self.logger.warn('Some files or directories have been removed, your site may need rebuilding')
return True
|
{
"content_hash": "0d032fde52d6f7bad964a23717682667",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 188,
"avg_line_length": 45.55690072639225,
"alnum_prop": 0.539622641509434,
"repo_name": "xuhdev/nikola",
"id": "dfb5820d563f7c6a9d488afca3cb39b76b4ca56f",
"size": "19961",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nikola/plugins/command/check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21532"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "37423"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1228539"
},
{
"name": "Shell",
"bytes": "9993"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import os
from flake8.api.legacy import StyleGuide
from flake8.main.application import Application
def test_flake8_conformance():
"""Test source code for flake8 conformance."""
argv = [
'--ignore=%s' % ','.join([
'D100', 'D101', 'D102', 'D103', 'D104', 'D105',
'E501']),
'--import-order-style=google',
]
style_guide = get_style_guide(argv)
base_path = os.path.join(os.path.dirname(__file__), '..')
paths = [
os.path.join(base_path, 'ros_buildfarm'),
os.path.join(base_path, 'scripts'),
os.path.join(base_path, 'test'),
]
report = style_guide.check_files(paths)
assert report.total_errors == 0, \
'Found %d code style warnings' % report.total_errors
def get_style_guide(argv=None):
# this is a fork of flake8.api.legacy.get_style_guide
# to allow passing command line argument
application = Application()
application.find_plugins()
application.register_plugin_options()
application.parse_configuration_and_cli(argv)
application.make_formatter()
application.make_notifier()
application.make_guide()
application.make_file_checker_manager()
return StyleGuide(application)
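# Usage sketch (hypothetical argv): get_style_guide(['--select=E9,F']) builds a
# StyleGuide restricted to those checks, while argv=None falls back to flake8's
# own configuration and command-line discovery.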
if __name__ == '__main__':
test_flake8_conformance()
|
{
"content_hash": "63fd3fca38e5903c837631dbc71d21b1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 61,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6370716510903427,
"repo_name": "ruffsl/ros_buildfarm",
"id": "734de29fc24b9080b32989dbaa851a6bc44f1e81",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_flake8.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5231"
},
{
"name": "EmberScript",
"bytes": "228125"
},
{
"name": "Groovy",
"bytes": "1561"
},
{
"name": "JavaScript",
"bytes": "12982"
},
{
"name": "Python",
"bytes": "478017"
},
{
"name": "Shell",
"bytes": "9651"
}
],
"symlink_target": ""
}
|
from .. import Provider as SsnProvider
def checksum(digits):
"""
Calculate and return control digit for given list of digits based on
ISO7064, MOD 11,10 standard.
"""
remainder = 10
for digit in digits:
remainder = (remainder + digit) % 10
if remainder == 0:
remainder = 10
remainder = (remainder * 2) % 11
control_digit = 11 - remainder
if control_digit == 10:
control_digit = 0
return control_digit
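# Worked example (hypothetical digits, not a real OIB): feeding the ten digits
# 1 2 3 4 5 6 7 8 9 0 through the MOD 11,10 recurrence above leaves a running
# remainder of 8, so checksum([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) returns 11 - 8 == 3.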
class Provider(SsnProvider):
"""
The Personal identification number (Croatian: Osobni identifikacijski
broj or OIB) is a permanent national identification number of every
Croatian citizen and legal persons domiciled in the Republic of Croatia.
OIB consists of 11 digits which contain no personal information. The OIB
is constructed from ten randomly chosen digits and one digit control number
    (international standard ISO 7064, MOD 11,10).
"""
def ssn(self):
digits = self.generator.random.sample(range(10), 10)
digits.append(checksum(digits))
return ''.join(map(str, digits))
vat_id_formats = (
'HR###########',
)
def vat_id(self):
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Croatian VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
|
{
"content_hash": "04bbf370b569c9868dc915459d73154b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 28.775510204081634,
"alnum_prop": 0.6404255319148936,
"repo_name": "danhuss/faker",
"id": "0ccc057693457456369b377a9dc7ceaa4f311922",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faker/providers/ssn/hr_HR/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1411894"
}
],
"symlink_target": ""
}
|
"""
Mulled Tests
"""
import subprocess as sp
import tempfile
import tarfile
import os
import shlex
from shutil import which
import logging
from . import utils
import conda_build.api
from conda_build.metadata import MetaData
logger = logging.getLogger(__name__)
# TODO: Make this configurable in bioconda_utils.build and bioconda_utils.cli.
MULLED_CONDA_IMAGE = "quay.io/bioconda/create-env:latest"
def get_tests(path):
"Extract tests from a built package"
tmp = tempfile.mkdtemp()
t = tarfile.open(path)
t.extractall(tmp)
input_dir = os.path.join(tmp, 'info', 'recipe')
tests = [
'/usr/local/env-execute true',
'. /usr/local/env-activate.sh',
]
recipe_meta = MetaData(input_dir)
tests_commands = recipe_meta.get_value('test/commands')
tests_imports = recipe_meta.get_value('test/imports')
requirements = recipe_meta.get_value('requirements/run')
if tests_imports or tests_commands:
if tests_commands:
tests.append(' && '.join(tests_commands))
if tests_imports and 'python' in requirements:
tests.append(
' && '.join('python -c "import %s"' % imp
for imp in tests_imports)
)
elif tests_imports and (
'perl' in requirements or 'perl-threaded' in requirements
):
tests.append(
' && '.join('''perl -e "use %s;"''' % imp
for imp in tests_imports)
)
tests = ' && '.join(tests)
tests = tests.replace('$R ', 'Rscript ')
# this is specific to involucro, the way how we build our containers
tests = tests.replace('$PREFIX', '/usr/local')
tests = tests.replace('${PREFIX}', '/usr/local')
return f"bash -c {shlex.quote(tests)}"
def get_image_name(path):
"""
Returns name of generated docker image.
Parameters
----------
path : str
        Path to a .tar.bz2 package built by conda-build
"""
assert path.endswith('.tar.bz2')
pkg = os.path.basename(path).replace('.tar.bz2', '')
toks = pkg.split('-')
build_string = toks[-1]
version = toks[-2]
name = '-'.join(toks[:-2])
spec = '%s=%s--%s' % (name, version, build_string)
return spec
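# Example (hypothetical package): for 'samtools-1.9-h46bd0b3_0.tar.bz2' the
# tokens split into name 'samtools', version '1.9' and build string
# 'h46bd0b3_0', giving the mulled spec 'samtools=1.9--h46bd0b3_0'.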
def test_package(
path,
name_override=None,
channels=("conda-forge", "local", "bioconda", "defaults"),
mulled_args="",
base_image=None,
conda_image=MULLED_CONDA_IMAGE,
):
"""
Tests a built package in a minimal docker container.
Parameters
----------
path : str
Path to a .tar.bz2 package built by conda-build
name_override : str
Passed as the --name-override argument to mulled-build
channels : list
List of Conda channels to use. Must include an entry "local" for the
local build channel.
mulled_args : str
Mechanism for passing arguments to the mulled-build command. They will
be split with shlex.split and passed to the mulled-build command. E.g.,
mulled_args="--dry-run --involucro-path /opt/involucro"
base_image : None | str
Specify custom base image. Busybox is used in the default case.
conda_image : None | str
Conda Docker image to install the package with during the mulled based
tests.
"""
assert path.endswith('.tar.bz2'), "Unrecognized path {0}".format(path)
# assert os.path.exists(path), '{0} does not exist'.format(path)
conda_bld_dir = os.path.abspath(os.path.dirname(os.path.dirname(path)))
conda_build.api.update_index([conda_bld_dir])
spec = get_image_name(path)
if "local" not in channels:
raise ValueError('"local" must be in channel list')
channels = [
'file://{0}'.format(conda_bld_dir) if channel == 'local' else channel
for channel in channels
]
channel_args = ['--channels', ','.join(channels)]
tests = get_tests(path)
logger.debug('Tests to run: %s', tests)
cmd = [
'mulled-build',
'build-and-test',
spec,
'-n', 'biocontainers',
'--test', tests
]
if name_override:
cmd += ['--name-override', name_override]
cmd += channel_args
cmd += shlex.split(mulled_args)
# galaxy-lib always downloads involucro, unless it's in cwd or its path is explicitly given.
# We inject a POSTINSTALL to the involucro command with a small wrapper to
# create activation / entrypoint scripts for the container.
# We also inject a PREINSTALL to alias conda to mamba so `mamba install` is
# used instead of `conda install` in the container builds.
involucro_path = os.path.join(os.path.dirname(__file__), 'involucro')
if not os.path.exists(involucro_path):
raise RuntimeError('internal involucro wrapper missing')
cmd += ['--involucro-path', involucro_path]
logger.debug('mulled-build command: %s' % cmd)
env = os.environ.copy()
if base_image is not None:
env["DEST_BASE_IMAGE"] = base_image
env["CONDA_IMAGE"] = conda_image
with tempfile.TemporaryDirectory() as d:
with utils.Progress():
p = utils.run(cmd, env=env, cwd=d, mask=False)
return p
|
{
"content_hash": "c209a9379386ad37df5549467374ac99",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 96,
"avg_line_length": 29.150837988826815,
"alnum_prop": 0.6165197393637409,
"repo_name": "bioconda/bioconda-utils",
"id": "47736c8f7eefdccb9dfb9abca9b9dba0bd0df6b2",
"size": "5218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bioconda_utils/pkg_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5624"
},
{
"name": "Dockerfile",
"bytes": "2433"
},
{
"name": "HTML",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "709484"
},
{
"name": "Shell",
"bytes": "2952"
}
],
"symlink_target": ""
}
|
import iso8601
import mox
from oslo.config import cfg
from nova.compute import claims
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.tests.compute import test_compute
from nova.tests.image import fake as fake_image
from nova import utils
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return resources
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_instance(self, shelved_offload_time):
CONF.set_override('shelved_offload_time', shelved_offload_time)
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
None, True, None, False)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
image_id = 'fake_image_id'
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
instance.task_state = task_states.SHELVING
instance.save()
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image_id
sys_meta['shelved_host'] = host
db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
self.compute.driver.power_off(instance)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
mox.IgnoreArg())
update_values = {'power_state': 123,
'vm_state': vm_states.SHELVED,
'task_state': None,
'expected_task_state': [task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
'system_metadata': sys_meta}
if CONF.shelved_offload_time == 0:
update_values['task_state'] = task_states.SHELVING_OFFLOADING
db.instance_update_and_get_original(self.context, instance['uuid'],
update_values, update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance,
db_instance))
self.compute._notify_about_instance_usage(self.context,
instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
db.instance_update_and_get_original(self.context,
instance['uuid'],
{'power_state': 123, 'host': None, 'node': None,
'vm_state': vm_states.SHELVED_OFFLOADED,
'task_state': None,
'expected_task_state': [task_states.SHELVING,
task_states.SHELVING_OFFLOADING]},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
self.compute.shelve_instance(self.context, instance,
image_id=image_id)
def test_shelve(self):
self._shelve_instance(-1)
def test_shelve_offload(self):
self._shelve_instance(0)
def test_shelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
None, True, None, False)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.SHELVING
instance.save()
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = None
sys_meta['shelved_host'] = host
db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123, 'host': None, 'node': None,
'vm_state': vm_states.SHELVED_OFFLOADED,
'task_state': None,
'expected_task_state': [task_states.SHELVING,
task_states.SHELVING_OFFLOADING]},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
self.compute.shelve_offload_instance(self.context, instance)
def test_unshelve(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
None, True, None, False)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': 'fake_id'}
host = 'fake-mini'
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
cur_time = timeutils.utcnow()
cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(cur_time)
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
db.instance_update_and_get_original(self.context, instance['uuid'],
{'task_state': task_states.SPAWNING},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg()).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(db_instance, self.rt, _fake_resources()))
self.compute.driver.spawn(self.context, instance, image,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123,
'vm_state': vm_states.ACTIVE,
'task_state': None,
'image_ref': instance['image_ref'],
'key_data': None,
'auto_disk_config': False,
'expected_task_state': task_states.SPAWNING,
'launched_at': cur_time_tz},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
self.compute.unshelve_instance(self.context, instance, image=image,
filter_properties=filter_properties, node=node)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
def test_unshelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
host = 'fake-mini'
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
cur_time = timeutils.utcnow()
cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(cur_time)
self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
None, True, None, False)
instance = instance_obj.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.UNSHELVING
instance.save()
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = None
sys_meta['shelved_host'] = host
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
db.instance_update_and_get_original(self.context, instance['uuid'],
{'task_state': task_states.SPAWNING},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg()).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(db_instance, self.rt, _fake_resources()))
self.compute.driver.spawn(self.context, instance, None,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'power_state': 123,
'vm_state': vm_states.ACTIVE,
'task_state': None,
'key_data': None,
'auto_disk_config': False,
'expected_task_state': task_states.SPAWNING,
'launched_at': cur_time_tz},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
def test_shelved_poll_none_exist(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(timeutils, 'is_older_than')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_not_timedout(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_timedout(self):
active_instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, active_instance, {}, {}, [],
None, None, True, None, False)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
(old, instance) = db.instance_update_and_get_original(self.context,
instance['uuid'], {'vm_state': vm_states.SHELVED,
'system_metadata': sys_meta})
def fake_destroy(inst, nw_info, bdm):
# NOTE(alaski) There are too many differences between an instance
# as returned by instance_update_and_get_original and
# instance_get_all_by_filters so just compare the uuid.
self.assertEqual(instance['uuid'], inst['uuid'])
self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
self.compute._poll_shelved_instances(self.context)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def test_shelve(self):
# Ensure instance can be shelved.
fake_instance = self._create_fake_instance({'display_name': 'vm01'})
instance = jsonutils.to_primitive(fake_instance)
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertIsNone(instance['task_state'])
def fake_init(self2):
# In original _FakeImageService.__init__(), some fake images are
# created. To verify the snapshot name of this test only, here
# sets a fake method.
self2.images = {}
def fake_create(self2, ctxt, metadata):
self.assertEqual(metadata['name'], 'vm01-shelved')
metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
return metadata
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
self.compute_api.shelve(self.context, inst_obj)
inst_obj.refresh()
self.assertEqual(inst_obj.task_state, task_states.SHELVING)
db.instance_destroy(self.context, instance['uuid'])
def test_unshelve(self):
# Ensure instance can be unshelved.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertIsNone(instance['task_state'])
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
self.compute_api.shelve(self.context, inst_obj)
inst_obj.refresh()
inst_obj.task_state = None
inst_obj.vm_state = vm_states.SHELVED
inst_obj.save()
self.compute_api.unshelve(self.context, inst_obj)
inst_obj.refresh()
self.assertEqual(inst_obj.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
|
{
"content_hash": "b98bcdd7c1ade341f8b494ad87d03581",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 78,
"avg_line_length": 46.63793103448276,
"alnum_prop": 0.5979403221547399,
"repo_name": "leilihh/nova",
"id": "0f4cb7c2152a028281388f6c377165158dc87559",
"size": "19508",
"binary": false,
"copies": "10",
"ref": "refs/heads/stable/icehouse",
"path": "nova/tests/compute/test_shelve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "112"
},
{
"name": "Python",
"bytes": "14056202"
},
{
"name": "Shell",
"bytes": "19895"
},
{
"name": "Smarty",
"bytes": "599198"
}
],
"symlink_target": ""
}
|
from base import DatadogBaseAction
from datadog import api
class DatadogScheduleMonitorDowntime(DatadogBaseAction):
def _run(self, **kwargs):
return api.Downtime.create(**kwargs)
|
{
"content_hash": "160d823163445db4296beeef646cc19a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.7668393782383419,
"repo_name": "tonybaloney/st2contrib",
"id": "f7ca87937f0f8de268ce3f9cd9d9575399e7c763",
"size": "193",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs/datadog/actions/lib/downtimes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5392"
},
{
"name": "Python",
"bytes": "1285946"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7547"
}
],
"symlink_target": ""
}
|
import logging
try:
import cmandelbrot
CMANDELBROT = True
except ImportError:
CMANDELBROT = False
logging.warning("cmandelbrot not present, using python implementation.")
ITERATIONS = 300
class Mandelbrot(object):
def __init__(self, width, height, min_r=-2.0, max_r=1.0, min_i=-1.5,
iterations=ITERATIONS):
self.min_r = min_r
self.max_r = max_r
self.min_i = min_i
self.max_i = min_i + (max_r - min_r) * height / width
self.width = width
self.height = height
        self.iterations = iterations
        # Per-pixel step sizes along the real (x) and imaginary (y) axes.
        self.step_r = (self.max_r - self.min_r) / (self.width - 1)
        self.step_i = (self.max_i - self.min_i) / (self.height - 1)
    def get_c(self, x, y):
        real = self.min_r + x * self.step_r
        imaginary = self.min_i + y * self.step_i
        return (real, imaginary)
def get(self, x, y):
return self.mandelbrot(*(self.get_c(x, y)))
def mandelbrot(self, real, imaginary):
if CMANDELBROT:
return cmandelbrot.mandelbrot(real, imaginary, self.iterations)
c = complex(real, imaginary)
z = c
for i in xrange(self.iterations):
if abs(z) > 2:
return i
z = z * z + c
return self.iterations
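# Minimal usage sketch (hypothetical dimensions): escape counts for one row of
# an 80x60 grid, using whichever implementation is available. Values range
# from 0 (escaped immediately) up to `iterations` (assumed inside the set).
#
# m = Mandelbrot(80, 60)
# row = [m.get(x, 0) for x in range(80)]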
|
{
"content_hash": "4b0733b1043a8851f05950397cc7fecb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 30.441860465116278,
"alnum_prop": 0.5714285714285714,
"repo_name": "Gagaro/pyfractal",
"id": "402dee23e8dc66e8b61530eafcbee401d5944b22",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fractal/mandelbrot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "867"
},
{
"name": "Python",
"bytes": "6387"
}
],
"symlink_target": ""
}
|
import datetime
import logging
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import APIException
import seaserv
from seahub.base.accounts import User
from seahub.constants import GUEST_USER
from seahub.api2.models import Token, TokenV2
from seahub.api2.utils import get_client_ip
from seahub.utils import within_time_range
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
logger = logging.getLogger(__name__)
HEADER_CLIENT_VERSION = 'HTTP_X_SEAFILE_CLIENT_VERSION'
HEADER_PLATFORM_VERSION = 'HTTP_X_SEAFILE_PLATFORM_VERSION'
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = 'Incorrect authentication credentials.'
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if not auth or auth[0].lower() != 'token':
return None
if len(auth) == 1:
msg = 'Invalid token header. No credentials provided.'
raise AuthenticationFailed(msg)
elif len(auth) > 2:
msg = 'Invalid token header. Token string should not contain spaces.'
raise AuthenticationFailed(msg)
key = auth[1]
ret = self.authenticate_v2(request, key)
if ret:
return ret
return self.authenticate_v1(request, key)
def _populate_user_permissions(self, user):
"""Disable some operations if ``user`` is a guest.
"""
if user.role == GUEST_USER:
user.permissions.can_add_repo = lambda: False
user.permissions.can_add_group = lambda: False
user.permissions.can_view_org = lambda: False
user.permissions.can_use_global_address_book = lambda: False
user.permissions.can_generate_shared_link = lambda: False
def authenticate_v1(self, request, key):
try:
token = Token.objects.get(key=key)
except Token.DoesNotExist:
raise AuthenticationFailed('Invalid token')
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
return (user, token)
def authenticate_v2(self, request, key):
try:
token = TokenV2.objects.get(key=key)
except TokenV2.DoesNotExist:
return None # Continue authentication in token v1
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
need_save = False
# We update the device's last_login_ip, client_version, platform_version if changed
ip = get_client_ip(request)
if ip and ip != token.last_login_ip:
token.last_login_ip = ip
need_save = True
client_version = request.META.get(HEADER_CLIENT_VERSION, '')
if client_version and client_version != token.client_version:
token.client_version = client_version
need_save = True
platform_version = request.META.get(HEADER_PLATFORM_VERSION, '')
if platform_version and platform_version != token.platform_version:
token.platform_version = platform_version
need_save = True
if not within_time_range(token.last_accessed, datetime.datetime.now(), 10 * 60):
# We only need 10min precision for the last_accessed field
need_save = True
if need_save:
try:
token.save()
                except Exception:
logger.exception('error when save token v2:')
return (user, token)
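# Client-side sketch (hypothetical token and host): the header consumed above
# is produced by requests such as
#   curl -H "Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a" \
#        -H "X-Seafile-Client-Version: 6.0.0" https://seafile.example.com/api2/...
# where the optional X-Seafile-* headers feed the TokenV2 bookkeeping.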
|
{
"content_hash": "92e56bb7514313a2ae0747d42b0b8609",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 95,
"avg_line_length": 34.00694444444444,
"alnum_prop": 0.6248723708392894,
"repo_name": "saukrIppl/seahub",
"id": "f72aff11df732c260aca806b126e282388a93204",
"size": "4897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/api2/authentication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
"""Platform for light integration."""
from __future__ import annotations
from typing import Any
from devolo_home_control_api.devices.zwave import Zwave
from devolo_home_control_api.homecontrol import HomeControl
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .devolo_multi_level_switch import DevoloMultiLevelSwitchDeviceEntity
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Get all light devices and setup them via config entry."""
entities = []
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
for device in gateway.multi_level_switch_devices:
for multi_level_switch in device.multi_level_switch_property.values():
if multi_level_switch.switch_type == "dimmer":
entities.append(
DevoloLightDeviceEntity(
homecontrol=gateway,
device_instance=device,
element_uid=multi_level_switch.element_uid,
)
)
async_add_entities(entities, False)
class DevoloLightDeviceEntity(DevoloMultiLevelSwitchDeviceEntity, LightEntity):
"""Representation of a light within devolo Home Control."""
def __init__(
self, homecontrol: HomeControl, device_instance: Zwave, element_uid: str
) -> None:
"""Initialize a devolo multi level switch."""
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
)
self._attr_supported_features = SUPPORT_BRIGHTNESS
self._binary_switch_property = device_instance.binary_switch_property.get(
element_uid.replace("Dimmer", "BinarySwitch")
)
@property
def brightness(self) -> int:
"""Return the brightness value of the light."""
return round(self._value / 100 * 255)
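    # e.g. a device value of 50 % maps to round(50 / 100 * 255) == 128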
@property
def is_on(self) -> bool:
"""Return the state of the light."""
return bool(self._value)
def turn_on(self, **kwargs: Any) -> None:
"""Turn device on."""
if kwargs.get(ATTR_BRIGHTNESS) is not None:
self._multi_level_switch_property.set(
round(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
)
else:
if self._binary_switch_property is not None:
# Turn on the light device to the latest known value. The value is known by the device itself.
self._binary_switch_property.set(True)
else:
# If there is no binary switch attached to the device, turn it on to 100 %.
self._multi_level_switch_property.set(100)
def turn_off(self, **kwargs: Any) -> None:
"""Turn device off."""
if self._binary_switch_property is not None:
self._binary_switch_property.set(False)
else:
self._multi_level_switch_property.set(0)
|
{
"content_hash": "1c81456e441a623f31c6b26db7067c21",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 110,
"avg_line_length": 36.5,
"alnum_prop": 0.6264840182648402,
"repo_name": "FreekingDean/home-assistant",
"id": "28da95c8902d0d0fbf9e90f601f70258c4c62a4a",
"size": "3285",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/devolo_home_control/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import re
import json
import math
import operator
from TrainParser import *
class Naive_Bayes_Category_Classifier:
def __init__(self,TRAINING_PATH,TEST_PATH,OUTPUT_PATH):
PREDICTION_OUTPUT_PATH = OUTPUT_PATH+"prediction.txt"
## Files
test_doc = open(TEST_PATH)
train_doc = open(TRAINING_PATH)
opdoc = open(PREDICTION_OUTPUT_PATH,"w+")
V = 0
        alpha = .1  # Smoothing value for zero occurrences (1 == Laplace)
threshold = .2
size = len(train_doc.readlines())
print("Num Categories: %d")%(size)
train_doc.seek(0,0)
currLine = 0
## -- CLASSIFIER SETUP --
cdict = {}
vocab = set()
for line in train_doc:
c = json.loads(line)
cName = c['Category']
cTF = c['Term Frequencies']
cNum = c['Num Words']
for word in cTF:
if word not in vocab:
vocab.add(word)
if cName not in cdict:
newCat = CategoryObj(cName,cNum,cTF)
cdict[cName] = newCat
else:
print "Error can't have extra categories in a set!"+" Category: "+cName
break
currLine+=1
#print("Training category %d out of %d") % (currLine,size)
V = len(vocab)
## -- PREDICTION SETUP --
size = len(test_doc.readlines())
print("Num test businesses %d") %(size)
test_doc.seek(0,0)
currLine = 0
testDict = []
for line in test_doc:
currLine += 1
b = json.loads(line)
bTF = b['Term Frequencies']
bID = b['ID']
predictions = []
predictDict = {}
#Calculate predictions
sumCatProb = 0.0
for category in cdict:
prob = 0
catObj = cdict[category]
sumProb = 0.0
for word in bTF:
sumWordTF = catObj.getTF(word)+alpha
allTF = catObj.numWords+alpha*V
currWordProb = sumWordTF/allTF
if currWordProb == 0.0:
sumProb += 0.0
else:
sumProb += -math.log(currWordProb,2)*(bTF[word]) #Accounts for repetition of the word
sumCatProb += sumProb
predictDict[category] = sumProb
#predictions.append(maximum)
for cat in predictDict:
sumProb = predictDict[cat]
allProb = sumProb/sumCatProb
if allProb >= threshold:
print(allProb)
predictions.append(cat)
            if len(predictions) == 0:
maximum = max(predictDict, key=predictDict.get)
predictions.append(maximum)
bObj = BusinessObj(bID,predictions)
testDict.append(bObj)
#print("Prediction: %d out of %d completed") % (currLine,size)
#break
## -- WRITE OUT PREDICTIONS --
opdoc.seek(0,0)
for business in testDict:
opdoc.write(business.toJSONMachine()+'\n')
training_path = "../../../YelpDevData/naiveBayesTrain.txt"
testing_path = "../../../YelpDevData/naiveBayesTest.txt"
output_path = "../../../YelpDevData/"
a = Naive_Bayes_Category_Classifier(training_path,testing_path,output_path)
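# Smoothing note (values from the code above): with alpha = .1, a word never
# seen in a category still gets probability alpha / (numWords + alpha * V),
# so the -log2 term stays finite instead of raising a math domain error.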
|
{
"content_hash": "04136f8931ecffd7cb549ff81833c18c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 112,
"avg_line_length": 33.39423076923077,
"alnum_prop": 0.5067664843075151,
"repo_name": "luckyleap/NLP_Projects",
"id": "7bd07a3cc2810c2e2f9634972fa3e00d931d0ac7",
"size": "3703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Yelp Dataset Challenge/Training/Naive_Bayes_Category_Prediction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16603"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, print_function
import pytest
from gcdt_testtools.helpers_aws import check_preconditions, check_normal_mode
from gcdt_testtools.helpers_aws import awsclient # fixtures!
from gcdt_lookups.lookups import _acm_lookup, _resolve_lookups
@pytest.mark.aws
@check_preconditions
def test_acm_lookup(awsclient):
# we decided to use placebo recording for this testcase since the certificate
# information is public anyway
# * if we deploy new certificates this might break
# * we do not want to have all the certificate details in github
host_list = ['*.infra.glomex.cloud', '*.dev.infra.glomex.cloud']
cert_arn = _acm_lookup(awsclient, host_list)
assert cert_arn is not None
assert cert_arn.split(':')[3] == 'eu-west-1'
@pytest.mark.aws
@check_preconditions
def test_acm_lookup_is_yugen(awsclient):
# for API Gateway certs need to come from us-east-1
host_list = ['*.infra.glomex.cloud', '*.dev.infra.glomex.cloud']
cert_arn = _acm_lookup(awsclient, host_list, 'us-east-1')
assert cert_arn is not None
assert cert_arn.split(':')[3] == 'us-east-1'
@pytest.mark.aws
@check_preconditions
def test_stack_lookup_stack_output(awsclient):
# lookup:stack:<stack_name> w/o value gets us the whole stack_output
context = {
'_awsclient': awsclient,
'tool': 'ramuda'
}
config = {
'stack_output': 'lookup:stack:infra-dev'
}
_resolve_lookups(context, config, ['stack'])
assert config.get('stack_output', {}).get('AWSAccountId') == '420189626185'
@pytest.mark.aws
@check_preconditions
def test_stack_lookup_value(awsclient):
    # lookup:stack:<stack_name>:<value> gets us a single stack output value
context = {
'_awsclient': awsclient,
'tool': 'ramuda'
}
config = {
'AWSAccountId': 'lookup:stack:infra-dev:AWSAccountId'
}
_resolve_lookups(context, config, ['stack'])
assert config.get('AWSAccountId') == '420189626185'
@pytest.mark.aws
@check_preconditions
@check_normal_mode
def test_secret_lookup(awsclient):
context = {
'_awsclient': awsclient,
'tool': 'kumo'
}
config = {
'BaseAMIID': 'lookup:secret:ops.dev.base_ami'
}
_resolve_lookups(context, config, ['secret'])
assert config.get('BaseAMIID') == 'ami-1370b36a'
|
{
"content_hash": "0dd0b31038f3fe84be15b1c41c62aae7",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 81,
"avg_line_length": 29.3875,
"alnum_prop": 0.6699276903445343,
"repo_name": "glomex/gcdt-lookups",
"id": "4d330b9c991d55cba4494a9b6b18a0f4b850dd34",
"size": "2375",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_lookups_aws.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40485"
}
],
"symlink_target": ""
}
|
from dp_tornado.engine.controller import Controller
class SqliteController(Controller):
def get(self):
assert self.schema.tests.sqlite.fields.migrate()
self.finish('done')
|
{
"content_hash": "302e6bf574ecf989077130a0dadb8600",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.7216494845360825,
"repo_name": "why2pac/dp-tornado",
"id": "6d334a0a1af69664033cbfbcc688adf1752f82c8",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/controller/tests/schema/migrate/sqlite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3715"
},
{
"name": "Dockerfile",
"bytes": "2157"
},
{
"name": "HTML",
"bytes": "9880"
},
{
"name": "JavaScript",
"bytes": "41639"
},
{
"name": "Python",
"bytes": "567608"
},
{
"name": "Shell",
"bytes": "9068"
}
],
"symlink_target": ""
}
|
"""A class to serve as proxy for the target engine for testing.
Receives documents from the oplog worker threads and indexes them
into the backend.
Please look at the Solr and ElasticSearch doc manager classes for a sample
implementation with real systems.
"""
from threading import RLock
from mongo_connector.errors import OperationFailed
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
from mongo_connector.compat import u
class DocumentStore(dict):
def __init__(self):
self._lock = RLock()
def __getitem__(self, key):
with self._lock:
return super(DocumentStore, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(DocumentStore, self).__setitem__(key, value)
def __iter__(self):
def __myiter__():
with self._lock:
for item in super(DocumentStore, self).__iter__():
yield item
return __myiter__()
class Entry(object):
def __init__(self, doc, ns, ts):
self.doc, self.ns, self.ts = doc, ns, ts
self._id = self.doc['_id']
@property
def meta_dict(self):
return {'_id': self._id, 'ns': self.ns, '_ts': self.ts}
@property
def merged_dict(self):
d = self.doc.copy()
d.update(**self.meta_dict)
return d
def update(self, ns, ts):
self.ns, self.ts = ns, ts
class DocManager(DocManagerBase):
"""BackendSimulator emulates both a target DocManager and a server.
The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url=None, unique_key='_id',
auto_commit_interval=None, **kwargs):
"""Creates a dictionary to hold document id keys mapped to the
documents as values.
"""
self.unique_key = unique_key
self.auto_commit_interval = auto_commit_interval
self.doc_dict = DocumentStore()
self.url = url
def stop(self):
"""Stops any running threads in the DocManager.
"""
pass
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
document = self.doc_dict[document_id].doc
updated = self.apply_update(document, update_spec)
updated[self.unique_key] = updated.pop("_id")
self.upsert(updated, namespace, timestamp)
return updated
def upsert(self, doc, namespace, timestamp):
"""Adds a document to the doc dict.
"""
# Allow exceptions to be triggered (for testing purposes)
if doc.get('_upsert_exception'):
raise Exception("upsert exception")
doc_id = doc["_id"]
self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def insert_file(self, f, namespace, timestamp):
"""Inserts a file to the doc dict.
"""
doc = f.get_metadata()
doc['content'] = f.read()
self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def remove(self, document_id, namespace, timestamp):
"""Removes the document from the doc dict.
"""
try:
entry = self.doc_dict[document_id]
entry.doc = None
entry.update(namespace, timestamp)
except KeyError:
raise OperationFailed("Document does not exist: %s"
% u(document_id))
def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
            if start_ts <= entry.ts <= end_ts:
yield entry.meta_dict
def commit(self):
"""Simply passes since we're not using an engine that needs commiting.
"""
pass
def get_last_doc(self):
"""Searches through the doc dict to find the document that was
modified or deleted most recently."""
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict
def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results
def _delete(self):
"""Deletes all documents.
This function is not a part of the DocManager API, and is only used
to simulate deleting all documents from a backend.
"""
        self.doc_dict = DocumentStore()
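# --- Usage sketch (illustrative, not part of the mongo-connector API) ---
# Exercises the simulator's lifecycle with made-up namespace and timestamp
# values; running it standalone only requires mongo_connector importable.
if __name__ == '__main__':
    dm = DocManager()
    dm.upsert({'_id': 1, 'name': 'alice'}, 'test.users', 100)
    dm.update(1, {'$set': {'name': 'bob'}}, 'test.users', 101)
    dm.remove(1, 'test.users', 102)
    # A rollback-style scan over the timestamp window [100, 102].
    print(list(dm.search(100, 102)))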
|
{
"content_hash": "6817eafb54fd5b6bcfa75d33a9eed96d",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 33.45882352941177,
"alnum_prop": 0.6132208157524613,
"repo_name": "jtharpla/mongo-connector",
"id": "865ea1f601e0f9adb45466a2f7b5d25cd3f22c2a",
"size": "6267",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mongo_connector/doc_managers/doc_manager_simulator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "306298"
},
{
"name": "Shell",
"bytes": "2029"
}
],
"symlink_target": ""
}
|
from uuid import uuid4
from traceback import format_exc
import logging
class TransactionManager:
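    """Coordinates PostgreSQL two-phase commit across multiple cursors."""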
def __init__(self):
self.transactions = []
def __begin_transaction(self, cursor):
cursor.execute('BEGIN;')
return (cursor, uuid4())
def __prepare_transaction(self, transaction):
cursor, transaction_id = transaction
cursor.execute('PREPARE TRANSACTION \'%s\';'%transaction_id)
logging.info('Prepared transaction %s'%transaction_id)
self.transactions.append(transaction)
def __add_queries(self, cursor, queries):
transaction = self.__begin_transaction(cursor)
for q in queries:
cursor.execute(q)
self.__prepare_transaction(transaction)
def add_transaction(self, cursor, queries):
try:
self.__add_queries(cursor, queries)
except Exception as e:
cursor.execute("rollback;")
logging.warning(format_exc())
self.rollback_transactions()
raise e
def commit_transactions(self):
for cursor, transaction_id in self.transactions:
            logging.info('Committing %s'%transaction_id)
cursor.execute('COMMIT PREPARED \'%s\';'%transaction_id)
def rollback_transactions(self):
for cursor, transaction_id in self.transactions:
logging.info('Rolling back %s'%transaction_id)
cursor.execute('ROLLBACK PREPARED \'%s\';'%transaction_id)
self.transactions = []
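# --- Usage sketch: two-phase commit across two databases ---
# Illustrative only; assumes psycopg2 is installed, both servers allow
# prepared transactions (max_prepared_transactions > 0), autocommit is on
# so the explicit BEGIN above opens the only transaction, and the DSNs and
# the audit_log table are placeholders.
if __name__ == '__main__':
    import psycopg2
    conn_a = psycopg2.connect('dbname=shard_a')  # placeholder DSN
    conn_b = psycopg2.connect('dbname=shard_b')  # placeholder DSN
    conn_a.autocommit = conn_b.autocommit = True
    manager = TransactionManager()
    # add_transaction rolls back every prepared transaction itself on
    # failure, so a plain call followed by commit is enough here.
    manager.add_transaction(
        conn_a.cursor(),
        ["INSERT INTO audit_log (note) VALUES ('shard A write')"])
    manager.add_transaction(
        conn_b.cursor(),
        ["INSERT INTO audit_log (note) VALUES ('shard B write')"])
    manager.commit_transactions()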
|
{
"content_hash": "d0e9cd6447040de542f1f986982dd7df",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 70,
"avg_line_length": 28.92452830188679,
"alnum_prop": 0.6301369863013698,
"repo_name": "char-lie/software_design",
"id": "e80d2de02d2f4e73f90e3d5b985027aae0738b3f",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/TransactionManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
}
|
from mopidy import backend, models
from mopidy.models import Ref, SearchResult
from mopidy_subidy import uri
import logging
logger = logging.getLogger(__name__)
class SubidyLibraryProvider(backend.LibraryProvider):
def __create_vdirs():
vdir_templates = [
dict(id="root", name="Subsonic"),
dict(id="artists", name="Artists"),
dict(id="albums", name="Albums"),
dict(id="rootdirs", name="Directories"),
]
# Create a dict with the keys being the `id`s in `vdir_templates`
# and the values being objects containing the vdir `id`,
# the human readable name as `name`, and the URI as `uri`.
vdirs = {}
for template in vdir_templates:
vdir = template.copy()
vdir.update(uri=uri.get_vdir_uri(vdir["id"]))
vdirs[template['id']] = vdir
return vdirs
_vdirs = __create_vdirs()
def __raw_vdir_to_ref(vdir):
if vdir is None:
return None
return Ref.directory(
name=vdir['name'],
uri=vdir['uri'])
root_directory = __raw_vdir_to_ref(_vdirs['root'])
_raw_vdir_to_ref = staticmethod(__raw_vdir_to_ref)
def __init__(self, *args, **kwargs):
super(SubidyLibraryProvider, self).__init__(*args, **kwargs)
self.subsonic_api = self.backend.subsonic_api
def browse_songs(self, album_id):
return self.subsonic_api.get_songs_as_refs(album_id)
def browse_albums(self, artist_id=None):
return self.subsonic_api.get_albums_as_refs(artist_id)
def browse_artists(self):
return self.subsonic_api.get_artists_as_refs()
def browse_rootdirs(self):
return self.subsonic_api.get_rootdirs_as_refs()
def browse_diritems(self, directory_id):
return self.subsonic_api.get_diritems_as_refs(directory_id)
def lookup_song(self, song_id):
song = self.subsonic_api.get_song_by_id(song_id)
if song is None:
return []
else:
return [song]
def lookup_album(self, album_id):
return self.subsonic_api.get_songs_as_tracks(album_id)
def lookup_artist(self, artist_id):
return list(self.subsonic_api.get_artist_as_songs_as_tracks_iter(artist_id))
def lookup_directory(self, directory_id):
return list(self.subsonic_api.get_recursive_dir_as_songs_as_tracks_iter(directory_id))
def lookup_playlist(self, playlist_id):
return self.subsonic_api.get_playlist_as_playlist(playlist_id).tracks
def browse(self, browse_uri):
if browse_uri == uri.get_vdir_uri('root'):
root_vdir_names = ["rootdirs", "artists", "albums"]
root_vdirs = [self._vdirs[vdir_name] for vdir_name in root_vdir_names]
sorted_root_vdirs = sorted(root_vdirs, key=lambda vdir: vdir["name"])
return [self._raw_vdir_to_ref(vdir) for vdir in sorted_root_vdirs]
elif browse_uri == uri.get_vdir_uri("rootdirs"):
return self.browse_rootdirs()
elif browse_uri == uri.get_vdir_uri("artists"):
return self.browse_artists()
elif browse_uri == uri.get_vdir_uri("albums"):
return self.browse_albums()
else:
uri_type = uri.get_type(browse_uri)
if uri_type == uri.DIRECTORY:
return self.browse_diritems(uri.get_directory_id(browse_uri))
elif uri_type == uri.ARTIST:
return self.browse_albums(uri.get_artist_id(browse_uri))
elif uri_type == uri.ALBUM:
return self.browse_songs(uri.get_album_id(browse_uri))
else:
return []
def lookup_one(self, lookup_uri):
type = uri.get_type(lookup_uri)
if type == uri.ARTIST:
return self.lookup_artist(uri.get_artist_id(lookup_uri))
if type == uri.ALBUM:
return self.lookup_album(uri.get_album_id(lookup_uri))
if type == uri.DIRECTORY:
return self.lookup_directory(uri.get_directory_id(lookup_uri))
if type == uri.SONG:
return self.lookup_song(uri.get_song_id(lookup_uri))
if type == uri.PLAYLIST:
return self.lookup_playlist(uri.get_playlist_id(lookup_uri))
def lookup(self, uri=None, uris=None):
if uris is not None:
return dict((uri, self.lookup_one(uri)) for uri in uris)
if uri is not None:
return self.lookup_one(uri)
return None
def refresh(self, uri):
pass
def search_uri_iter(self, lookup_uri, include_self=True):
type = uri.get_type(lookup_uri)
if type == uri.ARTIST:
artistid = uri.get_artist_id(lookup_uri)
artist = self.subsonic_api.get_artist_by_id(artistid)
if artist is not None:
if include_self:
yield (uri.ARTIST, artist)
for i in self.subsonic_api.get_albums_as_albums(artistid):
yield (uri.ALBUM, i)
for i in self.subsonic_api.get_artist_as_songs_as_tracks_iter(artistid):
yield (uri.SONG, i)
elif type == uri.ALBUM:
albumid = uri.get_album_id(lookup_uri)
album = self.subsonic_api.get_album_by_id(albumid)
if album is not None:
if include_self:
yield (uri.ALBUM, album)
for i in self.lookup_album(albumid):
yield (uri.SONG, i)
elif type == uri.DIRECTORY:
for i in self.lookup_directory(uri.get_directory_id(lookup_uri)):
yield (uri.SONG, i)
elif type == uri.SONG:
if include_self:
song = self.subsonic_api.get_song_by_id(uri.get_song_id(lookup_uri))
if song:
yield (uri.SONG, song)
        # TODO: support playlist URIs
def finds_to_dict(self, finds):
artists = []
albums = []
tracks = []
for found in finds:
if found[0] == uri.ARTIST:
artists.append(found[1])
elif found[0] == uri.ALBUM:
albums.append(found[1])
elif found[0] == uri.SONG:
tracks.append(found[1])
return dict(artists=artists, albums=albums, tracks=tracks)
def search_by_artist_album_and_track(self, artist_name, album_name, track_name):
tracks = self.search_by_artist_and_album(artist_name, album_name)
track = next(item for item in tracks.tracks if track_name in item.name)
return SearchResult(tracks=[track])
def search_by_artist_and_album(self, artist_name, album_name):
artists = self.subsonic_api.get_raw_artists()
artist = next(item for item in artists if artist_name in item.get('name'))
albums = self.subsonic_api.get_raw_albums(artist.get('id'))
album = next(item for item in albums if album_name in item.get('title'))
return SearchResult(tracks=self.subsonic_api.get_songs_as_tracks(album.get('id')))
def get_distinct(self, field, query):
search_result = self.search(query)
if not search_result:
return []
if field == 'track' or field == 'title':
return [track.name for track in (search_result.tracks or [])]
if field == 'album':
return [album.name for album in (search_result.albums or [])]
if field == 'artist':
if not search_result.artists:
return [artist.name for artist in self.browse_artists()]
return [artist.name for artist in search_result.artists]
def search(self, query=None, uris=None, exact=False):
if 'artist' in query and 'album' in query and 'track_name' in query:
return self.search_by_artist_album_and_track(query.get('artist')[0], query.get('album')[0], query.get('track_name')[0])
if 'artist' in query and 'album' in query:
return self.search_by_artist_and_album(query.get('artist')[0], query.get('album')[0])
if 'artist' in query:
return self.subsonic_api.find_as_search_result(query.get('artist')[0])
if 'any' in query:
return self.subsonic_api.find_as_search_result(query.get('any')[0])
if 'uri' in query:
return SearchResult(
**self.finds_to_dict(self.search_uri_iter(query.get('uri')[0])))
return SearchResult(artists=self.subsonic_api.get_artists_as_artists())
def get_coverart_image(self, a_uri):
utype = uri.get_type(a_uri)
if utype == uri.ARTIST:
coverart_item_id = self.subsonic_api.coverart_item_id_by_artist_id(uri.get_artist_id(a_uri))
elif utype == uri.ALBUM:
coverart_item_id = self.subsonic_api.coverart_item_id_by_album_id(uri.get_album_id(a_uri))
elif utype == uri.SONG:
coverart_item_id = self.subsonic_api.coverart_item_id_by_song_id(uri.get_song_id(a_uri))
elif utype == uri.DIRECTORY:
coverart_item_id = self.subsonic_api.coverart_item_id_by_directory_id(uri.get_directory_id(a_uri))
else:
return []
if coverart_item_id is not None:
image_uri = self.subsonic_api.get_coverart_image_by_id(coverart_item_id)
if image_uri is not None:
return [image_uri]
else:
return []
else:
return []
def get_images(self, uris):
return dict((a_uri, self.get_coverart_image(a_uri)) for a_uri in uris)
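# --- Usage sketch (illustrative; values are placeholders) ---
# Mopidy core drives this provider with query dicts and URIs; these calls
# show the shapes the search() and browse() dispatch above expects, with
# `provider` standing in for an instantiated SubidyLibraryProvider.
#
# provider.search({'artist': ['Miles Davis'], 'album': ['Kind of Blue']})
# provider.search({'any': ['blue in green']})
# provider.browse(uri.get_vdir_uri('artists'))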
|
{
"content_hash": "5d3bf24ab86fd10c58ffebcb5328b3e0",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 131,
"avg_line_length": 42.11206896551724,
"alnum_prop": 0.5876151484135107,
"repo_name": "hhm0/mopidy-subidy",
"id": "ba9bcce8c2e52a20cda831fa8c6c487f3b20ea84",
"size": "9770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mopidy_subidy/library.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43325"
}
],
"symlink_target": ""
}
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NVX_conditional_render'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NVX_conditional_render',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLuint)
def glBeginConditionalRenderNVX(id):pass
@_f
@_p.types(None,)
def glEndConditionalRenderNVX():pass
|
{
"content_hash": "2dda8268eb2856580f7801fad5bd17c2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 119,
"avg_line_length": 31.8,
"alnum_prop": 0.7531446540880503,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "d1a995e5ef283c31c744708ef1152a2634072b78",
"size": "636",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/NVX/conditional_render.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from django.views.generic import ListView
from . import views
app_name = 'resorthub'
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^resorts/$', views.resort_listing, name='resorts'),
url(r'^compare/$', views.compare_listing, name='compare'),
]
|
{
"content_hash": "6f6eca0273311151bd546d637e61481d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 28.09090909090909,
"alnum_prop": 0.6763754045307443,
"repo_name": "racmariano/skidom",
"id": "b8403de9cc86ab15ca987293f8ea669f3ff5ac3f",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/resorthub/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4094"
},
{
"name": "HTML",
"bytes": "10934"
},
{
"name": "JavaScript",
"bytes": "6583"
},
{
"name": "Python",
"bytes": "71579"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.pluginmodel
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
('file', models.FileField(verbose_name='file', upload_to=cms.models.pluginmodel.get_plugin_media_path)),
('title', models.CharField(verbose_name='title', blank=True, null=True, max_length=255)),
('target', models.CharField(verbose_name='target', blank=True, default='', max_length=100, choices=[('', 'same window'), ('_blank', 'new window'), ('_parent', 'parent window'), ('_top', 'topmost frame')])),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
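# --- Illustrative sketch (not part of this migration) ---
# How a plugin class for the model created above is typically registered
# with django CMS; FilePlugin and the template path are assumptions, kept
# commented out because registration does not belong in a migration file.
#
# from cms.plugin_base import CMSPluginBase
# from cms.plugin_pool import plugin_pool
# from djangocms_file.models import File
#
# @plugin_pool.register_plugin
# class FilePlugin(CMSPluginBase):
#     model = File                      # the model this migration creates
#     name = 'File'
#     render_template = 'djangocms_file/file.html'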
|
{
"content_hash": "dfac4f2f3a08cb9f050855fb309362a1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 222,
"avg_line_length": 38.888888888888886,
"alnum_prop": 0.5742857142857143,
"repo_name": "Venturi/oldcms",
"id": "546cee3e3dc89817025f430f10e0caf4e82806c3",
"size": "1074",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/djangocms_file/migrations_django/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "418090"
},
{
"name": "HTML",
"bytes": "467117"
},
{
"name": "JavaScript",
"bytes": "916100"
},
{
"name": "PHP",
"bytes": "2231"
},
{
"name": "Python",
"bytes": "15786894"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "3743"
},
{
"name": "XSLT",
"bytes": "157892"
}
],
"symlink_target": ""
}
|
from Child import Child
from Node import Node # noqa: I201
STMT_NODES = [
# continue-stmt -> 'continue' label? ';'?
Node('ContinueStmt', kind='Stmt',
children=[
Child('ContinueKeyword', kind='ContinueToken'),
Child('Label', kind='IdentifierToken',
is_optional=True),
]),
# while-stmt -> label? ':'? 'while' condition-list code-block ';'?
Node('WhileStmt', kind='Stmt',
traits=['WithCodeBlock', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('WhileKeyword', kind='WhileToken'),
Child('Conditions', kind='ConditionElementList',
collection_element_name='Condition'),
Child('Body', kind='CodeBlock'),
]),
# defer-stmt -> 'defer' code-block ';'?
Node('DeferStmt', kind='Stmt',
traits=['WithCodeBlock'],
children=[
Child('DeferKeyword', kind='DeferToken'),
Child('Body', kind='CodeBlock'),
]),
# expr-stmt -> expression ';'?
Node('ExpressionStmt', kind='Stmt',
children=[
Child('Expression', kind='Expr'),
]),
# switch-case-list -> switch-case switch-case-list?
Node('SwitchCaseList', kind='SyntaxCollection',
element='Syntax', element_name='SwitchCase',
element_choices=['SwitchCase', 'IfConfigDecl']),
# repeat-while-stmt -> label? ':'? 'repeat' code-block 'while' expr ';'?
Node('RepeatWhileStmt', kind='Stmt',
traits=['WithCodeBlock', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('RepeatKeyword', kind='RepeatToken'),
Child('Body', kind='CodeBlock'),
Child('WhileKeyword', kind='WhileToken'),
Child('Condition', kind='Expr'),
]),
# guard-stmt -> 'guard' condition-list 'else' code-block ';'?
Node('GuardStmt', kind='Stmt',
traits=['WithCodeBlock'],
children=[
Child('GuardKeyword', kind='GuardToken'),
Child('Conditions', kind='ConditionElementList',
collection_element_name='Condition'),
Child('ElseKeyword', kind='ElseToken'),
Child('Body', kind='CodeBlock'),
]),
Node('WhereClause', kind='Syntax',
children=[
Child('WhereKeyword', kind='WhereToken'),
Child('GuardResult', kind='Expr'),
]),
# for-in-stmt -> label? ':'? 'for' 'case'? pattern 'in' expr 'where'?
# expr code-block ';'?
Node('ForInStmt', kind='Stmt',
traits=['WithCodeBlock', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('ForKeyword', kind='ForToken'),
Child('CaseKeyword', kind='CaseToken',
is_optional=True),
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation',
is_optional=True),
Child('InKeyword', kind='InToken'),
Child('SequenceExpr', kind='Expr'),
Child('WhereClause', kind='WhereClause',
is_optional=True),
Child('Body', kind='CodeBlock'),
]),
# switch-stmt -> identifier? ':'? 'switch' expr '{'
# switch-case-list '}' ';'?
Node('SwitchStmt', kind='Stmt',
traits=['Braced', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('SwitchKeyword', kind='SwitchToken'),
Child('Expression', kind='Expr'),
Child('LeftBrace', kind='LeftBraceToken'),
Child('Cases', kind='SwitchCaseList',
collection_element_name='Case'),
Child('RightBrace', kind='RightBraceToken'),
]),
# catch-clause-list -> catch-clause catch-clause-list?
Node('CatchClauseList', kind='SyntaxCollection',
element='CatchClause'),
# do-stmt -> identifier? ':'? 'do' code-block catch-clause-list ';'?
Node('DoStmt', kind='Stmt',
traits=['WithCodeBlock', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('DoKeyword', kind='DoToken'),
Child('Body', kind='CodeBlock'),
Child('CatchClauses', kind='CatchClauseList',
collection_element_name='CatchClause', is_optional=True),
]),
# return-stmt -> 'return' expr? ';'?
Node('ReturnStmt', kind='Stmt',
children=[
Child('ReturnKeyword', kind='ReturnToken'),
Child('Expression', kind='Expr',
is_optional=True),
]),
# yield-stmt -> 'yield' '('? expr-list? ')'?
Node('YieldStmt', kind='Stmt',
children=[
Child('YieldKeyword', kind='YieldToken'),
Child('Yields', kind='Syntax',
node_choices=[
Child('YieldList', kind='YieldList'),
Child('SimpleYield', kind='Expr'),
]),
]),
Node('YieldList', kind='Syntax',
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ElementList', kind='ExprList',
collection_element_name='Element'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
Child('RightParen', kind='RightParenToken'),
]),
# fallthrough-stmt -> 'fallthrough' ';'?
Node('FallthroughStmt', kind='Stmt',
children=[
Child('FallthroughKeyword', kind='FallthroughToken'),
]),
# break-stmt -> 'break' identifier? ';'?
Node('BreakStmt', kind='Stmt',
children=[
Child('BreakKeyword', kind='BreakToken'),
Child('Label', kind='IdentifierToken',
is_optional=True),
]),
# case-item-list -> case-item case-item-list?
Node('CaseItemList', kind='SyntaxCollection',
element='CaseItem'),
# condition -> expression
# | availability-condition
# | case-condition
# | optional-binding-condition
Node('ConditionElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Condition', kind='Syntax',
node_choices=[
Child('Expression', kind='Expr'),
Child('Availablity', kind='AvailabilityCondition'),
Child('MatchingPattern',
kind='MatchingPatternCondition'),
Child('OptionalBinding',
kind='OptionalBindingCondition'),
]),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# availability-condition -> '#available' '(' availability-spec ')'
Node('AvailabilityCondition', kind='Syntax',
children=[
Child('PoundAvailableKeyword', kind='PoundAvailableToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('AvailabilitySpec', kind='AvailabilitySpecList',
collection_element_name='AvailabilityArgument'),
Child('RightParen', kind='RightParenToken'),
]),
Node('MatchingPatternCondition', kind='Syntax',
children=[
Child('CaseKeyword', kind='CaseToken'),
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation',
is_optional=True),
Child('Initializer', kind='InitializerClause'),
]),
Node('OptionalBindingCondition', kind='Syntax',
children=[
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken', 'VarToken',
]),
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation',
is_optional=True),
Child('Initializer', kind='InitializerClause'),
]),
# condition-list -> condition
# | condition ','? condition-list
Node('ConditionElementList', kind='SyntaxCollection',
element='ConditionElement'),
# A declaration in statement position.
# struct Foo {};
Node('DeclarationStmt', kind='Stmt',
children=[
Child('Declaration', kind='Decl'),
]),
# throw-stmt -> 'throw' expr ';'?
Node('ThrowStmt', kind='Stmt',
children=[
Child('ThrowKeyword', kind='ThrowToken'),
Child('Expression', kind='Expr'),
]),
# if-stmt -> identifier? ':'? 'if' condition-list code-block
# else-clause ';'?
Node('IfStmt', kind='Stmt',
traits=['WithCodeBlock', 'Labeled'],
children=[
Child('LabelName', kind='IdentifierToken',
is_optional=True),
Child('LabelColon', kind='ColonToken',
is_optional=True),
Child('IfKeyword', kind='IfToken'),
Child('Conditions', kind='ConditionElementList',
collection_element_name='Condition'),
Child('Body', kind='CodeBlock'),
Child('ElseKeyword', kind='ElseToken',
is_optional=True),
Child('ElseBody', kind='Syntax',
node_choices=[
Child('IfStmt', kind='IfStmt'),
Child('CodeBlock', kind='CodeBlock'),
],
is_optional=True),
]),
    # else-if-continuation -> if-stmt
Node('ElseIfContinuation', kind='Syntax',
children=[
Child('IfStatement', kind='IfStmt'),
]),
# else-clause -> 'else' code-block
Node('ElseBlock', kind='Syntax',
traits=['WithCodeBlock'],
children=[
Child('ElseKeyword', kind='ElseToken'),
Child('Body', kind='CodeBlock'),
]),
# switch-case -> unknown-attr? switch-case-label stmt-list
# | unknown-attr? switch-default-label stmt-list
Node('SwitchCase', kind='Syntax',
traits=['WithStatements'],
children=[
Child('UnknownAttr', kind='Attribute', is_optional=True),
Child('Label', kind='Syntax',
node_choices=[
Child('Default', kind='SwitchDefaultLabel'),
Child('Case', kind='SwitchCaseLabel'),
]),
Child('Statements', kind='CodeBlockItemList',
collection_element_name='Statement'),
]),
# switch-default-label -> 'default' ':'
Node('SwitchDefaultLabel', kind='Syntax',
children=[
Child('DefaultKeyword', kind='DefaultToken'),
Child('Colon', kind='ColonToken'),
]),
# case-item -> pattern where-clause? ','?
Node('CaseItem', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Pattern', kind='Pattern'),
Child('WhereClause', kind='WhereClause',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# switch-case-label -> 'case' case-item-list ':'
Node('SwitchCaseLabel', kind='Syntax',
children=[
Child('CaseKeyword', kind='CaseToken'),
Child('CaseItems', kind='CaseItemList',
collection_element_name='CaseItem'),
Child('Colon', kind='ColonToken'),
]),
# catch-clause 'catch' pattern? where-clause? code-block
Node('CatchClause', kind='Syntax',
children=[
Child('CatchKeyword', kind='CatchToken'),
Child('Pattern', kind='Pattern',
is_optional=True),
Child('WhereClause', kind='WhereClause',
is_optional=True),
Child('Body', kind='CodeBlock'),
]),
# e.g. #assert(1 == 2)
Node('PoundAssertStmt', kind='Stmt',
children=[
Child('PoundAssert', kind='PoundAssertToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Condition', kind='Expr',
description='The assertion condition.'),
Child('Comma', kind='CommaToken', is_optional=True,
description='The comma after the assertion condition.'),
Child('Message', kind='StringLiteralToken', is_optional=True,
description='The assertion message.'),
Child('RightParen', kind='RightParenToken'),
]),
]
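# --- Illustrative sketch (not part of the Swift grammar) ---
# How a grammar production maps onto a Node/Child entry; this hypothetical
# loop-stmt reuses token kinds already defined for this library and is
# deliberately not appended to STMT_NODES.
#
# # loop-stmt -> 'repeat' code-block ';'?
# Node('LoopStmt', kind='Stmt',
#      traits=['WithCodeBlock'],
#      children=[
#          Child('RepeatKeyword', kind='RepeatToken'),
#          Child('Body', kind='CodeBlock'),
#      ])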
|
{
"content_hash": "0673b6d27dc4fbc978e4153fc8ecddaf",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 79,
"avg_line_length": 38.39316239316239,
"alnum_prop": 0.5173642030276047,
"repo_name": "lorentey/swift",
"id": "6d860abd708ade5ebf0e16364f76d226bf3b9b63",
"size": "13476",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/StmtNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13203"
},
{
"name": "C",
"bytes": "232100"
},
{
"name": "C++",
"bytes": "34440043"
},
{
"name": "CMake",
"bytes": "541520"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57302"
},
{
"name": "LLVM",
"bytes": "70517"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "429426"
},
{
"name": "Objective-C++",
"bytes": "249901"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1612445"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "189755"
},
{
"name": "Swift",
"bytes": "31105346"
},
{
"name": "Vim Script",
"bytes": "16883"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
}
|
import unittest
from leap import is_leap_year
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.5.1
class LeapTest(unittest.TestCase):
def test_year_not_divisible_by_4(self):
self.assertIs(is_leap_year(2015), False)
def test_year_divisible_by_2_not_divisible_by_4(self):
self.assertIs(is_leap_year(1970), False)
def test_year_divisible_by_4_not_divisible_by_100(self):
self.assertIs(is_leap_year(1996), True)
def test_year_divisible_by_100_not_divisible_by_400(self):
self.assertIs(is_leap_year(2100), False)
def test_year_divisible_by_400(self):
self.assertIs(is_leap_year(2000), True)
def test_year_divisible_by_200_not_divisible_by_400(self):
self.assertIs(is_leap_year(1800), False)
if __name__ == '__main__':
unittest.main()
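# --- Reference sketch of the module under test (illustrative) ---
# A minimal leap.py consistent with the cases above; the exercise's own
# solution may differ. Kept commented out so it cannot shadow the import.
#
# def is_leap_year(year):
#     return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)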
|
{
"content_hash": "ae02a108a28dbab2b663d45ae334f9de",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 28,
"alnum_prop": 0.6761904761904762,
"repo_name": "N-Parsons/exercism-python",
"id": "92ea304e6176828101c1d1be885f99755328e843",
"size": "840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/leap/leap_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555991"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
}
|
from ..config import GlobalConfig
class CurrentTimer:
""" Represents a command to view the current Toggl Timer """
description = "View the Toggl Timer"
args = []
def run(self):
""" View the timer """
entry = GlobalConfig.connection.timer.current()
print(entry.description, entry.duration)
|
{
"content_hash": "c81a9957ef9cf2a2f9131c25a9103e1d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 31.272727272727273,
"alnum_prop": 0.627906976744186,
"repo_name": "cloew/TogglDriver",
"id": "a345f73703b4c6c245e37487543120727f21b723",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toggl_driver/commands/current_timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8881"
}
],
"symlink_target": ""
}
|
from django.contrib.postgres.signals import (
get_citext_oids, get_hstore_oids, register_type_handlers,
)
from django.db.migrations import AddIndex, RemoveIndex
from django.db.migrations.operations.base import Operation
from django.db.utils import NotSupportedError
class CreateExtension(Operation):
reversible = True
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.vendor != 'postgresql':
return
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % schema_editor.quote_name(self.name))
# Clear cached, stale oids.
get_hstore_oids.cache_clear()
get_citext_oids.cache_clear()
# Registering new type handlers cannot be done before the extension is
# installed, otherwise a subsequent data migration would use the same
# connection.
register_type_handlers(schema_editor.connection)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION %s" % schema_editor.quote_name(self.name))
# Clear cached, stale oids.
get_hstore_oids.cache_clear()
get_citext_oids.cache_clear()
def describe(self):
return "Creates extension %s" % self.name
class BtreeGinExtension(CreateExtension):
def __init__(self):
self.name = 'btree_gin'
class BtreeGistExtension(CreateExtension):
def __init__(self):
self.name = 'btree_gist'
class CITextExtension(CreateExtension):
def __init__(self):
self.name = 'citext'
class CryptoExtension(CreateExtension):
def __init__(self):
self.name = 'pgcrypto'
class HStoreExtension(CreateExtension):
def __init__(self):
self.name = 'hstore'
class TrigramExtension(CreateExtension):
def __init__(self):
self.name = 'pg_trgm'
class UnaccentExtension(CreateExtension):
def __init__(self):
self.name = 'unaccent'
class NotInTransactionMixin:
def _ensure_not_in_transaction(self, schema_editor):
if schema_editor.connection.in_atomic_block:
raise NotSupportedError(
'The %s operation cannot be executed inside a transaction '
'(set atomic = False on the migration).'
% self.__class__.__name__
)
class AddIndexConcurrently(NotInTransactionMixin, AddIndex):
"""Create an index using PostgreSQL's CREATE INDEX CONCURRENTLY syntax."""
atomic = False
def describe(self):
return 'Concurrently create index %s on field(s) %s of model %s' % (
self.index.name,
', '.join(self.index.fields),
self.model_name,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_index(model, self.index, concurrently=True)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_index(model, self.index, concurrently=True)
class RemoveIndexConcurrently(NotInTransactionMixin, RemoveIndex):
"""Remove an index using PostgreSQL's DROP INDEX CONCURRENTLY syntax."""
atomic = False
def describe(self):
return 'Concurrently remove index %s from %s' % (self.name, self.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
index = from_model_state.get_index_by_name(self.name)
schema_editor.remove_index(model, index, concurrently=True)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
index = to_model_state.get_index_by_name(self.name)
schema_editor.add_index(model, index, concurrently=True)
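# --- Usage sketch (illustrative; app, model and field names are
# placeholders) ---
# Extension operations run inside an ordinary migration, while the
# concurrent index operations require atomic = False, as enforced above.
#
# from django.contrib.postgres.operations import (
#     AddIndexConcurrently, TrigramExtension,
# )
# from django.db import migrations, models
#
# class Migration(migrations.Migration):
#     atomic = False  # required by AddIndexConcurrently
#     dependencies = [('blog', '0002_auto')]
#     operations = [
#         TrigramExtension(),
#         AddIndexConcurrently(
#             'post',
#             models.Index(fields=['title'], name='post_title_idx'),
#         ),
#     ]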
|
{
"content_hash": "9db734860556783dc5a8ce880809e779",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 104,
"avg_line_length": 35.32846715328467,
"alnum_prop": 0.6679752066115703,
"repo_name": "mdworks2016/work_development",
"id": "9e417725ec6ea373a2300e26d16cb1d178431118",
"size": "4840",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/contrib/postgres/operations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
}
|
"""UtilityAnalysisEngine Test"""
from absl.testing import absltest
from absl.testing import parameterized
from unittest.mock import patch
import copy
import pipeline_dp
from pipeline_dp import budget_accounting
from utility_analysis_new import dp_engine
from utility_analysis_new import metrics
import utility_analysis_new
class MultiParameterConfiguration(parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name="All MultiParameterConfiguration fields unset",
error_msg="MultiParameterConfiguration must have at least 1 "
"non-empty attribute.",
max_partitions_contributed=None,
max_contributions_per_partition=None,
min_sum_per_partition=None,
max_sum_per_partition=None),
dict(testcase_name="Attributes different size 1",
error_msg="All set attributes in MultiParameterConfiguration must "
"have the same length.",
max_partitions_contributed=[1],
max_contributions_per_partition=[1, 2],
min_sum_per_partition=None,
max_sum_per_partition=None),
dict(testcase_name="Attributes different size 2",
error_msg="All set attributes in MultiParameterConfiguration must "
"have the same length.",
max_partitions_contributed=None,
max_contributions_per_partition=None,
min_sum_per_partition=[1, 1, 1],
max_sum_per_partition=[2]),
dict(testcase_name="One of min_sum_per_partition, "
"max_sum_per_partition is None",
error_msg="MultiParameterConfiguration: min_sum_per_partition and "
"max_sum_per_partition must be both set or both None.",
max_partitions_contributed=None,
max_contributions_per_partition=None,
min_sum_per_partition=[1, 1, 1],
max_sum_per_partition=None),
)
def test_validation(self, error_msg, max_partitions_contributed,
max_contributions_per_partition, min_sum_per_partition,
max_sum_per_partition):
with self.assertRaisesRegex(ValueError, error_msg):
utility_analysis_new.MultiParameterConfiguration(
max_partitions_contributed, max_contributions_per_partition,
min_sum_per_partition, max_sum_per_partition)
def test_get_aggregate_params(self):
params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.COUNT],
max_partitions_contributed=1,
max_contributions_per_partition=1)
max_partitions_contributed = [10, 12, 15]
multi_params = utility_analysis_new.MultiParameterConfiguration(
max_partitions_contributed=max_partitions_contributed)
        self.assertEqual(3, multi_params.size)
for i in range(multi_params.size):
ith_params = multi_params.get_aggregate_params(params, i)
params.max_partitions_contributed = max_partitions_contributed[i]
self.assertEqual(params, ith_params)
class DpEngine(parameterized.TestCase):
def _get_default_extractors(self) -> pipeline_dp.DataExtractors:
return pipeline_dp.DataExtractors(
privacy_id_extractor=lambda x: x,
partition_extractor=lambda x: x,
value_extractor=lambda x: x,
)
def _get_default_aggregate_params(self) -> pipeline_dp.AggregateParams:
return pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.COUNT],
max_partitions_contributed=1,
max_contributions_per_partition=1)
def test_utility_analysis_params(self):
default_extractors = self._get_default_extractors()
default_params = self._get_default_aggregate_params()
params_with_custom_combiners = copy.copy(default_params)
params_with_custom_combiners.custom_combiners = sum
params_with_unsupported_metric = copy.copy(default_params)
params_with_unsupported_metric.metrics = [pipeline_dp.Metrics.MEAN]
        params_with_contribution_bounds_already_enforced = copy.copy(default_params)
params_with_contribution_bounds_already_enforced.contribution_bounds_already_enforced = True
test_cases = [
{
"desc": "custom combiners",
"params": params_with_custom_combiners,
"data_extractor": default_extractors,
"public_partitions": [1]
},
{
"desc": "unsupported metric in metrics",
"params": params_with_unsupported_metric,
"data_extractor": default_extractors,
"public_partitions": [1]
},
{
"desc": "contribution bounds are already enforced",
"params": params_with_contribution_bounds_already_enforced,
"data_extractor": default_extractors,
"public_partitions": [1]
},
]
for test_case in test_cases:
with self.assertRaisesRegex(Exception,
expected_regex=test_case["desc"]):
budget_accountant = budget_accounting.NaiveBudgetAccountant(
total_epsilon=1, total_delta=1e-10)
engine = dp_engine.UtilityAnalysisEngine(
budget_accountant=budget_accountant,
backend=pipeline_dp.LocalBackend())
col = [0, 1, 2]
engine.aggregate(
col,
test_case["params"],
test_case["data_extractor"],
public_partitions=test_case["public_partitions"])
def test_aggregate_public_partition_e2e(self):
# Arrange
aggregator_params = self._get_default_aggregate_params()
budget_accountant = pipeline_dp.NaiveBudgetAccountant(total_epsilon=1,
total_delta=1e-10)
public_partitions = ["pk0", "pk1", "pk101"]
# Input collection has 100 elements, such that each privacy id
# contributes 1 time and each partition has 1 element.
col = list(range(100))
data_extractor = pipeline_dp.DataExtractors(
privacy_id_extractor=lambda x: x,
partition_extractor=lambda x: f"pk{x}",
value_extractor=lambda x: None)
engine = dp_engine.UtilityAnalysisEngine(
budget_accountant=budget_accountant,
backend=pipeline_dp.LocalBackend())
col = engine.aggregate(col=col,
params=aggregator_params,
data_extractors=data_extractor,
public_partitions=public_partitions)
budget_accountant.compute_budgets()
col = list(col)
# Assert public partitions are applied.
self.assertLen(col, 3)
self.assertTrue(any(v[0] == 'pk101' for v in col))
def test_aggregate_error_metrics(self):
# Arrange
aggregator_params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.COUNT],
max_partitions_contributed=1,
max_contributions_per_partition=2)
budget_accountant = pipeline_dp.NaiveBudgetAccountant(total_epsilon=2,
total_delta=1e-10)
# Input collection has 10 privacy ids where each privacy id
# contributes to the same 10 partitions, three times in each partition.
col = [(i, j) for i in range(10) for j in range(10)] * 3
data_extractor = pipeline_dp.DataExtractors(
privacy_id_extractor=lambda x: x[0],
partition_extractor=lambda x: f"pk{x[1]}",
value_extractor=lambda x: None)
engine = dp_engine.UtilityAnalysisEngine(
budget_accountant=budget_accountant,
backend=pipeline_dp.LocalBackend())
col = engine.aggregate(col=col,
params=aggregator_params,
data_extractors=data_extractor)
budget_accountant.compute_budgets()
col = list(col)
# Assert
self.assertLen(col, 10)
# Assert count metrics are correct.
[self.assertTrue(v[1][1].per_partition_error == -10) for v in col]
[
self.assertAlmostEqual(v[1][1].expected_cross_partition_error,
-18.0,
delta=1e-5) for v in col
]
[
self.assertAlmostEqual(v[1][1].std_cross_partition_error,
1.89736,
delta=1e-5) for v in col
]
[
self.assertAlmostEqual(v[1][1].std_noise, 11.95312, delta=1e-5)
for v in col
]
def test_multi_parameters(self):
# Arrange
aggregate_params = pipeline_dp.AggregateParams(
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN,
metrics=[pipeline_dp.Metrics.COUNT],
max_partitions_contributed=1,
max_contributions_per_partition=1)
multi_param = utility_analysis_new.MultiParameterConfiguration(
max_partitions_contributed=[1, 2],
max_contributions_per_partition=[1, 2])
budget_accountant = pipeline_dp.NaiveBudgetAccountant(total_epsilon=1,
total_delta=1e-10)
engine = dp_engine.UtilityAnalysisEngine(
budget_accountant=budget_accountant,
backend=pipeline_dp.LocalBackend())
# Input collection has 1 privacy id, which contributes to 2 partitions
# 1 and 2 times correspondingly.
input = [(0, "pk0"), (0, "pk1"), (0, "pk1")]
data_extractors = pipeline_dp.DataExtractors(
privacy_id_extractor=lambda x: x[0],
partition_extractor=lambda x: x[1],
value_extractor=lambda x: None)
public_partitions = ["pk0", "pk1"]
output = engine.aggregate(input,
aggregate_params,
data_extractors,
public_partitions=public_partitions,
multi_param_configuration=multi_param)
budget_accountant.compute_budgets()
output = list(output)
self.assertLen(output, 2)
# Each partition has 2 metrics (for both parameter set).
[self.assertLen(partition_metrics, 2) for partition_metrics in output]
expected_pk0 = [
metrics.CountMetrics(count=1,
per_partition_error=0,
expected_cross_partition_error=-0.5,
std_cross_partition_error=0.5,
std_noise=11.6640625,
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN),
metrics.CountMetrics(count=1,
per_partition_error=0,
expected_cross_partition_error=0,
std_cross_partition_error=0.0,
std_noise=32.99095075973487,
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN)
]
expected_pk1 = [
metrics.CountMetrics(count=2,
per_partition_error=-1,
expected_cross_partition_error=-0.5,
std_cross_partition_error=0.5,
std_noise=11.6640625,
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN),
metrics.CountMetrics(count=2,
per_partition_error=0,
expected_cross_partition_error=0,
std_cross_partition_error=0.0,
std_noise=32.99095075973487,
noise_kind=pipeline_dp.NoiseKind.GAUSSIAN)
]
self.assertSequenceEqual(expected_pk0, output[0][1])
self.assertSequenceEqual(expected_pk1, output[1][1])
@patch('pipeline_dp.sampling_utils.ValueSampler.__init__')
def test_partition_sampling(self, mock_sampler_init):
# Arrange
mock_sampler_init.return_value = None
aggregator_params = self._get_default_aggregate_params()
budget_accountant = pipeline_dp.NaiveBudgetAccountant(total_epsilon=1,
total_delta=1e-10)
data_extractor = pipeline_dp.DataExtractors(
privacy_id_extractor=lambda x: x,
partition_extractor=lambda x: f"pk{x}",
value_extractor=lambda x: None)
engine = dp_engine.UtilityAnalysisEngine(
budget_accountant=budget_accountant,
backend=pipeline_dp.LocalBackend())
partitions_sampling_prob = 0.25
engine.aggregate(col=[1, 2, 3],
params=aggregator_params,
data_extractors=data_extractor,
partitions_sampling_prob=partitions_sampling_prob)
mock_sampler_init.assert_called_once_with(partitions_sampling_prob)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "4e8e9071d05a399640e7b0d5d07a8b8c",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 100,
"avg_line_length": 42.9748427672956,
"alnum_prop": 0.5682716230060003,
"repo_name": "OpenMined/PipelineDP",
"id": "4b66c370e8b9ea1895643258d9cd8929198ddf8e",
"size": "14241",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "utility_analysis_new/tests/dp_engine_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42478"
},
{
"name": "Makefile",
"bytes": "573"
},
{
"name": "Python",
"bytes": "671843"
}
],
"symlink_target": ""
}
|
import pathlib
import pandas as pd
import quandl
DATA_FILE = 'nasdaq_data.csv'
DATA_TRF = 'nasdaq5.csv'
def download_data():
if pathlib.Path(DATA_FILE).exists():
print('data file already downloaded')
return
ndq = quandl.get("NASDAQOMX/COMP-NASDAQ", trim_start='2000-01-01', trim_end='2019-02-01')
    print('data downloaded')
ndq.to_csv(DATA_FILE)
print('data saved')
def transform_data():
ndq = pd.read_csv(DATA_FILE)
print('data loaded')
ndq['Trade Date'] = pd.to_datetime(ndq['Trade Date']).dt.date
ndq.set_index('Trade Date', inplace=True)
print('date set as index')
ndq['Index Value 5'] = ndq['Index Value'].shift(-5)
ndq.dropna(inplace=True)
print('Five days ahead price shifted')
ndq['is_higher'] = ndq['Index Value 5'] > ndq['Index Value']
    print('is_higher column created')
ndq.to_csv(DATA_TRF)
print('data saved')
print('done')
if __name__ == '__main__':
download_data()
transform_data()
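# --- Illustrative follow-up (not part of the original script) ---
# One way the transformed file might be consumed downstream; only columns
# created by transform_data() above are referenced.
def load_training_frame():
    ndq = pd.read_csv(DATA_TRF, index_col='Trade Date')
    features = ndq[['Index Value']]  # predictor: current index level
    target = ndq['is_higher']        # label: higher in five days?
    return features, target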
|
{
"content_hash": "00dd3fcf8cb3326234d410f5bb3f1aa5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 93,
"avg_line_length": 23.302325581395348,
"alnum_prop": 0.6297405189620758,
"repo_name": "Tjorriemorrie/trading",
"id": "e015eddd630b74a0dcba4622787e9402a529dff5",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "24_flatlib/generate_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "586"
},
{
"name": "HTML",
"bytes": "10059"
},
{
"name": "JavaScript",
"bytes": "1812"
},
{
"name": "Jupyter Notebook",
"bytes": "682876"
},
{
"name": "Less",
"bytes": "671"
},
{
"name": "M4",
"bytes": "18975"
},
{
"name": "Python",
"bytes": "636401"
},
{
"name": "Shell",
"bytes": "670"
},
{
"name": "q",
"bytes": "478327533"
}
],
"symlink_target": ""
}
|
import sys
from dataunit.main import main, parse_args
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
dataunit_excel_workbook = args.workbook
sys.exit(main(dataunit_excel_workbook))
|
{
"content_hash": "8e8eff8cb98049720f59c51a3af1c7e8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 25.875,
"alnum_prop": 0.6763285024154589,
"repo_name": "dataunit/dataunit",
"id": "c4967d070e015f6d03ca65193d3a6f4ce52e977e",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataunit/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6626"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from lists.models import Item, List
from lists.views import home_page
from lists.forms import (
ItemForm, EMPTY_LIST_ERROR,
ExistingListItemForm, DUPLICATE_ITEM_ERROR)
from unittest import skip
class HomePageTest(TestCase):
def test_home_page_renders_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def test_home_page_uses_item_form(self):
response = self.client.get('/')
self.assertIsInstance(response.context['form'], ItemForm)
class NewListTest(TestCase):
def test_saving_a_POST_request(self):
self.client.post(
'/lists/new',
data={'text': 'A new list item'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_POST(self):
response = self.client.post(
'/lists/new',
data={'text': 'A new list item'}
)
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (new_list.id,))
def test_validation_errors_are_sent_back_to_home_page_template(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
expected_error = escape("You can't have an empty list item")
self.assertContains(response, expected_error)
def test_invalid_list_items_arent_saved(self):
self.client.post('/lists/new', data={'text': ''})
self.assertEqual(List.objects.count(), 0)
self.assertEqual(Item.objects.count(), 0)
def test_for_invalid_input_renders_home_template(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_validation_errors_are_shown_on_home_page(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertContains(response, escape(EMPTY_LIST_ERROR))
def test_for_invalid_input_passes_form_to_template(self):
response = self.client.post('/lists/new', data={'text': ''})
self.assertIsInstance(response.context['form'], ItemForm)
class ListViewTest(TestCase):
def test_uses_list_template(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertTemplateUsed(response, 'list.html')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertEqual(response.context['list'], correct_list)
def test_displays_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='itemey 1', list=correct_list)
Item.objects.create(text='itemey 2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='other list item 1', list=other_list)
Item.objects.create(text='other list item 2', list=other_list)
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertContains(response, 'itemey 1')
self.assertContains(response, 'itemey 2')
self.assertNotContains(response, 'other list item 1')
self.assertNotContains(response, 'other list item 2')
def test_can_save_a_POST_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post(
'/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_POST_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post(
'/lists/%d/' % (correct_list.id,),
data={'text': 'A new item for an existing list'}
)
self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))
def test_display_item_form(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertIsInstance(response.context['form'], ExistingListItemForm)
self.assertContains(response, 'name="text"')
def post_invalid_input(self):
list_ = List.objects.create()
return self.client.post(
'/lists/%d/' % (list_.id,),
data={'text': ''}
)
def test_for_invalid_input_nothing_saved_to_db(self):
self.post_invalid_input()
self.assertEqual(Item.objects.count(), 0)
def test_for_invalid_input_renders_list_template(self):
response = self.post_invalid_input()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'list.html')
def test_for_invalid_input_passes_form_to_template(self):
response = self.post_invalid_input()
self.assertIsInstance(response.context['form'], ExistingListItemForm)
def test_for_invalid_input_shows_error_on_page(self):
response = self.post_invalid_input()
self.assertContains(response, escape(EMPTY_LIST_ERROR))
def test_duplicate_item_validation_errors_end_up_on_lists_page(self):
list1 = List.objects.create()
item1 = Item.objects.create(list=list1, text='textey')
response = self.client.post(
'/lists/%d/' % (list1.id,),
data={'text': 'textey'}
)
expected_error = escape(DUPLICATE_ITEM_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'list.html')
self.assertEqual(Item.objects.all().count(), 1)
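# --- Reference sketch (illustrative) ---
# A minimal new_list view consistent with NewListTest above; the real
# lists/views.py may differ, and form.save(for_list=...) is assumed from
# the ItemForm used in these tests. Kept commented out of the test module.
#
# from django.shortcuts import redirect, render
#
# def new_list(request):
#     form = ItemForm(data=request.POST)
#     if form.is_valid():
#         list_ = List.objects.create()
#         form.save(for_list=list_)
#         return redirect('/lists/%d/' % (list_.id,))
#     return render(request, 'home.html', {'form': form})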
|
{
"content_hash": "c463c304f1a9e127ef9efdb5c91f83b0",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 77,
"avg_line_length": 38.406060606060606,
"alnum_prop": 0.639734890326653,
"repo_name": "fantasycheung/django_learning",
"id": "393a6190b1e1f8ff4240b9a517728587e304d385",
"size": "6337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lists/tests/test_views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7490"
},
{
"name": "HTML",
"bytes": "3726"
},
{
"name": "JavaScript",
"bytes": "117936"
},
{
"name": "Python",
"bytes": "41085"
}
],
"symlink_target": ""
}
|
from .totalstatistics import TotalStatisticsBuilder
from .suitestatistics import SuiteStatisticsBuilder
from .tagstatistics import TagStatisticsBuilder
from .visitor import SuiteVisitor
class Statistics(object):
"""Container for total, suite and tag statistics.
Accepted parameters have the same semantics as the matching command line
options.
"""
def __init__(self, suite, suite_stat_level=-1, tag_stat_include=None,
tag_stat_exclude=None, tag_stat_combine=None, tag_doc=None,
tag_stat_link=None, rpa=False):
total_builder = TotalStatisticsBuilder(rpa=rpa)
suite_builder = SuiteStatisticsBuilder(suite_stat_level)
tag_builder = TagStatisticsBuilder(suite.criticality, tag_stat_include,
tag_stat_exclude, tag_stat_combine,
tag_doc, tag_stat_link)
suite.visit(StatisticsBuilder(total_builder, suite_builder, tag_builder))
#: Instance of :class:`~robot.model.totalstatistics.TotalStatistics`.
self.total = total_builder.stats
#: Instance of :class:`~robot.model.suitestatistics.SuiteStatistics`.
self.suite = suite_builder.stats
#: Instance of :class:`~robot.model.tagstatistics.TagStatistics`.
self.tags = tag_builder.stats
def visit(self, visitor):
visitor.visit_statistics(self)
class StatisticsBuilder(SuiteVisitor):
def __init__(self, total_builder, suite_builder, tag_builder):
self._total_builder = total_builder
self._suite_builder = suite_builder
self._tag_builder = tag_builder
def start_suite(self, suite):
self._suite_builder.start_suite(suite)
def end_suite(self, suite):
self._suite_builder.end_suite()
def visit_test(self, test):
self._total_builder.add_test(test)
self._suite_builder.add_test(test)
self._tag_builder.add_test(test)
def visit_keyword(self, kw):
pass
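# --- Usage sketch (illustrative; the file name is a placeholder) ---
# The container is normally reached from a parsed output.xml rather than
# built by hand; ExecutionResult comes from robot.api.
#
# from robot.api import ExecutionResult
#
# result = ExecutionResult('output.xml')
# stats = result.statistics          # a Statistics instance
# print(stats.total.critical.passed, stats.total.critical.failed)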
|
{
"content_hash": "616fd50e4367c7efb6f97a7e508162af",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 38.75,
"alnum_prop": 0.658560794044665,
"repo_name": "robotframework/RIDE",
"id": "7d20d3f49294bb02babb81afc1f46b020e77956b",
"size": "2659",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/model/statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
import gym
import pygame
import sys
import time
import matplotlib
import matplotlib.pyplot as plt
from collections import deque
from pygame.locals import HWSURFACE, DOUBLEBUF, RESIZABLE, VIDEORESIZE
from threading import Thread
try:
matplotlib.use('GTK3Agg')
except Exception:
pass
def display_arr(screen, arr, video_size, transpose):
arr_min, arr_max = arr.min(), arr.max()
arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)
pyg_img = pygame.transform.scale(pyg_img, video_size)
screen.blit(pyg_img, (0,0))
def play(env, transpose=True, fps=30, zoom=None, callback=None, keys_to_action=None):
"""Allows one to play the game using keyboard.
To simply play the game use:
play(gym.make("Pong-v3"))
Above code works also if env is wrapped, so it's particularly useful in
verifying that the frame-level preprocessing does not render the game
unplayable.
If you wish to plot real time statistics as you play, you can use
gym.utils.play.PlayPlot. Here's a sample code for plotting the reward
for last 5 second of gameplay.
    def callback(obs_t, obs_tp1, action, rew, done, info):
        return [rew,]
    plotter = PlayPlot(callback, 30 * 5, ["reward"])
    env = gym.make("Pong-v3")
    play(env, callback=plotter.callback)
Arguments
---------
env: gym.Env
Environment to use for playing.
transpose: bool
If True the output of observation is transposed.
Defaults to true.
fps: int
Maximum number of steps of the environment to execute every second.
Defaults to 30.
zoom: float
Make screen edge this many times bigger
callback: lambda or None
Callback if a callback is provided it will be executed after
every step. It takes the following input:
obs_t: observation before performing action
obs_tp1: observation after performing action
action: action that was executed
rew: reward that was received
            done: whether the environment is done or not
info: debug info
keys_to_action: dict: tuple(int) -> int or None
Mapping from keys pressed to action performed.
        For example, if pressing 'w' and space at the same time is supposed
        to trigger action number 2, then the keys_to_action dict would look
        like this:
            {
                # ...
                tuple(sorted((ord('w'), ord(' ')))): 2,
                # ...
            }
        If None, the default keys_to_action mapping for that env is used,
        if provided.
"""
obs_s = env.observation_space
assert type(obs_s) == gym.spaces.box.Box
assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])
if keys_to_action is None:
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
assert False, env.spec.id + " does not have explicit key to action mapping, " + \
"please specify one manually"
relevant_keys = set(sum(map(list, keys_to_action.keys()),[]))
if transpose:
video_size = env.observation_space.shape[1], env.observation_space.shape[0]
else:
video_size = env.observation_space.shape[0], env.observation_space.shape[1]
if zoom is not None:
video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
pressed_keys = []
running = True
env_done = True
screen = pygame.display.set_mode(video_size)
clock = pygame.time.Clock()
while running:
if env_done:
env_done = False
obs = env.reset()
else:
action = keys_to_action[tuple(sorted(pressed_keys))]
prev_obs = obs
obs, rew, env_done, info = env.step(action)
if callback is not None:
callback(prev_obs, obs, action, rew, env_done, info)
if obs is not None:
if len(obs.shape) == 2:
obs = obs[:, :, None]
if obs.shape[2] == 1:
obs = obs.repeat(3, axis=2)
display_arr(screen, obs, transpose=transpose, video_size=video_size)
# process pygame events
for event in pygame.event.get():
# test events, set key states
if event.type == pygame.KEYDOWN:
if event.key in relevant_keys:
pressed_keys.append(event.key)
elif event.key == 27:
running = False
elif event.type == pygame.KEYUP:
if event.key in relevant_keys:
pressed_keys.remove(event.key)
elif event.type == pygame.QUIT:
running = False
elif event.type == VIDEORESIZE:
video_size = event.size
screen = pygame.display.set_mode(video_size)
print(video_size)
pygame.display.flip()
clock.tick(fps)
pygame.quit()
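# --- Illustrative usage sketch (not part of the original module) ---
# keys_to_action maps sorted tuples of pressed key codes to env actions.
# Assuming an image-observation env whose actions 0/1/2 mean noop/left/right:
#
#   keys_to_action = {
#       (): 0,
#       (ord('a'),): 1,
#       (ord('d'),): 2,
#   }
#   play(env, keys_to_action=keys_to_action, fps=30)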
class PlayPlot(object):
def __init__(self, callback, horizon_timesteps, plot_names):
self.data_callback = callback
self.horizon_timesteps = horizon_timesteps
self.plot_names = plot_names
num_plots = len(self.plot_names)
self.fig, self.ax = plt.subplots(num_plots)
if num_plots == 1:
self.ax = [self.ax]
for axis, name in zip(self.ax, plot_names):
axis.set_title(name)
self.t = 0
self.cur_plot = [None for _ in range(num_plots)]
self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)]
def callback(self, obs_t, obs_tp1, action, rew, done, info):
points = self.data_callback(obs_t, obs_tp1, action, rew, done, info)
for point, data_series in zip(points, self.data):
data_series.append(point)
self.t += 1
xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t
for i, plot in enumerate(self.cur_plot):
if plot is not None:
plot.remove()
self.cur_plot[i] = self.ax[i].scatter(range(xmin, xmax), list(self.data[i]))
self.ax[i].set_xlim(xmin, xmax)
plt.pause(0.000001)
if __name__ == '__main__':
from rl_algs.common.atari_wrappers import wrap_deepmind
def callback(obs_t, obs_tp1, action, rew, done, info):
return [rew, obs_t.mean()]
    plotter = PlayPlot(callback, 30 * 5, ["reward", "mean intensity"])
    env = gym.make("MontezumaRevengeNoFrameskip-v3")
    env = wrap_deepmind(env)
    play(env, zoom=4, callback=plotter.callback, fps=30)
|
{
"content_hash": "c0bac4da9bcf5c468715da6229e68a7f",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 93,
"avg_line_length": 35.41968911917098,
"alnum_prop": 0.5953774136922176,
"repo_name": "dianchen96/gym",
"id": "1aafa622c3f9c215ca9102c193aefd18832cbbf1",
"size": "6836",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "utils/play.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "1225167"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
}
|
import happyforms
from django import forms
from django.contrib import messages
from remo.base.tasks import send_remo_mail
from remo.profiles.models import UserProfile
class BaseEmailUsersForm(happyforms.Form):
"""Base form to send email to multiple users."""
subject = forms.CharField(label='', widget=(
forms.TextInput(attrs={'placeholder': 'Subject',
'required': 'required',
'class': 'input-text big'})))
body = forms.CharField(label='', widget=(
forms.Textarea(attrs={'placeholder': 'Body of email',
'required': 'required',
'class': 'flat long'})))
class EmailUsersForm(BaseEmailUsersForm):
"""Generic form to send email to multiple users."""
def __init__(self, users, *args, **kwargs):
"""Initialize form.
Dynamically set fields for the recipients of the mail.
"""
super(EmailUsersForm, self).__init__(*args, **kwargs)
for user in users:
# Insert method is used to override the order of form fields
form_widget = forms.CheckboxInput(
attrs={'class': 'input-text-big'})
self.fields.update([(str(user.id), forms.BooleanField(label=user,
initial=False,
required=False,
widget=form_widget))])
def send_mail(self, request):
"""Send mail to recipients list."""
recipients_list = []
for field in self.fields:
if (isinstance(self.fields[field], forms.BooleanField) and
self.cleaned_data[field]):
recipients_list.append(long(field))
if recipients_list:
from_email = '%s <%s>' % (request.user.get_full_name(),
request.user.email)
send_remo_mail.delay(sender=from_email,
recipients_list=recipients_list,
subject=self.cleaned_data['subject'],
message=self.cleaned_data['body'])
messages.success(request, 'Email sent successfully.')
else:
messages.error(request, ('Email not sent. Please select at '
'least one recipient.'))
class EmailMentorForm(BaseEmailUsersForm):
"""Generic form to send email to a user's mentor."""
subject = forms.CharField(required=False)
def send_email(self, request, subject='', message=None,
template=None, data=None):
"""Send an email to user's mentor"""
mentor = request.user.userprofile.mentor
from_email = '%s <%s>' % (request.user.get_full_name(),
request.user.email)
send_remo_mail.delay(sender=from_email,
recipients_list=[mentor.id],
subject=subject,
message=message,
email_template=template,
data=data)
class EditSettingsForm(happyforms.ModelForm):
"""Form to edit user settings regarding mail preferences."""
receive_email_on_add_comment = forms.BooleanField(
required=False, initial=True,
label=('Receive email when a user comments on a report.'))
receive_email_on_add_event_comment = forms.BooleanField(
required=False, initial=True,
label=('Receive email when a user comments on an event.'))
receive_email_on_add_voting_comment = forms.BooleanField(
required=False, initial=True,
        label=('Receive email when a user comments on a poll.'))
class Meta:
model = UserProfile
fields = ['receive_email_on_add_comment',
'receive_email_on_add_event_comment',
'receive_email_on_add_voting_comment']
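# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a view with a queryset of users and a POST request:
#
#   form = EmailUsersForm(users, request.POST)
#   if form.is_valid():
#       form.send_mail(request)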
|
{
"content_hash": "0105be397b7bbc3195b9be67cad0033e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 88,
"avg_line_length": 42.73684210526316,
"alnum_prop": 0.541871921182266,
"repo_name": "tsmrachel/remo",
"id": "09b4b55953d72c85d457865c7dd57e1b73f9b371",
"size": "4060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remo/base/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "CSS",
"bytes": "316677"
},
{
"name": "HTML",
"bytes": "333690"
},
{
"name": "JavaScript",
"bytes": "593637"
},
{
"name": "Python",
"bytes": "755215"
},
{
"name": "Shell",
"bytes": "715"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
import os
import glob
from flexmock import flexmock, flexmock_teardown
from .. import EloquentTestCase
from eloquent.migrations import Migrator, DatabaseMigrationRepository, Migration
from eloquent import DatabaseManager
from eloquent.connections import Connection
class MigratorTestCase(EloquentTestCase):
def tearDown(self):
flexmock_teardown()
def test_migrations_are_run_up_when_outstanding_migrations_exist(self):
resolver = flexmock(DatabaseManager)
resolver.should_receive('connection').and_return(None)
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
g = flexmock(glob)
g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
os.path.join(os.getcwd(), '2_bar.py'),
os.path.join(os.getcwd(), '1_foo.py'),
os.path.join(os.getcwd(), '3_baz.py')
])
migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])
migrator.get_repository().should_receive('get_next_batch_number').once().and_return(1)
migrator.get_repository().should_receive('log').once().with_args('2_bar', 1)
migrator.get_repository().should_receive('log').once().with_args('3_baz', 1)
bar_mock = flexmock(MigrationStub())
bar_mock.should_receive('up').once()
baz_mock = flexmock(MigrationStub())
baz_mock.should_receive('up').once()
migrator.should_receive('_resolve').with_args(os.getcwd(), '2_bar').once().and_return(bar_mock)
migrator.should_receive('_resolve').with_args(os.getcwd(), '3_baz').once().and_return(baz_mock)
migrator.run(os.getcwd())
def test_up_migration_can_be_pretended(self):
resolver_mock = flexmock(DatabaseManager)
resolver_mock.should_receive('connection').and_return({})
resolver = flexmock(DatabaseManager({}))
connection = flexmock(Connection(None))
connection.should_receive('pretend').replace_with(lambda callback: callback(None))
resolver.should_receive('connection').with_args(None).and_return(connection)
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
g = flexmock(glob)
g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
os.path.join(os.getcwd(), '2_bar.py'),
os.path.join(os.getcwd(), '1_foo.py'),
os.path.join(os.getcwd(), '3_baz.py')
])
migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])
migrator.get_repository().should_receive('get_next_batch_number').once().and_return(1)
bar_mock = flexmock(MigrationStub())
bar_mock.should_receive('get_connection').once().and_return(None)
bar_mock.should_receive('up').once()
baz_mock = flexmock(MigrationStub())
baz_mock.should_receive('get_connection').once().and_return(None)
baz_mock.should_receive('up').once()
migrator.should_receive('_resolve').with_args(os.getcwd(), '2_bar').once().and_return(bar_mock)
migrator.should_receive('_resolve').with_args(os.getcwd(), '3_baz').once().and_return(baz_mock)
migrator.run(os.getcwd(), True)
def test_nothing_is_done_when_no_migrations_outstanding(self):
resolver_mock = flexmock(DatabaseManager)
resolver_mock.should_receive('connection').and_return(None)
resolver = flexmock(DatabaseManager({}))
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
g = flexmock(glob)
g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
os.path.join(os.getcwd(), '1_foo.py')
])
migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])
migrator.run(os.getcwd())
def test_last_batch_of_migrations_can_be_rolled_back(self):
resolver = flexmock(DatabaseManager)
resolver.should_receive('connection').and_return(None)
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
foo_migration = MigrationStub('foo')
bar_migration = MigrationStub('bar')
migrator.get_repository().should_receive('get_last').once().and_return([
foo_migration,
bar_migration
])
bar_mock = flexmock(MigrationStub())
bar_mock.should_receive('down').once()
foo_mock = flexmock(MigrationStub())
foo_mock.should_receive('down').once()
migrator.should_receive('_resolve').with_args(os.getcwd(), 'bar').once().and_return(bar_mock)
migrator.should_receive('_resolve').with_args(os.getcwd(), 'foo').once().and_return(foo_mock)
migrator.get_repository().should_receive('delete').once().with_args(bar_migration)
migrator.get_repository().should_receive('delete').once().with_args(foo_migration)
migrator.rollback(os.getcwd())
def test_rollback_migration_can_be_pretended(self):
resolver_mock = flexmock(DatabaseManager)
resolver_mock.should_receive('connection').and_return({})
resolver = flexmock(DatabaseManager({}))
connection = flexmock(Connection(None))
connection.should_receive('pretend').replace_with(lambda callback: callback(None))
resolver.should_receive('connection').with_args(None).and_return(connection)
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
foo_migration = MigrationStub('foo')
bar_migration = MigrationStub('bar')
migrator.get_repository().should_receive('get_last').once().and_return([
foo_migration,
bar_migration
])
bar_mock = flexmock(MigrationStub())
bar_mock.should_receive('down').once()
foo_mock = flexmock(MigrationStub())
foo_mock.should_receive('down').once()
migrator.should_receive('_resolve').with_args(os.getcwd(), 'bar').once().and_return(bar_mock)
migrator.should_receive('_resolve').with_args(os.getcwd(), 'foo').once().and_return(foo_mock)
migrator.rollback(os.getcwd(), True)
def test_nothing_is_rolled_back_when_nothing_in_repository(self):
resolver = flexmock(DatabaseManager)
resolver.should_receive('connection').and_return(None)
migrator = flexmock(
Migrator(
flexmock(
DatabaseMigrationRepository(
resolver,
'migrations'
)
),
resolver
)
)
migrator.get_repository().should_receive('get_last').once().and_return([])
migrator.rollback(os.getcwd())
class MigrationStub(Migration):
def __init__(self, migration=None):
self.migration = migration
def up(self):
pass
def down(self):
pass
def __getitem__(self, item):
return self.migration
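# --- Illustrative sketch (not part of the original tests) of the API the
# tests above exercise, assuming a configured DatabaseManager ``resolver``:
#
#   repository = DatabaseMigrationRepository(resolver, 'migrations')
#   migrator = Migrator(repository, resolver)
#   migrator.run('migrations/')        # apply outstanding migrations
#   migrator.rollback('migrations/')   # revert the last batch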
|
{
"content_hash": "817551cfc412c5e44620844a25b9a59c",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 103,
"avg_line_length": 36.40723981900452,
"alnum_prop": 0.5673626646781009,
"repo_name": "sdispater/eloquent",
"id": "27e2e3e801b0612599d8650fb931d3e4a5e289c2",
"size": "8071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/migrations/test_migrator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741617"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.localflavor.us.models import USStateField
from wtforms.validators import u
class Group(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return u('%s(%d)') % (self.name, self.pk)
class User(models.Model):
username = models.CharField(max_length=40)
group = models.ForeignKey(Group)
birthday = models.DateField(help_text="Teh Birthday")
email = models.EmailField(blank=True)
posts = models.PositiveSmallIntegerField()
state = USStateField()
reg_ip = models.IPAddressField("IP Addy")
url = models.URLField()
file = models.FilePathField()
file2 = models.FileField(upload_to='.')
bool = models.BooleanField()
time1 = models.TimeField()
slug = models.SlugField()
|
{
"content_hash": "15fe67316e1037bcdf12502f71e9c7c8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 33.72,
"alnum_prop": 0.6631079478054567,
"repo_name": "webitup/python3-wforms",
"id": "5d543085c11bbcdf78345030df92959e501c0c04",
"size": "843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ext_django/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "187546"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
"""API over the cinder service."""
from django.utils.translation import gettext_lazy as _
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import json_encoder
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.usage import quotas
CLIENT_KEYWORDS = {'marker', 'sort_dir', 'paginate'}
@urls.register
class Volumes(generic.View):
"""API for cinder volumes."""
url_regex = r'cinder/volumes/$'
@rest_utils.ajax()
def get(self, request):
"""Get a detailed list of volumes associated with the current project.
Example GET:
http://localhost/api/cinder/volumes?paginate=true&sort_dir=asc
If invoked as an admin, you may set the GET parameter "all_projects"
to 'true' to return details for all projects.
        The following parameters may be passed in the GET request:
        :param search_opts: includes options such as name, status, bootable
        :param paginate: If true will perform pagination based on settings.
        :param marker: Specifies the namespace of the last-seen volume.
             The typical pattern of limit and marker is to make an
             initial limited request and then to use the last
             namespace from the response as the marker parameter
             in a subsequent limited request. With paginate, limit
             is automatically set.
        :param sort_dir: The sort direction ('asc' or 'desc').
The listing result is an object with property "items".
"""
if request.GET.get('all_projects') == 'true':
result, has_more, has_prev = api.cinder.volume_list_paged(
request,
{'all_tenants': 1}
)
else:
search_opts, kwargs = rest_utils.parse_filters_kwargs(
request, CLIENT_KEYWORDS)
result, has_more, has_prev = api.cinder.volume_list_paged(
request,
search_opts=search_opts, **kwargs
)
return {
'items': [api.cinder.Volume(u).to_dict() for u in result],
'has_more_data': has_more,
'has_prev_data': has_prev
}
@rest_utils.ajax(data_required=True)
def post(self, request):
volume = api.cinder.volume_create(
request,
size=request.DATA['size'],
name=request.DATA['name'],
description=request.DATA['description'],
volume_type=request.DATA['volume_type'],
snapshot_id=request.DATA['snapshot_id'],
metadata=request.DATA['metadata'],
image_id=request.DATA['image_id'],
availability_zone=request.DATA['availability_zone'],
source_volid=request.DATA['source_volid']
)
return rest_utils.CreatedResponse(
'/api/cinder/volumes/%s' % volume.id,
volume.to_dict()
)
@urls.register
class Volume(generic.View):
"""API for cinder volume."""
url_regex = r'cinder/volumes/(?P<volume_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, volume_id):
"""Get a single volume's details with the volume id.
        The following parameters may be passed in the GET request:
:param volume_id: the id of the volume
The result is a volume object.
"""
return api.cinder.volume_get(request, volume_id).to_dict()
@urls.register
class VolumeTypes(generic.View):
"""API for volume types."""
url_regex = r'cinder/volumetypes/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of volume types.
The listing result is an object with the property "items".
"""
result = api.cinder.volume_type_list(request)
return {'items': [api.cinder.VolumeType(u).to_dict() for u in result]}
@urls.register
class VolumeMetadata(generic.View):
"""API for volume metadata"""
url_regex = r'cinder/volumes/(?P<volume_id>[^/]+)/metadata$'
@rest_utils.ajax()
def get(self, request, volume_id):
"""Get a specific volume's metadata
http://localhost/api/cinder/volumes/1/metadata
"""
return api.cinder.volume_get(request,
volume_id).to_dict().get('metadata')
@rest_utils.ajax()
def patch(self, request, volume_id):
"""Update metadata items for specific volume
http://localhost/api/cinder/volumes/1/metadata
"""
updated = request.DATA['updated']
removed = request.DATA['removed']
if updated:
api.cinder.volume_set_metadata(request, volume_id, updated)
if removed:
api.cinder.volume_delete_metadata(request, volume_id, removed)
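# --- Illustrative PATCH payload (assumption based on the handler above) ---
# The view reads request.DATA['updated'] (a dict of keys to set) and
# request.DATA['removed'] (a list of keys to delete), e.g.:
#
#   {"updated": {"purpose": "db"}, "removed": ["stale_key"]}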
@urls.register
class VolumeType(generic.View):
"""API for getting a volume type."""
url_regex = r'cinder/volumetypes/(?P<volumetype_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, volumetype_id):
"""Get a single volume type details with the volume type id.
        The following parameters may be passed in the GET request:
:param volumetype_id: the id of the volume type
If 'default' is passed as the volumetype_id then
it returns the default volumetype
The result is a volume type object.
"""
if volumetype_id == 'default':
volumetype = api.cinder.volume_type_default(request)
else:
volumetype = api.cinder.volume_type_get(request, volumetype_id)
return api.cinder.VolumeType(volumetype).to_dict()
@urls.register
class VolumeSnapshots(generic.View):
"""API for cinder volume snapshots."""
url_regex = r'cinder/volumesnapshots/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of volume snapshots associated with the current project.
The listing result is an object with property "items".
"""
result = api.cinder.volume_snapshot_list(
request,
search_opts=rest_utils.parse_filters_kwargs(request)[0]
)
return {'items': [u.to_dict() for u in result]}
@urls.register
class VolumeSnapshotMetadata(generic.View):
"""API for getting snapshots metadata"""
url_regex = r'cinder/volumesnapshots/' \
r'(?P<volume_snapshot_id>[^/]+)/metadata$'
@rest_utils.ajax()
def get(self, request, volume_snapshot_id):
"""Get a specific volumes snapshot metadata
http://localhost/api/cinder/volumesnapshots/1/metadata
"""
result = api.cinder.volume_snapshot_get(request,
volume_snapshot_id).\
to_dict().get('metadata')
return result
@rest_utils.ajax()
def patch(self, request, volume_snapshot_id):
"""Update metadata for specific volume snapshot
http://localhost/api/cinder/volumesnapshots/1/metadata
"""
updated = request.DATA['updated']
removed = request.DATA['removed']
if updated:
api.cinder.volume_snapshot_set_metadata(request,
volume_snapshot_id,
updated)
if removed:
api.cinder.volume_snapshot_delete_metadata(request,
volume_snapshot_id,
removed)
@urls.register
class VolumeTypeMetadata(generic.View):
"""API for getting snapshots metadata"""
url_regex = r'cinder/volumetypes/(?P<volume_type_id>[^/]+)/metadata$'
@rest_utils.ajax()
def get(self, request, volume_type_id):
"""Get a specific volume's metadata
http://localhost/api/cinder/volumetypes/1/metadata
"""
metadata = api.cinder.volume_type_extra_get(request, volume_type_id)
result = {x.key: x.value for x in metadata}
return result
@rest_utils.ajax()
def patch(self, request, volume_type_id):
"""Update metadata for specific volume
http://localhost/api/cinder/volumetypes/1/metadata
"""
updated = request.DATA['updated']
removed = request.DATA['removed']
if updated:
api.cinder.volume_type_extra_set(request,
volume_type_id,
updated)
if removed:
api.cinder.volume_type_extra_delete(request,
volume_type_id,
removed)
@urls.register
class Extensions(generic.View):
    """API for cinder extensions."""
url_regex = r'cinder/extensions/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of extensions.
The listing result is an object with property "items". Each item is
an extension.
Example GET:
http://localhost/api/cinder/extensions
"""
result = api.cinder.list_extensions(request)
return {'items': [{
'alias': e.alias,
'description': e.description,
'links': e.links,
'name': e.name,
'updated': e.updated
} for e in result]}
@urls.register
class QoSSpecs(generic.View):
url_regex = r'cinder/qosspecs/$'
@rest_utils.ajax()
def get(self, request):
result = api.cinder.qos_specs_list(request)
return {'items': [u.to_dict() for u in result]}
@urls.register
class TenantAbsoluteLimits(generic.View):
url_regex = r'cinder/tenantabsolutelimits/$'
@rest_utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def get(self, request):
return api.cinder.tenant_absolute_limits(request)
@urls.register
class Services(generic.View):
"""API for cinder services."""
url_regex = r'cinder/services/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of cinder services.
Will return HTTP 501 status code if the service_list extension is
not supported.
"""
if not (api.base.is_service_enabled(request, 'volume') and
api.cinder.extension_supported(request, 'Services')):
raise rest_utils.AjaxError(501, '')
result = api.cinder.service_list(request)
return {'items': [{
'binary': u.binary,
'host': u.host,
'zone': u.zone,
'updated_at': u.updated_at,
'status': u.status,
'state': u.state,
'id': idx + 1
} for idx, u in enumerate(result)]}
@urls.register
class DefaultQuotaSets(generic.View):
"""API for getting default quotas for cinder"""
url_regex = r'cinder/quota-sets/defaults/$'
@rest_utils.ajax()
def get(self, request):
"""Get the values for Cinder specific quotas
Example GET:
http://localhost/api/cinder/quota-sets/defaults/
"""
if not api.cinder.is_volume_service_enabled(request):
raise rest_utils.AjaxError(501, _('Service Cinder is disabled.'))
quota_set = api.cinder.default_quota_get(
request, request.user.tenant_id)
result = [
{
'display_name':
quotas.QUOTA_NAMES.get(
quota.name,
quota.name.replace("_", " ").title()
) + '',
'name': quota.name,
'limit': quota.limit
}
for quota in quota_set]
return {'items': result}
@rest_utils.ajax(data_required=True)
def patch(self, request):
"""Update the values for Cinder specific quotas
This method returns HTTP 204 (no content) on success.
"""
if api.cinder.is_volume_service_enabled(request):
cinder_data = {
key: request.DATA[key] for key in quotas.CINDER_QUOTA_FIELDS
}
api.cinder.default_quota_update(request, **cinder_data)
else:
raise rest_utils.AjaxError(501, _('Service Cinder is disabled.'))
@urls.register
class QuotaSets(generic.View):
"""API for setting quotas for a given project."""
url_regex = r'cinder/quota-sets/(?P<project_id>[0-9a-f]+)$'
@rest_utils.ajax(data_required=True)
def patch(self, request, project_id):
"""Update a single project quota data.
The PATCH data should be an application/json object with the
attributes to set to new quota values.
This method returns HTTP 204 (no content) on success.
"""
# Filters cinder quota fields
disabled_quotas = quotas.get_disabled_quotas(request)
if api.cinder.is_volume_service_enabled(request):
cinder_data = {
key: request.DATA[key] for key in quotas.CINDER_QUOTA_FIELDS
if key not in disabled_quotas
}
api.cinder.tenant_quota_update(request, project_id, **cinder_data)
else:
raise rest_utils.AjaxError(501, _('Service Cinder is disabled.'))
@urls.register
class AvailabilityZones(generic.View):
"""API for cinder availability zones."""
url_regex = r'cinder/availzones/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of availability zones.
The following get parameters may be passed in the GET
request:
:param detailed: If this equals "true" then the result will
include more detail.
The listing result is an object with property "items".
"""
detailed = request.GET.get('detailed') == 'true'
result = api.cinder.availability_zone_list(request, detailed)
return {'items': [u.to_dict() for u in result]}
|
{
"content_hash": "a37bd1b3a67c4dd55148a8d8f669d392",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 78,
"avg_line_length": 32.70754716981132,
"alnum_prop": 0.5887655033169887,
"repo_name": "openstack/horizon",
"id": "b3e36457f17e838d5f19c2c021043d81ef91f5d1",
"size": "14440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/rest/cinder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
}
|
import re
# GTK Imports
import gtk
# EVOGTK imports
from evogtk.gui import GUIClass
# Plugin class
class Plugin(GUIClass):
"""
LOTROAssist money count plugin class
"""
metadata={
'PLUGIN_NAME': 'Money Count',
'PLUGIN_CODENAME': 'lootbag',
'PLUGIN_VERSION': '0.1',
'PLUGIN_DESC': 'Lord Of The Rings Online Assistant plugin for money counting',
'PLUGIN_COPYRIGHT': '(C) 2010 Oliver Gutiérrez <ogutsua@gmail.com>',
'PLUGIN_WEBSITE': 'http://www.evosistemas.com',
'PLUGIN_DOCK': 'status',
}
def initialize(self):
"""
Initialization function
"""
self.regexp=re.compile(r'You( sold \d+ items for | looted | received |\'ve earned |r share was )((?P<gold>\d+) gold coin(s)*( and )*)*((?P<silver>\d+) silver piece(s)*( and )*)*((?P<copper>\d+) copper coin(s)*)*( for quest completion| in the mail)*\.$')
# TODO: lotroassist: purchasing
# You purchased 43 Travelling Rations for 68 silver pieces and 80 copper coins.
def clearMoney(self,widget):
"""
Clear money
"""
self.ui.lblCopperCoins=0
self.ui.lblSilverCoins=0
self.ui.lblGoldCoins=0
def newLine(self,line):
"""
New line analysing function
"""
# Analyze log line
resp=self.regexp.search(line)
if resp:
# Set values
carry=0
copper=resp.group('copper')
silver=resp.group('silver')
gold=resp.group('gold')
if not copper:
copper=0
if not silver:
silver=0
if not gold:
gold=0
# Calculate copper coins
val=int(self.ui.lblCopperCoins)+int(copper)
if val >= 100:
carry=1
val=val%100
self.ui.lblCopperCoins=val
# Calculate silver coins
silver=int(silver)+carry
carry=0
val=int(self.ui.lblSilverCoins)+silver
if val >= 1000:
carry=1
val=val%1000
self.ui.lblSilverCoins=val
# Calculate gold coins
gold=int(gold)+carry
self.ui.lblGoldCoins=int(self.ui.lblGoldCoins)+gold
return True
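# --- Worked example (illustrative) of the carry arithmetic in newLine ---
# The counters roll over at 100 copper per silver and 1000 silver per gold.
# Starting from 95 copper / 999 silver / 0 gold and looting 10 copper:
#   copper: 95 + 10 = 105 -> displays 5, carry 1 to silver
#   silver: 999 + 0 + 1 = 1000 -> displays 0, carry 1 to gold
#   gold:   0 + 0 + 1 = 1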
|
{
"content_hash": "8ad7db9274f394a6dc6c31f6dfa590bd",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 261,
"avg_line_length": 31.72972972972973,
"alnum_prop": 0.5340715502555367,
"repo_name": "olivergs/lotroassist",
"id": "cd1faf4fb979dcfd370bc7c60e3cf1f167179e10",
"size": "2633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plugins/moneycount/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70200"
}
],
"symlink_target": ""
}
|
from os import listdir
from os.path import isfile, join
import argparse
import re
from PIL import Image
import pystache
def process_image(filename):
image = Image.open(args.folder[0] + '/' + filename)
data = list(image.getdata())
return filename.split('.')[0].replace('-', '_'), data
def generate_data(data):
data_array = []
for a in data:
data_array.append(hex(a[3]))
return ', '.join(data_array)
def generate_file(template, name, data):
# print("Writing to output file: " + name)
template_file = open(template, 'r')
template = template_file.read()
template_file.close()
output_data = pystache.render(template, data)
output_file = open(name, 'w')
output_file.write(output_data)
output_file.close()
# Setup arguments
parser = argparse.ArgumentParser(description='Process open-iconic png files to c sources')
parser.add_argument('--scale', nargs=1, type=int, default=[1],
                    help='icon scale, either 1, 2, 3, 4, 6, 8')
parser.add_argument('--folder', nargs=1, default=['./png'],
help='folder to look for icons')
parser.add_argument('--template', nargs=1, default=['icon-template.h'],
help='mustache template to fill')
parser.add_argument('--output', nargs=1, default=['icons.h'],
help='output file')
parser.add_argument('--debug',
help='debug mode')
args = parser.parse_args()
# Create filename filter for scale setting
if args.scale[0] == 1:
    name_filter = re.compile(r'^([a-z\-]+)\.png$')
else:
    name_filter = re.compile(r'^([a-z\-]+)-' + str(args.scale[0]) + r'x\.png$')
# Parse folder
files = [f for f in listdir(args.folder[0]) if isfile(join(args.folder[0], f))]
# Filter files based on scale
filtered_files = [f for f in files if name_filter.match(f)]
file_data = {}
file_data['template'] = args.template[0]
file_data['scale'] = args.scale[0]
file_data['icons'] = []
for f in filtered_files:
name, data = process_image(f)
icon = {}
icon['name'] = name
icon['data'] = generate_data(data)
icon['size'] = len(data)
file_data['icons'].append(icon)
generate_file(args.template[0], args.output[0], file_data)
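# --- Illustrative template shape (assumption, not the shipped template) ---
# The mustache context is {'scale': N, 'icons': [{'name', 'data', 'size'}]},
# so a minimal icon-template.h could render each icon as a byte array:
#
#   {{#icons}}
#   const unsigned char {{name}}[{{size}}] = { {{data}} };
#   {{/icons}}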
|
{
"content_hash": "2f5250f02a8b3dc07d45fec72d0d0f1e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 90,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6471144749290445,
"repo_name": "ryankurte/micro-gui",
"id": "c93952b04d8323577ba49e7b007ad5e0b2d15437",
"size": "2138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/build-iconic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9822996"
},
{
"name": "C++",
"bytes": "126"
},
{
"name": "CMake",
"bytes": "8246"
},
{
"name": "Makefile",
"bytes": "3372"
},
{
"name": "Python",
"bytes": "20351"
},
{
"name": "Shell",
"bytes": "204"
}
],
"symlink_target": ""
}
|
"""Definitions for the `Gaussian` class."""
import numpy as np
from scipy.special import erfinv
from mosfit.modules.parameters.parameter import Parameter
# Important: Only define one ``Module`` class per file.
class Gaussian(Parameter):
"""Parameter with Gaussian prior.
If the parameter must be positive, set the `pos` keyword to True.
"""
def __init__(self, **kwargs):
"""Initialize module."""
super(Gaussian, self).__init__(**kwargs)
        self._mu = kwargs.get(self.key('mu'), None)
        self._sigma = kwargs.get(self.key('sigma'), None)
        # Validate before any transform (and allow a legitimate mu of 0).
        if self._mu is None:
            raise ValueError('Need to set a value for mu!')
        if self._sigma is None:
            raise ValueError('Need to set a value for sigma!')
        if self._log:
            self._mu = np.log(self._mu)
            self._sigma = np.log(10.0 ** self._sigma)
def lnprior_pdf(self, x):
"""Evaluate natural log of probability density function."""
value = self.value(x)
if self._log:
value = np.log(value)
return -(value - self._mu) ** 2 / (2. * self._sigma ** 2)
def prior_icdf(self, u):
"""Evaluate inverse cumulative density function."""
value = (erfinv(2.0 * u - 1.0) * np.sqrt(2.)) * self._sigma + self._mu
value = (value - self._min_value) / (self._max_value - self._min_value)
return np.clip(value, 0.0, 1.0)
|
{
"content_hash": "e8cdcd1f578ebaa7a6855c9c33d4ed2b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 32.29545454545455,
"alnum_prop": 0.5826882477128783,
"repo_name": "mnicholl/MOSFiT",
"id": "d7cabb8202ba2fc51c98b5cce987957ce94d6399",
"size": "1421",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mosfit/modules/parameters/gaussian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39296"
},
{
"name": "Jupyter Notebook",
"bytes": "2098166"
},
{
"name": "Python",
"bytes": "583236"
},
{
"name": "Shell",
"bytes": "2360"
}
],
"symlink_target": ""
}
|
"""Module for implementing the Federated search."""
from search.common import exceptions
from search.common import utils
from search.plugin import coordinate_search_handler
from search.plugin import geplaces_search_handler
class FederatedSearch(object):
"""Class for performing the Federated search.
We initially submit the search against the CoordinateSearch, stopping
there if any positive results are returned. If not, we issue our search
against the GEPlacesSearch.
If there is a valid response from any of the searches, we use
it. If not, then we can assume that 'location' doesn't exist and so we
present a reasonable 'no results found' back to the caller.
"""
def __init__(self):
"""Inits FederatedSearch.
Initializes the logger "ge_search".
"""
self.utils = utils.SearchUtils()
self.logger = self.utils.logger
# Create coordinate and places search objects
self._coordinate = coordinate_search_handler.CoordinateSearch()
self._geplaces = geplaces_search_handler.PlacesSearch()
# Get Style information from Places or Coordinate search handlers.
self._style = self._geplaces.style
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the federated search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the federated search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
response_type: Response type can be KML or JSONP, depending on the client.
"""
search_results = ""
search_status = False
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
self._geplaces.parameters = parameters
self._coordinate.parameters = parameters
        # Retrieve the callback function name for the JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
# Fetch additional query parameters 'flyToFirstElement' and
# 'displayKeys' from URL.
self.fly_to_first_element = self.utils.GetValue(
parameters, "flyToFirstElement")
self.display_keys_string = self.utils.GetValue(
parameters, "displayKeys")
response_type = self.utils.GetResponseType(environ)
original_query = self.utils.GetValue(parameters, "q")
if original_query:
(search_status, search_results) = self.DoSearch(
original_query, response_type)
else:
self.logger.debug("Empty search query received")
if not search_status:
folder_name = "No results were returned."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, original_query, response_type):
"""Performs the federated search and return's the results.
Args:
original_query: search query as entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
tuple containing
search_status: Whether search could be performed.
search_results: A KML/JSONP formatted string which contains search results.
"""
search_status = False
search_results = ""
self._geplaces.f_callback = self.f_callback
self._coordinate.f_callback = self.f_callback
self._geplaces.fly_to_first_element = self.fly_to_first_element
self._geplaces.display_keys_string = self.display_keys_string
self.logger.debug("Performing coordinate search on %s", original_query)
try:
(search_status, search_results) = self._coordinate.DoSearch(
original_query, response_type)
except exceptions.BadQueryException:
# If 'BadQueryException' exception occurs, ignore it
# and proceed with places search.
pass
if not search_status:
self.logger.debug(
"No search results were returned by coordinate search."
"Proceeding with places search...")
(search_status, search_results) = self._geplaces.DoSearch(
original_query, response_type)
if not search_status:
self.logger.debug(
"No search results were returned by coordinate and places search.")
return search_status, search_results
def main():
fedobj = FederatedSearch()
fedobj.DoSearch("santa clara", "KML")
if __name__ == "__main__":
main()
|
{
"content_hash": "2d1170d8d4ad912613df813d5c70a1ba",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 80,
"avg_line_length": 33.76335877862596,
"alnum_prop": 0.7056296631245761,
"repo_name": "tst-ppenev/earthenterprise",
"id": "89e10d6561ba33c167989223ca83943cc412fef2",
"size": "5020",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "earth_enterprise/src/server/wsgi/search/plugin/federated_search_handler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "134506"
},
{
"name": "C++",
"bytes": "11698506"
},
{
"name": "CSS",
"bytes": "370735"
},
{
"name": "Groovy",
"bytes": "29553"
},
{
"name": "HTML",
"bytes": "3304217"
},
{
"name": "Java",
"bytes": "9028"
},
{
"name": "JavaScript",
"bytes": "106980308"
},
{
"name": "Makefile",
"bytes": "3425"
},
{
"name": "PLpgSQL",
"bytes": "13426"
},
{
"name": "Perl",
"bytes": "396774"
},
{
"name": "Perl 6",
"bytes": "6715"
},
{
"name": "Prolog",
"bytes": "1423"
},
{
"name": "Python",
"bytes": "3712495"
},
{
"name": "QMake",
"bytes": "5293"
},
{
"name": "Roff",
"bytes": "31717"
},
{
"name": "Shell",
"bytes": "236086"
},
{
"name": "TeX",
"bytes": "70"
}
],
"symlink_target": ""
}
|
import numpy as np
class SpectralSub(object):
r"""
Here we have a class for performing **single channel** noise reduction via
spectral subtraction. The instantaneous signal energy and noise floor is
estimated at each time instance (for each frequency bin) and this is used
to compute a gain filter with which to perform spectral subtraction.
For a given frame `n`, the gain for frequency bin `k` is given by:
.. math::
        G[k, n] = \max \left\{ \left( \frac{P[k, n] - \beta P_N[k, n]}{P[k, n]} \right)^\alpha, G_{min} \right\},
where :math:`G_{min} = 10^{-(db\_reduc/20)}` and :math:`db\_reduc` is the
maximum reduction (in dB) that we are willing to perform for each bin (a
high value can actually be detrimental, see below). The instantaneous
energy :math:`P[k,n]` is computed by simply squaring the frequency
amplitude at the bin `k`. The time-frequency decomposition of the input
signal is typically done with the STFT and overlapping frames. The noise
estimate :math:`P_N[k, n]` for frequency bin `k` is given by looking back a
certain number of frames :math:`L` and selecting the bin with the lowest
energy:
.. math::
P_N[k, n] = \min_{[n-L, n]} P[k, n]
This approach works best when the SNR is positive and the noise is rather
stationary. An alternative approach for the noise estimate (also in the
case of stationary noise) would be to apply a lowpass filter for each
frequency bin.
With a large suppression, i.e. large values for :math:`db\_reduc`, we can
observe a typical artefact of such spectral subtraction approaches, namely
"musical noise".
`Here <https://www.vocal.com/noise-reduction/musical-noise/>`_ is nice
article about noise reduction and musical noise.
    Adjusting the constants :math:`\beta` and :math:`\alpha` also presents a
trade-off between suppression and undesirable artefacts, i.e. more
noticeable musical noise.
Below is an example of how to use this class to emulate a streaming/online
input. A full example can be found
`here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_spectral_subtraction.py>`__.
::
# initialize STFT and SpectralSub objects
nfft = 512
stft = pra.transform.STFT(nfft, hop=nfft//2,
analysis_window=pra.hann(nfft))
scnr = pra.denoise.SpectralSub(nfft, db_reduc=10, lookback=5,
beta=20, alpha=3)
# apply block-by-block
for n in range(num_blocks):
# go to frequency domain for noise reduction
stft.analysis(mono_noisy)
gain_filt = scnr.compute_gain_filter(stft.X)
# estimating input convolved with unknown response
mono_denoised = stft.synthesis(gain_filt*stft.X)
There also exists a "one-shot" function.
::
# import or create `noisy_signal`
denoised_signal = apply_spectral_sub(noisy_signal, nfft=512,
db_reduc=10, lookback=5,
beta=20, alpha=3)
Parameters
----------
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
"""
def __init__(self, nfft, db_reduc, lookback, beta, alpha=1):
self.beta = beta
self.alpha = alpha
self.n_bins = nfft // 2 + 1
self.p_prev = np.zeros((self.n_bins, lookback + 1))
self.gmin = 10 ** (-db_reduc / 20)
self.p_sn = np.zeros(self.n_bins)
self.p_n = np.zeros(self.n_bins)
def compute_gain_filter(self, X):
"""
Parameters
----------
X: numpy array
Complex spectrum of length ``nfft//2+1``.
Returns
-------
numpy array
Gain filter to multiply given spectrum with.
"""
# estimate of signal + noise at current time
self.p_sn[:] = np.real(np.conj(X) * X)
# estimate of noise level
self.p_prev[:, -1] = self.p_sn
self.p_n[:] = np.min(self.p_prev, axis=1)
# compute gain filter
gain_filter = [
max(
(max(self.p_sn[k] - self.beta * self.p_n[k], 0) / self.p_sn[k])
** self.alpha,
self.gmin,
)
for k in range(self.n_bins)
]
# update
self.p_prev = np.roll(self.p_prev, -1, axis=1)
return gain_filter
def apply_spectral_sub(
noisy_signal, nfft=512, db_reduc=25, lookback=12, beta=30, alpha=1
):
"""
One-shot function to apply spectral subtraction approach.
Parameters
----------
noisy_signal : numpy array
Real signal in time domain.
nfft: int
FFT size. Length of gain filter, i.e. the number of frequency bins, is
given by ``nfft//2+1``.
db_reduc: float
Maximum reduction in dB for each bin.
lookback: int
How many frames to look back for the noise estimate.
beta: float
Overestimation factor to "push" the gain filter value (at each
frequency) closer to the dB reduction specified by ``db_reduc``.
alpha: float, optional
Exponent factor to modify transition behavior towards the dB reduction
specified by ``db_reduc``. Default is 1.
Returns
-------
numpy array
Enhanced/denoised signal.
"""
from pyroomacoustics import hann
from pyroomacoustics.transform import STFT
hop = nfft // 2
window = hann(nfft, flag="asymmetric", length="full")
stft = STFT(nfft, hop=hop, analysis_window=window, streaming=True)
scnr = SpectralSub(nfft, db_reduc, lookback, beta, alpha)
processed_audio = np.zeros(noisy_signal.shape)
n = 0
while noisy_signal.shape[0] - n >= hop:
# SCNR in frequency domain
stft.analysis(
noisy_signal[
n : (n + hop),
]
)
gain_filt = scnr.compute_gain_filter(stft.X)
# back to time domain
processed_audio[
n : n + hop,
] = stft.synthesis(gain_filt * stft.X)
# update step
n += hop
return processed_audio
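# --- Demo sketch (illustrative): denoising a synthetic noisy tone ---
#
#   import numpy as np
#   fs = 16000
#   t = np.arange(fs) / fs
#   clean = np.sin(2 * np.pi * 440 * t)
#   noisy = clean + 0.1 * np.random.randn(len(t))
#   denoised = apply_spectral_sub(noisy, nfft=512, db_reduc=10,
#                                 lookback=5, beta=20, alpha=3)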
|
{
"content_hash": "7e4cd66ed475409bdf0ebf5a86baf655",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 122,
"avg_line_length": 33.41871921182266,
"alnum_prop": 0.5994988207547169,
"repo_name": "LCAV/pyroomacoustics",
"id": "c4b3bfd165b12d53cc5a064b35985afd5859f269",
"size": "8083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyroomacoustics/denoise/spectral_subtraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "96552"
},
{
"name": "Cython",
"bytes": "2700"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Python",
"bytes": "941773"
}
],
"symlink_target": ""
}
|
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from cinder.i18n import _
CONF = cfg.CONF
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, _port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
core_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for cinder-api'),
cfg.StrOpt('state_path',
default='/var/lib/cinder',
deprecated_name='pybasedir',
help="Top-level directory for maintaining cinder's state"), ]
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='IP address of this host'),
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance host name or IP'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance API servers available to cinder '
'([hostname|ip]:port)'),
cfg.IntOpt('glance_api_version',
default=1,
help='Version of the glance API to use'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.BoolOpt('glance_api_ssl_compression',
default=False,
help='Enables or disables negotiation of SSL layer '
'compression. In some cases disabling compression '
'can improve data throughput, such as when high '
'network bandwidth is available and you use '
'compressed image formats like qcow2.'),
cfg.StrOpt('glance_ca_certificates_file',
help='Location of ca certificates file to use for glance '
'client requests.'),
cfg.IntOpt('glance_request_timeout',
default=None,
help='http/https timeout value for glance operations. If no '
'value (None) is supplied here, the glanceclient default '
'value is used.'),
cfg.StrOpt('scheduler_topic',
default='cinder-scheduler',
help='The topic that scheduler nodes listen on'),
cfg.StrOpt('volume_topic',
default='cinder-volume',
help='The topic that volume nodes listen on'),
cfg.StrOpt('backup_topic',
default='cinder-backup',
help='The topic that volume backup nodes listen on'),
cfg.BoolOpt('enable_v1_api',
default=True,
help=_("DEPRECATED: Deploy v1 of the Cinder API.")),
cfg.BoolOpt('enable_v2_api',
default=True,
help=_("Deploy v2 of the Cinder API.")),
cfg.BoolOpt('api_rate_limit',
default=True,
help='Enables or disables rate limit of the API.'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with cinder.api.contrib.'
'select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=['cinder.api.contrib.standard_extensions'],
help='osapi volume extension to load'),
cfg.StrOpt('volume_manager',
default='cinder.volume.manager.VolumeManager',
help='Full class name for the Manager for volume'),
cfg.StrOpt('backup_manager',
default='cinder.backup.manager.BackupManager',
help='Full class name for the Manager for volume backup'),
cfg.StrOpt('scheduler_manager',
default='cinder.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a host name, FQDN, or IP address.'),
# NOTE(vish): default to nova for compatibility with nova installs
cfg.StrOpt('storage_availability_zone',
default='nova',
help='Availability zone of this node'),
cfg.StrOpt('default_availability_zone',
default=None,
help='Default availability zone for new volumes. If not set, '
'the storage_availability_zone option value is used as '
'the default for new volumes.'),
cfg.StrOpt('default_volume_type',
default=None,
help='Default volume type to use'),
cfg.StrOpt('volume_usage_audit_period',
default='month',
help='Time period for which to generate volume usages. '
'The options are hour, day, month, or year.'),
cfg.StrOpt('rootwrap_config',
default='/etc/cinder/rootwrap.conf',
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Enable monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[],
help='List of modules/decorators to monkey patch'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for a service to be '
'considered up'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('backup_api_class',
default='cinder.backup.api.API',
help='The full class name of the volume backup API class'),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth. Supports noauth, keystone, '
'and deprecated.'),
cfg.ListOpt('enabled_backends',
default=None,
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help='Whether snapshots count against GigaByte quota'),
cfg.StrOpt('transfer_api_class',
default='cinder.transfer.api.API',
help='The full class name of the volume transfer API class'),
cfg.StrOpt('replication_api_class',
default='cinder.replication.api.API',
help='The full class name of the volume replication API class'),
cfg.StrOpt('consistencygroup_api_class',
default='cinder.consistencygroup.api.API',
help='The full class name of the consistencygroup API class'),
cfg.StrOpt('os_privileged_user_name',
default=None,
help='OpenStack privileged account username. Used for requests '
'to other services (such as Nova) that require an account '
'with special rights.'),
cfg.StrOpt('os_privileged_user_password',
default=None,
help='Password associated with the OpenStack privileged '
'account.'),
cfg.StrOpt('os_privileged_user_tenant',
default=None,
help='Tenant name associated with the OpenStack privileged '
'account.'),
]
CONF.register_opts(global_opts)
|
{
"content_hash": "140a2a583da7ab5adf1b8d6feb10120e",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 42.431372549019606,
"alnum_prop": 0.5724353049907579,
"repo_name": "blueboxgroup/cinder",
"id": "e3761bb285b0ced1cacf26151962f83f92a49f98",
"size": "9446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10024269"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
}
|
import decimal
from .._gae import ndb
# TODO Needs more testing
class NDBDecimalProperty(ndb.StringProperty):
"""
NDB decimal implementation
"""
data_type = decimal.Decimal
    def __init__(self, precision, scale, **kwargs):
        # Initialize the underlying StringProperty so indexing/naming
        # kwargs are honored; ``precision`` is kept for signature
        # compatibility since storage is string-based.
        super(NDBDecimalProperty, self).__init__(**kwargs)
        # Quantization template, e.g. scale=2 -> Decimal("1.00")
        self.round = decimal.Decimal("1." + "0" * scale)
def _to_base_type(self, value):
if value is None or value == "":
return None
else:
return str(value)
def _from_base_type(self, value):
if value is None or value == "":
return None
else:
return decimal.Decimal(value).quantize(self.round)
def _validate(self, value):
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise TypeError("Property %s must be a Decimal or string." % self._name)
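# --- Illustrative example (not part of the module) of the rounding above ---
# With scale=2 the quantization template is Decimal("1.00"), so a stored
# string "3.14159" reads back rounded to two decimal places:
#
#   decimal.Decimal("3.14159").quantize(decimal.Decimal("1.00"))  # -> 3.14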
|
{
"content_hash": "aef59a32eca9c3a957a32b43e5c47316",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 26.243243243243242,
"alnum_prop": 0.5787847579814624,
"repo_name": "web2py/pydal",
"id": "e22f2a9138bce767843062626aa19a10067ebec8",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydal/helpers/gae.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "440"
},
{
"name": "Python",
"bytes": "1348608"
}
],
"symlink_target": ""
}
|
import copy
from logging import config as logging_config
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import environment
from alembic import migration as alembic_migration
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import importutils
import pkg_resources
import six
from neutron._i18n import _
from neutron.common import utils
from neutron.db import migration
from neutron.db.migration.connection import DBConnection
HEAD_FILENAME = 'HEAD'
HEADS_FILENAME = 'HEADS'
CONTRACT_HEAD_FILENAME = 'CONTRACT_HEAD'
EXPAND_HEAD_FILENAME = 'EXPAND_HEAD'
CURRENT_RELEASE = migration.NEWTON
RELEASES = (
migration.LIBERTY,
migration.MITAKA,
migration.NEWTON,
)
EXPAND_BRANCH = 'expand'
CONTRACT_BRANCH = 'contract'
MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations'
migration_entrypoints = {
entrypoint.name: entrypoint
for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS)
}
neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini')
INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints]
_core_opts = [
cfg.StrOpt('subproject',
choices=INSTALLED_SUBPROJECTS,
help=(_("The subproject to execute the command against. "
"Can be one of: '%s'.")
% "', '".join(INSTALLED_SUBPROJECTS))),
cfg.BoolOpt('split_branches',
default=True,
deprecated_for_removal=True,
help=_("DEPRECATED. Alembic environments integrating with "
"Neutron must implement split (contract and expand) "
"branches file structure."))
]
_db_opts = [
cfg.StrOpt('connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine for which script will be generated '
'when using offline migration.')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_core_opts)
CONF.register_cli_opts(_db_opts, 'database')
def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs):
args = []
if revision:
args.append(revision)
project = config.get_main_option('neutron_project')
if desc:
alembic_util.msg(_('Running %(cmd)s (%(desc)s) for %(project)s ...') %
{'cmd': cmd, 'desc': desc, 'project': project})
else:
alembic_util.msg(_('Running %(cmd)s for %(project)s ...') %
{'cmd': cmd, 'project': project})
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(six.text_type(e))
alembic_util.msg(_('OK'))
def _get_alembic_entrypoint(project):
if project not in migration_entrypoints:
alembic_util.err(_('Sub-project %s not installed.') % project)
return migration_entrypoints[project]
def do_generic_show(config, cmd):
kwargs = {'verbose': CONF.command.verbose}
do_alembic_command(config, cmd, **kwargs)
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_revisions(config)
validate_head_files(config)
def add_alembic_subparser(sub, cmd):
return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
def add_branch_options(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument('--expand', action='store_true')
group.add_argument('--contract', action='store_true')
return group
def _find_milestone_revisions(config, milestone, branch=None):
"""Return the revision(s) for a given milestone."""
script = alembic_script.ScriptDirectory.from_config(config)
return [
(m.revision, label)
for m in _get_revisions(script)
for label in (m.branch_labels or [None])
if milestone in getattr(m.module, 'neutron_milestone', []) and
(branch is None or branch in m.branch_labels)
]
def do_upgrade(config, cmd):
branch = None
if ((CONF.command.revision or CONF.command.delta) and
(CONF.command.expand or CONF.command.contract)):
raise SystemExit(_(
'Phase upgrade options do not accept revision specification'))
if CONF.command.expand:
branch = EXPAND_BRANCH
revision = _get_branch_head(EXPAND_BRANCH)
elif CONF.command.contract:
branch = CONTRACT_BRANCH
revision = _get_branch_head(CONTRACT_BRANCH)
elif not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
else:
revision = CONF.command.revision or ''
if '-' in revision:
raise SystemExit(_('Negative relative revision (downgrade) not '
'supported'))
delta = CONF.command.delta
if delta:
if '+' in revision:
raise SystemExit(_('Use either --delta or relative revision, '
'not both'))
if delta < 0:
raise SystemExit(_('Negative delta (downgrade) not supported'))
revision = '%s+%d' % (revision, delta)
# leave branchless 'head' revision request backward compatible by
# applying all heads in all available branches.
if revision == 'head':
revision = 'heads'
if revision in migration.NEUTRON_MILESTONES:
expand_revisions = _find_milestone_revisions(config, revision,
EXPAND_BRANCH)
contract_revisions = _find_milestone_revisions(config, revision,
CONTRACT_BRANCH)
# Expand revisions must be run before contract revisions
revisions = expand_revisions + contract_revisions
else:
revisions = [(revision, branch)]
for revision, branch in revisions:
if not CONF.command.sql:
run_sanity_checks(config, revision)
do_alembic_command(config, cmd, revision=revision,
desc=branch, sql=CONF.command.sql)
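# Illustrative CLI flows (assuming the standard neutron-db-manage entry
# point that is wired to this module; flags as registered in
# add_command_parsers below):
#   neutron-db-manage upgrade --expand    # apply the expand@head phase
#   neutron-db-manage upgrade --contract  # then the contract@head phase
#   neutron-db-manage upgrade newton      # all revisions for a milestone
#   neutron-db-manage upgrade --delta 2   # relative upgrade by two steps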
def no_downgrade(config, cmd):
raise SystemExit(_("Downgrade no longer supported"))
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
revision=CONF.command.revision,
sql=CONF.command.sql)
def _get_branch_head(branch):
'''Get the latest @head specification for a branch.'''
return '%s@head' % branch
def _check_bootstrap_new_branch(branch, version_path, addn_kwargs):
addn_kwargs['version_path'] = version_path
addn_kwargs['head'] = _get_branch_head(branch)
if not os.path.exists(version_path):
# Bootstrap initial directory structure
utils.ensure_dir(version_path)
def do_revision(config, cmd):
kwargs = {
'message': CONF.command.message,
'autogenerate': CONF.command.autogenerate,
'sql': CONF.command.sql,
}
branches = []
if CONF.command.expand:
kwargs['head'] = 'expand@head'
branches.append(EXPAND_BRANCH)
elif CONF.command.contract:
kwargs['head'] = 'contract@head'
branches.append(CONTRACT_BRANCH)
else:
branches = MIGRATION_BRANCHES
if not CONF.command.autogenerate:
for branch in branches:
args = copy.copy(kwargs)
version_path = _get_version_branch_path(
config, release=CURRENT_RELEASE, branch=branch)
_check_bootstrap_new_branch(branch, version_path, args)
do_alembic_command(config, cmd, **args)
else:
# autogeneration code will take care of enforcing proper directories
do_alembic_command(config, cmd, **kwargs)
update_head_files(config)
def _get_release_labels(labels):
result = set()
for label in labels:
        # release labels were introduced in Liberty for a short time and
        # dropped in that same release cycle
result.add('%s_%s' % (migration.LIBERTY, label))
return result
def _compare_labels(revision, expected_labels):
# validate that the script has expected labels only
bad_labels = revision.branch_labels - expected_labels
if bad_labels:
# NOTE(ihrachyshka): this hack is temporary to accommodate those
# projects that already initialized their branches with liberty_*
# labels. Let's notify them about the deprecation for now and drop it
# later.
bad_labels_with_release = (revision.branch_labels -
_get_release_labels(expected_labels))
if not bad_labels_with_release:
alembic_util.warn(
_('Release aware branch labels (%s) are deprecated. '
'Please switch to expand@ and contract@ '
'labels.') % bad_labels)
return
script_name = os.path.basename(revision.path)
alembic_util.err(
_('Unexpected label for script %(script_name)s: %(labels)s') %
{'script_name': script_name,
'labels': bad_labels}
)
def _validate_single_revision_labels(script_dir, revision, label=None):
expected_labels = set()
if label is not None:
expected_labels.add(label)
_compare_labels(revision, expected_labels)
# if it's not the root element of the branch, expect the parent of the
# script to have the same label
if revision.down_revision is not None:
down_revision = script_dir.get_revision(revision.down_revision)
_compare_labels(down_revision, expected_labels)
def _validate_revision(script_dir, revision):
for branch in MIGRATION_BRANCHES:
if branch in revision.path:
_validate_single_revision_labels(
script_dir, revision, label=branch)
return
# validate script from branchless part of migration rules
_validate_single_revision_labels(script_dir, revision)
def validate_revisions(config):
script_dir = alembic_script.ScriptDirectory.from_config(config)
revisions = _get_revisions(script_dir)
for revision in revisions:
_validate_revision(script_dir, revision)
branchpoints = _get_branch_points(script_dir)
if len(branchpoints) > 1:
branchpoints = ', '.join(p.revision for p in branchpoints)
alembic_util.err(
_('Unexpected number of alembic branch points: %(branchpoints)s') %
{'branchpoints': branchpoints}
)
def _get_revisions(script):
return list(script.walk_revisions(base='base', head='heads'))
def _get_branch_points(script):
branchpoints = []
for revision in _get_revisions(script):
if revision.is_branch_point:
branchpoints.append(revision)
return branchpoints
def _get_heads_map(config):
script = alembic_script.ScriptDirectory.from_config(config)
heads = script.get_heads()
head_map = {}
for head in heads:
if CONTRACT_BRANCH in script.get_revision(head).branch_labels:
head_map[CONTRACT_BRANCH] = head
else:
head_map[EXPAND_BRANCH] = head
return head_map
def _check_head(branch_name, head_file, head):
try:
with open(head_file) as file_:
observed_head = file_.read().strip()
except IOError:
pass
else:
if observed_head != head:
alembic_util.err(
_('%(branch)s HEAD file does not match migration timeline '
'head, expected: %(head)s') % {'branch': branch_name.title(),
'head': head})
def validate_head_files(config):
'''Check that HEAD files contain the latest head for the branch.'''
contract_head = _get_contract_head_file_path(config)
expand_head = _get_expand_head_file_path(config)
if not os.path.exists(contract_head) or not os.path.exists(expand_head):
alembic_util.warn(_("Repository does not contain HEAD files for "
"contract and expand branches."))
return
head_map = _get_heads_map(config)
_check_head(CONTRACT_BRANCH, contract_head, head_map[CONTRACT_BRANCH])
_check_head(EXPAND_BRANCH, expand_head, head_map[EXPAND_BRANCH])
def update_head_files(config):
'''Update HEAD files with the latest branch heads.'''
head_map = _get_heads_map(config)
contract_head = _get_contract_head_file_path(config)
expand_head = _get_expand_head_file_path(config)
with open(contract_head, 'w+') as f:
f.write(head_map[CONTRACT_BRANCH] + '\n')
with open(expand_head, 'w+') as f:
f.write(head_map[EXPAND_BRANCH] + '\n')
old_head_file = _get_head_file_path(config)
old_heads_file = _get_heads_file_path(config)
for file_ in (old_head_file, old_heads_file):
fileutils.delete_if_exists(file_)
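# Resulting layout sketch (file names from the constants above; the
# revision ids are whatever alembic reports as the branch heads):
#   <versions dir>/CONTRACT_HEAD  -> head revision of the contract branch
#   <versions dir>/EXPAND_HEAD    -> head revision of the expand branch
# The legacy single-head HEAD / HEADS files are deleted if present.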
def _get_current_database_heads(config):
with DBConnection(config.neutron_config.database.connection) as conn:
opts = {
'version_table': get_alembic_version_table(config)
}
context = alembic_migration.MigrationContext.configure(
conn, opts=opts)
return context.get_current_heads()
def has_offline_migrations(config, cmd):
heads_map = _get_heads_map(config)
if heads_map[CONTRACT_BRANCH] not in _get_current_database_heads(config):
# If there is at least one contract revision not applied to database,
# it means we should shut down all neutron-server instances before
# proceeding with upgrade.
project = config.get_main_option('neutron_project')
        alembic_util.msg(_('Need to apply migrations from %(project)s '
                           'contract branch. This will require all Neutron '
                           'server instances to be shut down before '
                           'proceeding with the upgrade.') %
                         {"project": project})
return True
return False
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches', 'heads']:
parser = add_alembic_subparser(subparsers, name)
parser.set_defaults(func=do_generic_show)
parser.add_argument('--verbose',
action='store_true',
help='Display more verbose output for the '
'specified command')
help_text = (getattr(alembic_command, 'branches').__doc__ +
' and validate head file')
parser = subparsers.add_parser('check_migration', help=help_text)
parser.set_defaults(func=do_check_migration)
parser = add_alembic_subparser(subparsers, 'upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.add_argument('--mysql-engine',
default='',
help='Change MySQL storage engine of current '
'existing tables')
add_branch_options(parser)
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('downgrade', help="(No longer supported)")
parser.add_argument('None', nargs='?', help="Downgrade not supported")
parser.set_defaults(func=no_downgrade)
parser = add_alembic_subparser(subparsers, 'stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = add_alembic_subparser(subparsers, 'revision')
parser.add_argument('-m', '--message')
parser.add_argument('--sql', action='store_true')
group = add_branch_options(parser)
group.add_argument('--autogenerate', action='store_true')
parser.set_defaults(func=do_revision)
parser = subparsers.add_parser(
'has_offline_migrations',
        help='Determine whether there are pending migration scripts that '
             'require full shutdown for all services that directly access '
             'the database.')
parser.set_defaults(func=has_offline_migrations)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def _get_project_base(config):
'''Return the base python namespace name for a project.'''
script_location = config.get_main_option('script_location')
return script_location.split(':')[0].split('.')[0]
def _get_package_root_dir(config):
root_module = importutils.try_import(_get_project_base(config))
if not root_module:
project = config.get_main_option('neutron_project')
alembic_util.err(_("Failed to locate source for %s.") % project)
# The root_module.__file__ property is a path like
# '/opt/stack/networking-foo/networking_foo/__init__.py'
# We return just
# '/opt/stack/networking-foo'
return os.path.dirname(os.path.dirname(root_module.__file__))
def _get_root_versions_dir(config):
'''Return root directory that contains all migration rules.'''
root_dir = _get_package_root_dir(config)
script_location = config.get_main_option('script_location')
# Script location is something like:
# 'project_base.db.migration:alembic_migrations'
# Convert it to:
# 'project_base/db/migration/alembic_migrations/versions'
part1, part2 = script_location.split(':')
parts = part1.split('.') + part2.split('.') + ['versions']
# Return the absolute path to the versions dir
return os.path.join(root_dir, *parts)
def _get_head_file_path(config):
'''Return the path of the file that contains single head.'''
return os.path.join(
_get_root_versions_dir(config),
HEAD_FILENAME)
def _get_heads_file_path(config):
'''
Return the path of the file that was once used to maintain the list of
latest heads.
'''
return os.path.join(
_get_root_versions_dir(config),
HEADS_FILENAME)
def _get_contract_head_file_path(config):
'''
Return the path of the file that is used to maintain contract head
'''
return os.path.join(
_get_root_versions_dir(config),
CONTRACT_HEAD_FILENAME)
def _get_expand_head_file_path(config):
'''
Return the path of the file that is used to maintain expand head
'''
return os.path.join(
_get_root_versions_dir(config),
EXPAND_HEAD_FILENAME)
def _get_version_branch_path(config, release=None, branch=None):
version_path = _get_root_versions_dir(config)
if branch and release:
return os.path.join(version_path, release, branch)
return version_path
def _set_version_locations(config):
'''Make alembic see all revisions in all migration branches.'''
split_branches = False
version_paths = [_get_version_branch_path(config)]
for release in RELEASES:
for branch in MIGRATION_BRANCHES:
version_path = _get_version_branch_path(config, release, branch)
if split_branches or os.path.exists(version_path):
split_branches = True
version_paths.append(version_path)
config.set_main_option('version_locations', ' '.join(version_paths))
def _get_installed_entrypoint(subproject):
'''Get the entrypoint for the subproject, which must be installed.'''
if subproject not in migration_entrypoints:
alembic_util.err(_('Package %s not installed') % subproject)
return migration_entrypoints[subproject]
def _get_subproject_script_location(subproject):
'''Get the script location for the installed subproject.'''
entrypoint = _get_installed_entrypoint(subproject)
return ':'.join([entrypoint.module_name, entrypoint.attrs[0]])
def _get_subproject_base(subproject):
'''Get the import base name for the installed subproject.'''
entrypoint = _get_installed_entrypoint(subproject)
return entrypoint.module_name.split('.')[0]
def get_alembic_version_table(config):
script_dir = alembic_script.ScriptDirectory.from_config(config)
alembic_version_table = [None]
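    # EnvironmentContext calls the fn below with a revision and the
    # migration context; the one-element list is a mutable closure cell
    # that lets the nested function hand the configured version_table
    # name back out. No migration actually runs: the callback returns [].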
def alembic_version_table_from_env(rev, context):
alembic_version_table[0] = context.version_table
return []
with environment.EnvironmentContext(config, script_dir,
fn=alembic_version_table_from_env):
script_dir.run_env()
return alembic_version_table[0]
def get_alembic_configs():
'''Return a list of alembic configs, one per project.
'''
# Get the script locations for the specified or installed projects.
# Which projects to get script locations for is determined by the CLI
# options as follows:
# --subproject P # only subproject P (where P can be neutron)
# (none specified) # neutron and all installed subprojects
script_locations = {}
if CONF.subproject:
script_location = _get_subproject_script_location(CONF.subproject)
script_locations[CONF.subproject] = script_location
else:
for subproject, ep in migration_entrypoints.items():
script_locations[subproject] = _get_subproject_script_location(
subproject)
# Return a list of alembic configs from the projects in the
# script_locations dict. If neutron is in the list it is first.
configs = []
project_seq = sorted(script_locations.keys())
# Core neutron must be the first project if there is more than one
if len(project_seq) > 1 and 'neutron' in project_seq:
project_seq.insert(0, project_seq.pop(project_seq.index('neutron')))
for project in project_seq:
config = alembic_config.Config(neutron_alembic_ini)
config.set_main_option('neutron_project', project)
script_location = script_locations[project]
config.set_main_option('script_location', script_location)
_set_version_locations(config)
config.neutron_config = CONF
configs.append(config)
return configs
def get_neutron_config():
# Neutron's alembic config is always the first one
return get_alembic_configs()[0]
def run_sanity_checks(config, revision):
script_dir = alembic_script.ScriptDirectory.from_config(config)
def check_sanity(rev, context):
# TODO(ihrachyshka): here we use internal API for alembic; we may need
# alembic to expose implicit_base= argument into public
# iterate_revisions() call
for script in script_dir.revision_map.iterate_revisions(
revision, rev, implicit_base=True):
if hasattr(script.module, 'check_sanity'):
script.module.check_sanity(context.connection)
return []
with environment.EnvironmentContext(config, script_dir,
fn=check_sanity,
starting_rev=None,
destination_rev=revision):
script_dir.run_env()
def get_engine_config():
return [obj for obj in _db_opts if obj.name == 'engine']
def main():
# Interpret the config file for Python logging.
# This line sets up loggers basically.
logging_config.fileConfig(neutron_alembic_ini)
CONF(project='neutron')
return_val = False
for config in get_alembic_configs():
#TODO(gongysh) enable logging
return_val |= bool(CONF.command.func(config, CONF.command.name))
if CONF.command.name == 'has_offline_migrations' and not return_val:
alembic_util.msg(_('No offline migrations pending.'))
return return_val
|
{
"content_hash": "39be2ccdbed0f9830ffe831db2a9febd",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 79,
"avg_line_length": 35.38552437223043,
"alnum_prop": 0.6365419936550343,
"repo_name": "igor-toga/local-snat",
"id": "2c7090f0daa2a6321546be8ee29f580e753ac58c",
"size": "24583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/migration/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
}
|
"""Provide a TestCase base class for PageTest subclasses' unittests."""
import unittest
from telemetry import benchmark
from telemetry import story
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.testing import options_for_unittests
class BasicTestPage(page_module.Page):
def __init__(self, url, story_set, base_dir):
super(BasicTestPage, self).__init__(url, story_set, base_dir)
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
def __init__(self):
super(EmptyMetadataForTest, self).__init__('')
class PageTestTestCase(unittest.TestCase):
"""A base class to simplify writing unit tests for PageTest subclasses."""
def CreateStorySetFromFileInUnittestDataDir(self, test_filename):
ps = self.CreateEmptyPageSet()
page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir)
ps.AddStory(page)
return ps
def CreateEmptyPageSet(self):
base_dir = util.GetUnittestDataDir()
ps = story.StorySet(base_dir=base_dir)
return ps
def RunMeasurement(self, measurement, ps,
expectations=test_expectations.TestExpectations(),
options=None):
"""Runs a measurement against a pageset, returning the rows its outputs."""
if options is None:
options = options_for_unittests.GetCopy()
assert options
temp_parser = options.CreateParser()
story_runner.AddCommandLineArgs(temp_parser)
defaults = temp_parser.get_default_values()
for k, v in defaults.__dict__.items():
if hasattr(options, k):
continue
setattr(options, k, v)
measurement.CustomizeBrowserOptions(options.browser_options)
options.output_file = None
options.output_formats = ['none']
options.suppress_gtest_report = True
options.output_trace_tag = None
story_runner.ProcessCommandLineArgs(temp_parser, options)
results = results_options.CreateResults(EmptyMetadataForTest(), options)
story_runner.Run(measurement, ps, expectations, options, results)
return results
def TestTracingCleanedUp(self, measurement_class, options=None):
ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
start_tracing_called = [False]
stop_tracing_called = [False]
class BuggyMeasurement(measurement_class):
def __init__(self, *args, **kwargs):
measurement_class.__init__(self, *args, **kwargs)
# Inject fake tracing methods to tracing_controller
def TabForPage(self, page, browser):
ActualStartTracing = browser.platform.tracing_controller.Start
def FakeStartTracing(*args, **kwargs):
ActualStartTracing(*args, **kwargs)
start_tracing_called[0] = True
raise exceptions.IntentionalException
browser.StartTracing = FakeStartTracing
ActualStopTracing = browser.platform.tracing_controller.Stop
def FakeStopTracing(*args, **kwargs):
result = ActualStopTracing(*args, **kwargs)
stop_tracing_called[0] = True
return result
browser.platform.tracing_controller.Stop = FakeStopTracing
return measurement_class.TabForPage(self, page, browser)
measurement = BuggyMeasurement()
try:
self.RunMeasurement(measurement, ps, options=options)
except page_test.TestNotSupportedOnPlatformError:
pass
if start_tracing_called[0]:
self.assertTrue(stop_tracing_called[0])
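# Illustrative subclass sketch (MyMeasurement is a hypothetical
# page_test.PageTest subclass, not part of telemetry):
#   class MyMeasurementTest(PageTestTestCase):
#     def testRecordsResults(self):
#       ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
#       results = self.RunMeasurement(MyMeasurement(), ps)
#       self.assertFalse(results.failures)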
|
{
"content_hash": "d6b01ff0109e6eca6003477bcb865c22",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 36.601941747572816,
"alnum_prop": 0.7222811671087533,
"repo_name": "chuan9/chromium-crosswalk",
"id": "f753ed025da5cff5b1d9b2ab2f775dc1cab6dfff",
"size": "3933",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/testing/page_test_test_case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9417055"
},
{
"name": "C++",
"bytes": "240920124"
},
{
"name": "CSS",
"bytes": "938860"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27258381"
},
{
"name": "Java",
"bytes": "14580273"
},
{
"name": "JavaScript",
"bytes": "20507007"
},
{
"name": "Makefile",
"bytes": "70992"
},
{
"name": "Objective-C",
"bytes": "1742904"
},
{
"name": "Objective-C++",
"bytes": "9967587"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "480579"
},
{
"name": "Python",
"bytes": "8519074"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import sys
import os
def getTTYSize():
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
def prompt(prefix, echo = True):
import termios
ttystate = None
if sys.stdin.isatty() and not echo:
ttystate = termios.tcgetattr(sys.stdin)
ttynewstate = termios.tcgetattr(sys.stdin)
ttynewstate[3] = ttynewstate[3] & ~termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, ttynewstate)
result = None
try:
result = raw_input(prefix)
if not echo:
sys.stdout.write('\n') # the non-echoed mode will not emit the newline
except KeyboardInterrupt:
pass
if sys.stdin.isatty() and not echo:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, ttystate)
return result
class TtyState:
def __init__(self):
self.state = None
def __enter__(self):
self.store()
return self
    def __exit__(self, type, value, traceback):
self.restore()
def store(self):
if sys.stdout.isatty():
import hopper.utils.process
result = hopper.utils.process.Process.run([ "stty", "-g" ])
if result[0] == 0:
self.state = result[1].strip()
def restore(self):
if sys.stdout.isatty() and self.state:
import hopper.utils.process
hopper.utils.process.Process.run([ "stty", self.state ])
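# Illustrative usage (relies on the hopper.utils.process helper imported
# above; 'stty' must be on PATH):
#   with TtyState():
#       password = prompt('Password: ', echo=False)
# The terminal settings captured via 'stty -g' on entry are restored on
# exit, even if the prompt is interrupted.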
|
{
"content_hash": "b133a26825e358a148544e82e623caab",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 73,
"avg_line_length": 23.214285714285715,
"alnum_prop": 0.6750769230769231,
"repo_name": "Xilinx/hopper",
"id": "47b207451a79b5dd06bdfabdb6b8f42752f340ae",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hopper/utils/console/TtyHelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150323"
}
],
"symlink_target": ""
}
|
from pypom import Page, Region
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as expected
class Search(Page):
_search_box_locator = (By.CLASS_NAME, 'AutoSearchInput-query')
_submit_button_locator = (By.CLASS_NAME, 'AutoSearchInput-submit-button')
_search_filters_sort_locator = (By.ID, 'SearchFilters-Sort')
_search_filters_type_locator = (By.ID, 'SearchFilters-AddonType')
_search_filters_os_locator = (By.ID, 'SearchFilters-OperatingSystem')
def wait_for_page_to_load(self):
self.wait.until(
expected.invisibility_of_element_located(
(By.CLASS_NAME, 'LoadingText')))
return self
@property
def result_list(self):
return self.SearchResultList(self)
def filter_by_sort(self, value):
self.find_element(*self._search_filters_sort_locator).click()
self.find_element(*self._search_filters_sort_locator).send_keys(value)
def filter_by_type(self, value):
self.find_element(*self._search_filters_type_locator).click()
self.find_element(*self._search_filters_type_locator).send_keys(value)
def filter_by_os(self, value):
self.find_element(*self._search_filters_os_locator).click()
self.find_element(*self._search_filters_os_locator).send_keys(value)
class SearchResultList(Region):
_result_locator = (By.CLASS_NAME, 'SearchResult')
_theme_locator = (By.CLASS_NAME, 'SearchResult--theme')
_extension_locator = (By.CLASS_NAME, 'SearchResult-name')
@property
def extensions(self):
items = self.find_elements(*self._result_locator)
return [self.ResultListItems(self, el) for el in items]
@property
def themes(self):
items = self.find_elements(*self._theme_locator)
return [self.ResultListItems(self, el) for el in items]
class ResultListItems(Region):
_rating_locator = (By.CSS_SELECTOR, '.Rating--small')
_search_item_name_locator = (By.CSS_SELECTOR,
'.SearchResult-contents > h2')
_users_locator = (By.CLASS_NAME, 'SearchResult-users-text')
@property
def name(self):
return self.find_element(*self._search_item_name_locator).text
def link(self):
self.find_element(*self._search_item_name_locator).click()
@property
def users(self):
users = self.find_element(*self._users_locator).text
return int(
users.split()[0].replace(',', '').replace('users', ''))
@property
def rating(self):
"""Returns the rating"""
rating = self.find_element(
*self._rating_locator).get_property('title')
return int(rating.split()[1])
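# Illustrative usage sketch ('driver' and 'base_url' are assumed test
# fixtures, and the option labels passed to the filters are assumptions;
# pypom pages take driver and base_url at construction time):
#   page = Search(driver, base_url).wait_for_page_to_load()
#   page.filter_by_sort('Most Users')
#   top = page.result_list.extensions[0]
#   assert top.users > 0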
|
{
"content_hash": "ce76c2b7895a40cff7b39f0897789bba",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 38.246753246753244,
"alnum_prop": 0.6050933786078099,
"repo_name": "atiqueahmedziad/addons-server",
"id": "8592462c041b4787bbf1cbfc36cdbe0b1ec92053",
"size": "2945",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/ui/pages/desktop/search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810065"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "599024"
},
{
"name": "JavaScript",
"bytes": "1070220"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5272277"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1497"
}
],
"symlink_target": ""
}
|
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision$"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
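# e.g. cram('supercalifragilistic', 10) -> 'sup...stic' (three leading and
# four trailing characters around the ellipsis).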
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
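# e.g. stripid('<function pathdirs at 0x7f0123456789>') -> '<function pathdirs>'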
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
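# e.g. _split_list([1, 2, 3, 4], lambda n: n % 2) == ([1, 3], [2, 4])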
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
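# Illustrative behaviour (module names assumed importable):
#   safeimport('os.path')              # -> the os.path module object
#   safeimport('no.such.module')       # -> None (not found)
#   safeimport('os.path', forceload=1) # drop cached copy, then re-import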
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
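    # Illustrative transformation (output wrapped for readability; with
    # the default empty contexts, plain words pass through unchanged):
    #   HTMLDoc().markup('See PEP 257 and RFC 2822.')
    #   -> 'See <a href="http://www.python.org/dev/peps/pep-0257/">PEP 257</a>
    #       and <a href="http://www.rfc-editor.org/rfc/rfc2822.txt">RFC 2822</a>.'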
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
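# Illustrative only, not part of pydoc: HTMLDoc.document() returns an HTML
# fragment for any object; the module-level helpers below (writedoc, serve)
# build full pages from it. TextRepr is just a handy class to document.
def _demo_htmldoc():
    fragment = HTMLDoc().document(TextRepr, 'TextRepr')
    assert fragment.startswith('<')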
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
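# Illustrative only, not part of pydoc: a minimal sketch of how TextRepr
# truncates long values; the sample data below is made up.
def _demo_textrepr():
    r = TextRepr()
    print r.repr('x' * 500)     # strings are cut down to r.maxstring chars
    print r.repr(range(100))    # lists are cut down to r.maxlist elements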
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
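# Illustrative only, a minimal sketch assuming a POSIX host with 'less' on
# PATH: setting PAGER before the first pager() call steers getpager()
# toward pipepager.
def _demo_pager_choice():
    os.environ['PAGER'] = 'less'
    pager('demo text\n' * 100)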
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
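# A minimal sketch of the overstrike convention: TextDoc.bold() emits
# ch + backspace + ch for every character, which pagers render as bold and
# plain() strips back out with the '.\b' substitution above.
def _demo_overstrike():
    bolded = TextDoc().bold('NAME')
    assert plain(bolded) == 'NAME'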
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1  # LINES is a string when set
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
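# A minimal sketch: locate() imports along the dotted path as far as it can,
# then walks the remaining parts with getattr; unknown paths yield None.
def _demo_locate():
    assert locate('os.path.join') is os.path.join
    assert locate('no.such.module') is None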
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
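# Illustrative only: render_doc() builds the same text that doc() pages, so
# it is handy in tests; 'json' is just an example stdlib module name.
def _demo_render_doc():
    page = render_doc('json')
    assert page.startswith('Python Library Documentation')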
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
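# A minimal sketch: the module-level help object accepts a string request
# (matched against keywords, symbols and topics) or any live object.
def _demo_helper():
    help('keywords')    # prints the keyword table defined above
    help(str)           # falls through to doc(str, 'Help on %s:')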
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
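# A minimal sketch with a made-up tree: each node is (name, children);
# Scanner yields nodes depth-first, descending only where descendp is true.
def _demo_scanner():
    a = ('a', [('a1', []), ('a2', [])])
    root = ('root', [a, ('b', [])])
    scan = Scanner([root], children=lambda node: node[1][:],
                   descendp=lambda node: bool(node[1]))
    node = scan.next()
    while node is not None:
        print node[0]   # prints: a, a1, a2, b
        node = scan.next()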
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
                        desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
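# Illustrative only (blocks until interrupted): start the documentation
# server and report its URL once ready; port 7464 is pydoc's traditional
# default and is assumed to be free here.
def _demo_serve():
    serve(7464, callback=lambda server: sys.stdout.write(server.url + '\n'))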
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
{
"content_hash": "d796e86831095663fbf8879f1ca9819a",
"timestamp": "",
"source": "github",
"line_count": 2335,
"max_line_length": 88,
"avg_line_length": 40.66895074946467,
"alnum_prop": 0.5237674016975211,
"repo_name": "hwu25/AppPkg",
"id": "4b5b387a60f41e916675b26bfd6ecf33f416ff55",
"size": "95012",
"binary": false,
"copies": "50",
"ref": "refs/heads/trunk",
"path": "Applications/Python/Python-2.7.2/Lib/pydoc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13199099"
},
{
"name": "C++",
"bytes": "105080"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "12852"
},
{
"name": "Objective-C",
"bytes": "1374661"
},
{
"name": "Python",
"bytes": "16366233"
},
{
"name": "Shell",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_dessert_felbar.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "365fbd24deb6e553c7217fec5cea7143",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.307692307692307,
"alnum_prop": 0.693069306930693,
"repo_name": "anhstudios/swganh",
"id": "059d48f17474d4b798803e8d5534972a27fb610a",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/food/shared_dessert_felbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import numpy as np
import tensorflow as tf
from federatedml.util import LOGGER
class FTLDataLoader(tf.keras.utils.Sequence):
def __init__(self, non_overlap_samples, overlap_samples, batch_size, guest_side=True):
self.batch_size = batch_size
self.guest_side = guest_side
self._overlap_index = []
self._non_overlap_index = []
if guest_side:
self.size = non_overlap_samples.count() + overlap_samples.count()
else:
self.size = overlap_samples.count()
_, one_data = overlap_samples.first()
self.y_shape = (1,)
self.x_shape = one_data.features.shape
self.x = np.zeros((self.size, *self.x_shape))
self.y = np.zeros((self.size, *self.y_shape))
index = 0
self._overlap_keys = []
self._non_overlap_keys = []
for k, inst in overlap_samples.collect():
self._overlap_keys.append(k)
self.x[index] = inst.features
if guest_side:
self.y[index] = inst.label
index += 1
if self.guest_side:
for k, inst in non_overlap_samples.collect():
self._non_overlap_keys.append(k)
self.x[index] = inst.features
if guest_side:
self.y[index] = inst.label
index += 1
if guest_side:
self._overlap_index = np.array(list(range(0, overlap_samples.count())))
self._non_overlap_index = np.array(list(range(overlap_samples.count(), self.size)))
else:
self._overlap_index = list(range(len(self.x)))
def get_overlap_indexes(self):
return self._overlap_index
def get_non_overlap_indexes(self):
return self._non_overlap_index
def get_batch_indexes(self, batch_index):
start = self.batch_size * batch_index
end = self.batch_size * (batch_index + 1)
return start, end
def get_relative_overlap_index(self, batch_index):
start, end = self.get_batch_indexes(batch_index)
return self._overlap_index[(self._overlap_index >= start) & (self._overlap_index < end)] % self.batch_size
def get_overlap_x(self):
return self.x[self._overlap_index]
def get_overlap_y(self):
return self.y[self._overlap_index]
def get_overlap_keys(self):
return self._overlap_keys
def get_non_overlap_keys(self):
return self._non_overlap_keys
def __getitem__(self, index):
start, end = self.get_batch_indexes(index)
if self.guest_side:
return self.x[start: end], self.y[start: end]
else:
return self.x[start: end]
def __len__(self):
return int(np.ceil(self.size / float(self.batch_size)))
    def get_idx(self):
        # return the collected sample keys in load order: overlap first
        return self._overlap_keys + self._non_overlap_keys
    def data_basic_info(self):
        return 'total sample num is {}, overlap sample num is {}, non_overlap sample num is {}, ' \
               'x_shape is {}'.format(self.size, len(self._overlap_index), len(self._non_overlap_index),
                                      self.x_shape)
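# A minimal sketch (synthetic indexes, not FATE API) of what
# get_relative_overlap_index computes: the positions of overlap samples
# inside one batch, relative to that batch's start.
def _demo_relative_overlap_index():
    overlap = np.array([0, 1, 2, 5, 9])            # absolute overlap indexes
    batch_size = 4
    start, end = batch_size * 1, batch_size * 2    # batch 1 covers [4, 8)
    relative = overlap[(overlap >= start) & (overlap < end)] % batch_size
    assert relative.tolist() == [1]                # only index 5 lands here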
|
{
"content_hash": "2a1ca9f47203a4c60f582e9cf69b87e5",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 114,
"avg_line_length": 32.747368421052634,
"alnum_prop": 0.5753776920604308,
"repo_name": "FederatedAI/FATE",
"id": "c7e90575bdb0ab04841913e580c0ca27d164ba61",
"size": "3111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/transfer_learning/hetero_ftl/ftl_dataloder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from optparse import make_option
import os
from jukebox.jukebox_core.utils import FileIndexer
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--path", action="store", dest="path",
help="Music library path to scan"),
)
def handle(self, *args, **options):
if options["path"] is None:
print "Required arguments: path"
return
if not os.path.exists(options["path"]):
print "Path does not exist"
return
print "Indexing music in " + options["path"]
print "This may take a while"
self.index(options["path"], int(options["verbosity"]))
def index(self, path, verbosity):
if not path.endswith("/"):
path += "/"
indexer = FileIndexer()
listing = os.listdir(path)
for filename in listing:
filename = path + filename
if os.path.isdir(filename):
self.index(filename + "/", verbosity)
elif filename.endswith(".mp3"):
if verbosity >= 2:
print "Indexing file " + filename
indexer.index(filename)
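# A minimal sketch, assuming a configured Django project and an existing
# /srv/music directory (both hypothetical); equivalent to running
#   python manage.py jukebox_index --path=/srv/music --verbosity=2
def _demo_call_command():
    from django.core.management import call_command
    call_command('jukebox_index', path='/srv/music', verbosity=2)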
|
{
"content_hash": "07903497a6d3a766eb06700f1a2bf63c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 62,
"avg_line_length": 31.325,
"alnum_prop": 0.5674381484437351,
"repo_name": "rejahrehim/jukebox",
"id": "15dce8f0e6520b15470fc51065c219a427cc477a",
"size": "1277",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jukebox/jukebox_core/management/commands/jukebox_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12936"
},
{
"name": "HTML",
"bytes": "11482"
},
{
"name": "JavaScript",
"bytes": "50786"
},
{
"name": "Python",
"bytes": "171423"
}
],
"symlink_target": ""
}
|
""" PyTorch PEGASUS model."""
import copy
import math
import random
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
_TOKENIZER_FOR_DOC = "PegasusTokenizer"
PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pegasus-large",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
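# A minimal sketch with synthetic ids (pad=0, decoder start=2): the start
# token is prepended, labels shift right, and any -100 values that remain
# after the shift are replaced by pad_token_id.
def _demo_shift_tokens_right():
    labels = torch.tensor([[5, 6, 7, -100]])
    shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
    assert shifted.tolist() == [[2, 5, 6, 7]]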
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Make causal mask used for uni-directional (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
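# A minimal sketch: for a (1, 3)-shaped target the additive mask is 0 on and
# below the diagonal and dtype-min above it, blocking future positions.
def _demo_causal_mask():
    mask = _make_causal_mask(torch.Size([1, 3]), torch.float32)
    assert mask.shape == (1, 1, 3, 3)
    assert mask[0, 0, 0, 1] == torch.finfo(torch.float32).min
    assert mask[0, 0, 2, 0] == 0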
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
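# A minimal sketch: a padding mask [1, 1, 0] becomes an additive bias that is
# 0 where attention is allowed and dtype-min on the padded source position.
def _demo_expand_mask():
    bias = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)
    assert bias.shape == (1, 1, 3, 3)
    assert bias[0, 0, 0, 2] == torch.finfo(torch.float32).min
    assert bias[0, 0, 0, 0] == 0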
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
super().__init__(num_positions, embedding_dim)
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter) -> nn.Parameter:
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
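# A minimal sketch: the embedding only needs the input shape, returning one
# frozen sin/cos row per position; the sizes below are arbitrary.
def _demo_sinusoidal_positions():
    emb = PegasusSinusoidalPositionalEmbedding(num_positions=64, embedding_dim=16)
    pos = emb(torch.Size([2, 10]))      # positions 0..9 for a length-10 input
    assert pos.shape == (10, 16)
    assert not emb.weight.requires_grad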
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
class PegasusAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can
        # be partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
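# Illustrative sketch (not part of the original module): a single encoder-style
# (non-cached) pass through `PegasusAttention`. Shapes follow the assertions inside
# `forward`; the function name is hypothetical.
def _example_attention_pass():
    attn = PegasusAttention(embed_dim=16, num_heads=4)
    hidden_states = torch.randn(2, 5, 16)  # [bsz, tgt_len, embed_dim]
    attn_output, attn_weights, past = attn(hidden_states, output_attentions=True)
    assert attn_output.shape == (2, 5, 16)
    assert attn_weights.shape == (2, 4, 5, 5)  # [bsz, num_heads, tgt_len, src_len]
    assert past is None  # key/value caching only happens when `is_decoder=True`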
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus
class PegasusEncoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
"""
Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(encoder_attention_heads,)*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
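# Illustrative sketch (not part of the original module): the encoder layer is
# pre-LayerNorm with residual connections, so its output keeps the input shape.
# The tiny config values below are arbitrary.
def _example_encoder_layer():
    config = PegasusConfig(d_model=16, encoder_attention_heads=4, encoder_ffn_dim=32)
    layer = PegasusEncoderLayer(config)
    hidden_states = torch.randn(2, 5, config.d_model)
    (hidden_states_out,) = layer(hidden_states, attention_mask=None, layer_head_mask=None)
    assert hidden_states_out.shape == hidden_states.shape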
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus
class PegasusDecoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
    ) -> Tuple[torch.Tensor, ...]:
"""
Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(decoder_attention_heads,)*.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
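# Illustrative sketch (not part of the original module): with `use_cache=True` the
# decoder layer returns a 4-tuple cache, (self_k, self_v, cross_k, cross_v), whose
# self-attention entries grow by one step per generated token. Values are arbitrary.
def _example_decoder_layer_cache():
    config = PegasusConfig(d_model=16, decoder_attention_heads=4, decoder_ffn_dim=32)
    layer = PegasusDecoderLayer(config)
    hidden_states = torch.randn(2, 3, 16)
    encoder_hidden_states = torch.randn(2, 7, 16)
    hidden_states_out, present = layer(
        hidden_states, encoder_hidden_states=encoder_hidden_states, use_cache=True
    )
    assert len(present) == 4
    assert present[0].shape == (2, 4, 3, 4)  # self-attn keys: [bsz, heads, seq, head_dim]
    assert present[2].shape == (2, 4, 7, 4)  # cross-attn keys follow the encoder length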
class PegasusPreTrainedModel(PreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (PegasusDecoder, PegasusEncoder)):
module.gradient_checkpointing = value
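# Illustrative sketch (not part of the original module): `_init_weights` deliberately
# skips `PegasusSinusoidalPositionalEmbedding` (the `pass` branch above), so the
# deterministic position table survives the otherwise random weight initialization.
# Config values are arbitrary; `PegasusModel` is defined later in this file and
# resolves at call time.
def _example_init_preserves_sinusoids():
    config = PegasusConfig(
        vocab_size=50, d_model=16, max_position_embeddings=32,
        encoder_layers=1, encoder_attention_heads=4, encoder_ffn_dim=32,
        decoder_layers=1, decoder_attention_heads=4, decoder_ffn_dim=32,
    )
    model = PegasusModel(config)
    reference = PegasusSinusoidalPositionalEmbedding(32, 16).weight
    assert torch.equal(model.encoder.embed_positions.weight, reference)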
PEGASUS_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Parameters:
config ([`PegasusConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import PegasusTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class PegasusEncoder(PegasusPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusEncoderLayer`].
Args:
config: PegasusConfig
        embed_tokens (nn.Embedding): input embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
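# Illustrative sketch (not part of the original module): running the full encoder
# stack end to end with a deliberately tiny, hypothetical configuration.
def _example_encoder_stack():
    config = PegasusConfig(
        vocab_size=100, d_model=16, max_position_embeddings=64,
        encoder_layers=2, encoder_attention_heads=4, encoder_ffn_dim=32,
        decoder_layers=1, decoder_attention_heads=4, decoder_ffn_dim=32,
    )
    encoder = PegasusEncoder(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 6))
    outputs = encoder(input_ids=input_ids, output_hidden_states=True)
    assert outputs.last_hidden_state.shape == (2, 6, config.d_model)
    # embeddings + one entry per layer, with the final LayerNorm applied at the end
    assert len(outputs.hidden_states) == config.encoder_layers + 1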
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusConfig
        embed_tokens (nn.Embedding): input embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
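# Illustrative sketch (not part of the original module): incremental decoding with
# `past_key_values`. After a first pass over 4 tokens, only the newest token needs
# to be fed in; the cached states supply the rest. The configuration is arbitrary.
def _example_decoder_incremental():
    config = PegasusConfig(
        vocab_size=100, d_model=16, max_position_embeddings=64,
        encoder_layers=1, encoder_attention_heads=4, encoder_ffn_dim=32,
        decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=32,
    )
    decoder = PegasusDecoder(config)
    first = decoder(input_ids=torch.randint(0, 100, (1, 4)), use_cache=True)
    assert len(first.past_key_values) == config.decoder_layers
    step = decoder(
        input_ids=torch.randint(0, 100, (1, 1)),
        past_key_values=first.past_key_values,
        use_cache=True,
    )
    assert step.last_hidden_state.shape == (1, 1, config.d_model)
    assert step.past_key_values[0][0].shape[2] == 5  # cache grew from 4 to 5 steps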
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class PegasusModel(PegasusPreTrainedModel):
_keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config, self.shared)
self.decoder = PegasusDecoder(config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqModelOutput]:
r"""
Returns:
Example:
```python
>>> from transformers import PegasusTokenizer, PegasusModel
>>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
        # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
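# Illustrative sketch (not part of the original module): when `encoder_outputs` is
# passed in, `PegasusModel.forward` skips the encoder, so one encoder pass can serve
# many decoder passes. `model`, `input_ids` and `decoder_input_ids` here are
# hypothetical arguments supplied by the caller.
def _example_reuse_encoder_outputs(model, input_ids, decoder_input_ids):
    encoder_outputs = model.get_encoder()(input_ids=input_ids)
    outputs = model(
        encoder_outputs=encoder_outputs,
        attention_mask=None,
        decoder_input_ids=decoder_input_ids,
    )
    return outputs.last_hidden_state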
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class PegasusForConditionalGeneration(PegasusPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder.version",
r"decoder.version",
r"lm_head.weight",
r"embed_positions.weight",
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
]
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
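# Illustrative sketches (not part of the original module) for two bookkeeping details
# above: growing the vocabulary zero-pads `final_logits_bias` on the right, and
# `_reorder_cache` permutes only the self-attention cache entries along the beam axis.
def _example_resize_logits_bias(model):
    # `model` is a hypothetical `PegasusForConditionalGeneration` instance
    old_size = model.final_logits_bias.shape[-1]
    model.resize_token_embeddings(old_size + 2)
    assert model.final_logits_bias.shape[-1] == old_size + 2
    assert model.final_logits_bias[0, -2:].abs().sum().item() == 0.0  # zero-padded
def _example_reorder_cache():
    self_state = torch.arange(3.0).view(3, 1, 1, 1)  # one marker value per beam
    cross_state = torch.zeros(3, 1, 1, 1)
    past = ((self_state, self_state, cross_state, cross_state),)
    reordered = PegasusForConditionalGeneration._reorder_cache(past, torch.tensor([2, 0, 1]))
    assert reordered[0][0][:, 0, 0, 0].tolist() == [2.0, 0.0, 1.0]  # beams permuted
    assert torch.equal(reordered[0][2], cross_state)  # cross-attn states left as-is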
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
class PegasusDecoderWrapper(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
class PegasusForCausalLM(PegasusPreTrainedModel):
_keys_to_ignore_on_load_missing = ["lm_head.weight"]
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = PegasusDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.model.decoder.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM.forward with Bart->Pegasus, facebook/bart-base->google/pegasus-large
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import PegasusTokenizer, PegasusForCausalLM
>>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
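# --- illustrative sketch (not part of the original file) --------------------
# A minimal greedy loop making the `past_key_values` mechanics from the
# `forward` docstring explicit: after the first step only the last token is
# fed, together with the cached key/value states. Meant to be copied into a
# standalone script (this module uses relative imports and cannot be run
# directly); `model.generate` remains the canonical generation path.
if __name__ == "__main__":
    from transformers import PegasusTokenizer, PegasusForCausalLM
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
    model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
    input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt").input_ids
    past = None
    for _ in range(5):
        # after the first step, feed only the last token plus the cache
        step_ids = input_ids if past is None else input_ids[:, -1:]
        out = model(input_ids=step_ids, past_key_values=past, use_cache=True)
        past = out.past_key_values  # reuse the cached states on the next step
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    print(tokenizer.decode(input_ids[0], skip_special_tokens=True))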
|
{
"content_hash": "00bd235aa20cd982328ff1255c550ea1",
"timestamp": "",
"source": "github",
"line_count": 1708,
"max_line_length": 150,
"avg_line_length": 47.37646370023419,
"alnum_prop": 0.6302104573709512,
"repo_name": "huggingface/transformers",
"id": "a78a0235e337e9a9d342de45f9f1b71459621b36",
"size": "81557",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/pegasus/modeling_pegasus.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
import random
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.tests.fullstack import base
from neutron.tests.fullstack.cmd import dhcp_agent as cmd
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.unit import testlib_api
load_tests = testlib_api.module_load_tests
class BaseDhcpAgentTest(base.BaseFullStackTestCase):
scenarios = [
(constants.AGENT_TYPE_OVS,
{'l2_agent_type': constants.AGENT_TYPE_OVS}),
(constants.AGENT_TYPE_LINUXBRIDGE,
{'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})
]
def setUp(self):
host_descriptions = [
environment.HostDescription(
dhcp_agent=True,
l2_agent_type=self.l2_agent_type
) for _ in range(self.number_of_hosts)]
env = environment.Environment(
environment.EnvironmentDescription(
l2_pop=False,
arp_responder=False,
agent_down_time=self.agent_down_time),
host_descriptions)
super(BaseDhcpAgentTest, self).setUp(env)
self.project_id = uuidutils.generate_uuid()
self._create_network_subnet_and_vm()
def _spawn_vm(self):
host = random.choice(self.environment.hosts)
vm = self.useFixture(
machine.FakeFullstackMachine(
host,
self.network['id'],
self.project_id,
self.safe_client,
use_dhcp=True))
vm.block_until_boot()
return vm
def _create_network_subnet_and_vm(self):
self.network = self.safe_client.create_network(self.project_id)
self.subnet = self.safe_client.create_subnet(
self.project_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=True)
self.vm = self._spawn_vm()
def _wait_until_agent_down(self, agent_id):
def _agent_down():
agent = self.client.show_agent(agent_id)['agent']
return not agent.get('alive')
common_utils.wait_until_true(_agent_down)
class TestDhcpAgentNoHA(BaseDhcpAgentTest):
number_of_hosts = 1
agent_down_time = 60
def test_dhcp_assignment(self):
# First check if network was scheduled to one DHCP agent
dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])
self.assertEqual(1, len(dhcp_agents['agents']))
# And check if IP and gateway config is fine on FakeMachine
self.vm.block_until_dhcp_config_done()
def test_mtu_update(self):
# The test case needs access to devices in nested namespaces. ip_lib
# doesn't support it, and it's probably unsafe to touch the library for
# testing matters.
# TODO(jlibosva) revisit when ip_lib supports nested namespaces
if self.environment.hosts[0].dhcp_agent.namespace is not None:
self.skipTest("ip_lib doesn't support nested namespaces")
self.vm.block_until_dhcp_config_done()
namespace = cmd._get_namespace_name(
self.network['id'],
suffix=self.environment.hosts[0].dhcp_agent.get_namespace_suffix())
ip = ip_lib.IPWrapper(namespace)
devices = ip.get_devices()
self.assertEqual(1, len(devices))
dhcp_dev = devices[0]
mtu = dhcp_dev.link.mtu
self.assertEqual(1450, mtu)
mtu -= 1
self.safe_client.update_network(self.network['id'], mtu=mtu)
common_utils.wait_until_true(lambda: dhcp_dev.link.mtu == mtu)
class TestDhcpAgentHA(BaseDhcpAgentTest):
number_of_hosts = 2
agent_down_time = 10
def _wait_until_network_rescheduled(self, old_agent):
def _agent_rescheduled():
network_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
if network_agents:
return network_agents[0]['id'] != old_agent['id']
return False
common_utils.wait_until_true(_agent_rescheduled)
def _kill_dhcp_agent(self, agent):
for host in self.environment.hosts:
hostname = host.dhcp_agent.get_agent_hostname()
if hostname == agent['host']:
host.dhcp_agent.kill()
self._wait_until_agent_down(agent['id'])
break
def _add_network_to_new_agent(self):
dhcp_agents = self.client.list_agents(
agent_type=constants.AGENT_TYPE_DHCP)['agents']
dhcp_agents_ids = [agent['id'] for agent in dhcp_agents]
current_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
current_agents_ids = [agent['id'] for agent in current_agents]
new_agents_ids = list(set(dhcp_agents_ids) - set(current_agents_ids))
if new_agents_ids:
new_agent_id = random.choice(new_agents_ids)
self.client.add_network_to_dhcp_agent(
new_agent_id, {'network_id': self.network['id']})
def test_reschedule_network_on_new_agent(self):
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(network_dhcp_agents))
self._kill_dhcp_agent(network_dhcp_agents[0])
self._wait_until_network_rescheduled(network_dhcp_agents[0])
# ensure that only one agent is handling DHCP for this network
new_network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(new_network_dhcp_agents))
# check if new vm will get IP from new DHCP agent
new_vm = self._spawn_vm()
new_vm.block_until_dhcp_config_done()
def test_multiple_agents_for_network(self):
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(network_dhcp_agents))
self._add_network_to_new_agent()
# ensure that two agents are handling DHCP for this network
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(2, len(network_dhcp_agents))
self._kill_dhcp_agent(network_dhcp_agents[0])
# check if new vm will get IP from DHCP agent which is still alive
new_vm = self._spawn_vm()
new_vm.block_until_dhcp_config_done()
|
{
"content_hash": "1013d18deed8703412180b2701059dfc",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 79,
"avg_line_length": 36.0427807486631,
"alnum_prop": 0.622106824925816,
"repo_name": "huntxu/neutron",
"id": "16afe9942f5dda7ab73652855ee8fb2f5eb6433f",
"size": "7340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/fullstack/test_dhcp_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from matplotlib import pyplot as plt
results = []
# walk the working directory for "scf-*" subfolders produced by the parameter
# scan; in a Quantum ESPRESSO (pw.x) output file the line beginning with "!"
# carries the final total energy of the converged SCF run
for d, dn, fn in os.walk("."):
    for dir in sorted(dn):
        if "scf-" in dir:
            try:
                E = 0
                with open(d + "/" + dir + "/scf.out") as f:
                    for line in f:
                        if "!" in line:
                            E = float(line.split()[4])
                results.append([float(dir[4:]), E])
            except (IOError, OSError, IndexError, ValueError):
                # skip runs that are unfinished or produced unparsable output
                pass
results = sorted(results, key=lambda x: x[0])
results = np.array(results).T
plt.plot(results[0],results[1], linewidth=2)
plt.savefig("plot.png")
|
{
"content_hash": "690fd9e6657c91d168de1fbb2503949f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 55,
"avg_line_length": 27.304347826086957,
"alnum_prop": 0.47770700636942676,
"repo_name": "addman2/KvantSim",
"id": "b17b6da8f7083aa735d44aafbf83518136b4912f",
"size": "651",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Exercises/EX03-relax/Si/cellpar/plot.dist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "267"
},
{
"name": "Python",
"bytes": "6900"
},
{
"name": "RPC",
"bytes": "148"
},
{
"name": "Shell",
"bytes": "1432"
},
{
"name": "TeX",
"bytes": "8460"
}
],
"symlink_target": ""
}
|
import os
import pymongo
import MySQLdb
import sys
from collections import defaultdict
c = pymongo.Connection('10.5.5.40', read_preference=pymongo.ReadPreference.SECONDARY_ONLY)
tcol = c['coleta_131']['tweets_131']
tunloadcol = c['coleta_131']['unload_tweets_131']
INSERT_SQL_1 = '''INSERT INTO conteudo_itemconteudodestaque(conteudo_id, texto, autor, data_publicacao, id_original, id_autor, imagem, score)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'''
INSERT_SQL_2 = '''INSERT INTO conteudo_evolucaoitemconteudodestaque(item_id, data, score) VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE score = %s'''
conn = MySQLdb.connect (host='mysql1.ctweb.inweb.org.br', db='bbb13',
user='proc', passwd='pr0ce55@', compress=True, port=3306, charset='utf8', use_unicode=True)
evolucao_score = defaultdict(lambda: defaultdict(defaultdict))
id_conteudo = 1
cursor = conn.cursor()
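# NOTE: first_date and last_date are not defined anywhere in this script; the
# UPDATE below assumes they are supplied by an earlier step (left as-is here).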
cursor.execute('''UPDATE conteudo_conteudodestaque SET data_inicial = %s, data_final = %s WHERE id = %s''', [first_date, last_date, id_conteudo])
tweets_date = defaultdict(list)
name = sys.argv[1]
print "Dados processados para {0}".format(name)
date = name[1:]
with open(date, 'r') as f:
    # The data in the file is assumed to be sorted by score.
    # Only the top 1000 would be kept (the cap below is currently disabled).
for c, line in enumerate(f):
#if c > 1000:
# break
(tweet_id, score, img, user, data, link, entities ) = line.strip().split('\t')
evolucao_score[tweet_id][date[1:]] = score
tweets_date[date].append((tweet_id, score))
total = len(evolucao_score)
for counter, (tweet_id, values) in enumerate(evolucao_score.iteritems()):
datas = sorted(values.iterkeys())
result = []
max_score = 0
for date in datas:
score = float(values[date]) * .001
result.append((date, score))
if max_score < score:
max_score = score
##print ','.join(result)
obj = tcol.find_one({'_id': int(tweet_id)}, {'text': 1, 'created_at': 1, 'user.name': 1, 'user.screen_name': 1, 'user.profile_image_url': 1})
if obj is None:
obj = tunloadcol.find_one({'_id': int(tweet_id)}, {'text': 1, 'created_at': 1, 'user.name': 1, 'user.screen_name': 1, 'user.profile_image_url': 1})
if obj:
cursor.execute("SELECT id FROM conteudo_itemconteudodestaque WHERE id_original = %s", tweet_id)
row = cursor.fetchone()
if row:
last_id = row[0]
else:
insert_values = [id_conteudo, obj['text'].replace(r'\/', '/'), obj['user']['name'], obj['created_at'],
tweet_id, obj['user']['screen_name'], obj['user']['profile_image_url'].replace(r'\/', '/'), max_score ];
cursor.execute(INSERT_SQL_1, insert_values)
last_id = cursor.lastrowid
for date, score in result:
cursor.execute(INSERT_SQL_2, [last_id, date, score, score])
# print (INSERT_SQL_2, [last_id, date, score, score])
else:
print "Not found: ", tweet_id
if counter and (counter % 100 == 0):
print "Processados %d / %d" % (counter, total)
|
{
"content_hash": "0963806d2984bb67e111c97372820854",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 149,
"avg_line_length": 40.49295774647887,
"alnum_prop": 0.6681739130434783,
"repo_name": "W3CBrasil/AI-Social",
"id": "49a012134691716d494c72e7f4ec32a7d423fa51",
"size": "2875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algoritmos/endosso/ranking/sumariza.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51364"
},
{
"name": "C++",
"bytes": "3317"
},
{
"name": "CSS",
"bytes": "66345"
},
{
"name": "HTML",
"bytes": "40984"
},
{
"name": "JavaScript",
"bytes": "81067"
},
{
"name": "Makefile",
"bytes": "981"
},
{
"name": "PHP",
"bytes": "24989"
},
{
"name": "Python",
"bytes": "90259"
},
{
"name": "Ruby",
"bytes": "864"
},
{
"name": "Shell",
"bytes": "2623"
}
],
"symlink_target": ""
}
|
import os,imp,sys
from waflib import Utils,Errors,Logs
import waflib.Node
HEXVERSION=0x1060600
WAFVERSION="1.6.6"
WAFREVISION="11388"
ABI=98
DBFILE='.wafpickle-%d'%ABI
APPNAME='APPNAME'
VERSION='VERSION'
TOP='top'
OUT='out'
WSCRIPT_FILE='wscript'
launch_dir=''
run_dir=''
top_dir=''
out_dir=''
waf_dir=''
local_repo=''
remote_repo='http://waf.googlecode.com/svn/'
remote_locs=['branches/waf-%s/waflib/extras'%WAFVERSION,'trunk/waflib/extras','trunk/waflib/Tools']
g_module=None
STDOUT=1
STDERR=-1
BOTH=0
classes=[]
def create_context(cmd_name,*k,**kw):
global classes
for x in classes:
if x.cmd==cmd_name:
return x(*k,**kw)
ctx=Context(*k,**kw)
ctx.fun=cmd_name
return ctx
class store_context(type):
def __init__(cls,name,bases,dict):
super(store_context,cls).__init__(name,bases,dict)
name=cls.__name__
if name=='ctx'or name=='Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)'%name)
if not getattr(cls,'fun',None):
cls.fun=cls.cmd
global classes
classes.insert(0,cls)
ctx=store_context('ctx',(object,),{})
class Context(ctx):
errors=Errors
tools={}
def __init__(self,**kw):
try:
rd=kw['run_dir']
except KeyError:
global run_dir
rd=run_dir
class node_class(waflib.Node.Node):
pass
self.node_class=node_class
self.node_class.__module__="waflib.Node"
self.node_class.__name__="Nod3"
self.node_class.ctx=self
self.root=self.node_class('',None)
self.cur_script=None
self.path=self.root.find_dir(rd)
self.stack_path=[]
self.exec_dict={'ctx':self,'conf':self,'bld':self,'opt':self}
self.logger=None
def __hash__(self):
return id(self)
def load(self,tool_list,*k,**kw):
tools=Utils.to_list(tool_list)
path=Utils.to_list(kw.get('tooldir',''))
for t in tools:
module=load_tool(t,path)
fun=getattr(module,kw.get('name',self.fun),None)
if fun:
fun(self)
def execute(self):
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self,node):
self.stack_path.append(self.cur_script)
self.cur_script=node
self.path=node.parent
def post_recurse(self,node):
self.cur_script=self.stack_path.pop()
if self.cur_script:
self.path=self.cur_script.parent
def recurse(self,dirs,name=None,mandatory=True,once=True):
try:
cache=self.recurse_cache
except:
cache=self.recurse_cache={}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
d=os.path.join(self.path.abspath(),d)
WSCRIPT=os.path.join(d,WSCRIPT_FILE)
WSCRIPT_FUN=WSCRIPT+'_'+(name or self.fun)
node=self.root.find_node(WSCRIPT_FUN)
if node and(not once or node not in cache):
cache[node]=True
self.pre_recurse(node)
try:
function_code=node.read('rU')
exec(compile(function_code,node.abspath(),'exec'),self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node=self.root.find_node(WSCRIPT)
if node and(not once or node not in cache):
cache[node]=True
self.pre_recurse(node)
try:
wscript_module=load_module(node.abspath())
user_function=getattr(wscript_module,(name or self.fun),None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s'%(name or self.fun,node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s'%d)
def exec_command(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
Logs.debug('runner_env: kw=%s'%kw)
try:
if self.logger:
self.logger.info(cmd)
kw['stdout']=kw['stderr']=subprocess.PIPE
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
if out:
self.logger.debug('out: %s'%out)
if err:
self.logger.error('err: %s'%err)
return p.returncode
else:
p=subprocess.Popen(cmd,**kw)
return p.wait()
except OSError:
return-1
def cmd_and_log(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
if'quiet'in kw:
quiet=kw['quiet']
del kw['quiet']
else:
quiet=None
if'output'in kw:
to_ret=kw['output']
del kw['output']
else:
to_ret=STDOUT
kw['stdout']=kw['stderr']=subprocess.PIPE
if quiet is None:
self.to_log(cmd)
try:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
		except Exception as e:
try:
self.to_log(str(err))
except:
pass
raise Errors.WafError('Execution failure',ex=e)
		if not isinstance(out,str):
			out=out.decode(sys.stdout.encoding or'iso8859-1')
		if not isinstance(err,str):
			err=err.decode(sys.stdout.encoding or'iso8859-1')
if out and quiet!=STDOUT and quiet!=BOTH:
self.to_log('out: %s'%out)
if err and quiet!=STDERR and quiet!=BOTH:
self.to_log('err: %s'%err)
if p.returncode:
e=Errors.WafError('command %r returned %r'%(cmd,p.returncode))
e.returncode=p.returncode
e.stderr=err
e.stdout=out
raise e
if to_ret==BOTH:
return(out,err)
elif to_ret==STDERR:
return err
return out
def fatal(self,msg,ex=None):
if self.logger:
self.logger.info('from %s: %s'%(self.path.abspath(),msg))
try:
msg='%s\n(complete log in %s)'%(msg,self.logger.handlers[0].baseFilename)
except:
pass
raise self.errors.ConfigurationError(msg,ex=ex)
def to_log(self,msg):
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self,msg,result,color=None):
self.start_msg(msg)
if not isinstance(color,str):
color=result and'GREEN'or'YELLOW'
self.end_msg(result,color)
def start_msg(self,msg):
try:
if self.in_msg:
self.in_msg+=1
return
except:
self.in_msg=0
self.in_msg+=1
try:
self.line_just=max(self.line_just,len(msg))
except AttributeError:
self.line_just=max(40,len(msg))
for x in(self.line_just*'-',msg):
self.to_log(x)
Logs.pprint('NORMAL',"%s :"%msg.ljust(self.line_just),sep='')
def end_msg(self,result,color=None):
self.in_msg-=1
if self.in_msg:
return
defcolor='GREEN'
if result==True:
msg='ok'
elif result==False:
msg='not found'
defcolor='YELLOW'
else:
msg=str(result)
self.to_log(msg)
Logs.pprint(color or defcolor,msg)
def load_special_tools(self,var,ban=[]):
global waf_dir
lst=self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py',''))
cache_modules={}
def load_module(path):
try:
return cache_modules[path]
except KeyError:
pass
module=imp.new_module(WSCRIPT_FILE)
try:
code=Utils.readf(path,m='rU')
except(IOError,OSError):
raise Errors.WafError('Could not read the file %r'%path)
module_dir=os.path.dirname(path)
sys.path.insert(0,module_dir)
exec(compile(code,path,'exec'),module.__dict__)
sys.path.remove(module_dir)
cache_modules[path]=module
return module
def load_tool(tool,tooldir=None):
tool=tool.replace('++','xx')
tool=tool.replace('java','javaw')
tool=tool.replace('compiler_cc','compiler_c')
if tooldir:
assert isinstance(tooldir,list)
sys.path=tooldir+sys.path
try:
__import__(tool)
ret=sys.modules[tool]
Context.tools[tool]=ret
return ret
finally:
for d in tooldir:
sys.path.remove(d)
else:
global waf_dir
try:
os.stat(os.path.join(waf_dir,'waflib','extras',tool+'.py'))
d='waflib.extras.%s'%tool
except:
try:
os.stat(os.path.join(waf_dir,'waflib','Tools',tool+'.py'))
d='waflib.Tools.%s'%tool
except:
d=tool
__import__(d)
ret=sys.modules[d]
Context.tools[tool]=ret
return ret
|
{
"content_hash": "a5cf810d001b047bb685bed9ed801ab8",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 99,
"avg_line_length": 25.74496644295302,
"alnum_prop": 0.6663190823774765,
"repo_name": "drayside/kodkod",
"id": "f296f85aa9bce8c8f45c92fe4f3b073984339aff",
"size": "7817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/.waf-1.6.6-c57dd0fa119e23d36c23d598487c6880/waflib/Context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "771374"
},
{
"name": "C++",
"bytes": "1237437"
},
{
"name": "Java",
"bytes": "1361580"
},
{
"name": "Objective-C",
"bytes": "8663"
},
{
"name": "Python",
"bytes": "355420"
},
{
"name": "Shell",
"bytes": "285493"
}
],
"symlink_target": ""
}
|
"""
@file
@brief Implémente une simulation d'évolution des catégories de population
selon un modèle de Schelling.
"""
import random
import copy
import os
from pyquickhelper.loghelper import noLOG
from ..helpers.pygame_helper import wait_event, empty_main_loop
def round(r, g, b):
"""
arrondit chaque couleur
"""
return (int(r), int(g), int(b))
class Ville:
"""
Définit une ville qui va évoluer par la suite.
@param colors couleurs vives : simulation sans tenir compte de riches ou pauvres,
seulement regroupement
@param colors_grade simulation en tenant compte des riches, du plus foncé
au plus clair (riches)
"""
colors = {-1: (0, 0, 0), 0: (255, 0, 0), 1: (0, 255, 0), 2: (0, 0, 255),
3: (255, 255, 0), 4: (255, 0, 255), 5: (0, 255, 255)}
colors_grade = {-1: (0, 0, 0), 0: round(131.28918999850276, 137.49288815690971, 51.799520886360227),
1: round(151.28918999850276, 147.49288815690971, 71.799520886360227),
2: round(191.42448385755856, 191.27629208812527, 57.413606761812389),
3: round(190.99311386065693, 133.49749594932979, 41.781926646045072),
4: round(167.25849848112253, 76.347509523120692, 41.289551087323403),
5: round(196.76664713923063, 39.476078890841634, 31.506444053895724)
}
def __init__(self, cote=100, group=3, taille=3, riche=False, th2=1.2,
renouvellement=0.15, delay=1):
"""
constructeur
@param cote côté du carré utilisé pour la simulation
@param group nombre de catégories de gens
@param taille chaque individu regarde ses voisins à *+/-* taille près
@param riche simulation avec riche ou non
@param th2 le voisin le plus pauvre peut être contaminé,
si la différence de classes est importante (`cl1 > cl2 * th2`)
@param renouvellement à chaque itération, une certaine proportion des pâtés sont mis à jour,
cette proportion correspond au renouvellement
@param delay la simulation prend en compte la ville lors des "delay" dernières itérations
On tire au hasard la classe d'un pâté de maison dans un disque de rayon cote.
"""
if cote is None:
pass
else:
self.mat = [[random.randint(0, group - 1)
for i in range(0, cote)] for j in range(0, cote)]
self.group = group
self.taille = taille
self.past = []
self.th2 = th2
self.riche = riche
self.delay = delay
self.renouvellement = renouvellement
c = len(self.mat) / 2
R = c ** 2 / 4
for i in range(0, len(self.mat)):
for j in range(0, len(self.mat[0])):
d = (i - c) ** 2 + (j - c) ** 2
if d > R:
self.mat[i][j] = -1
def _voisinage(self, i, j, mat):
"""
calcul de la répartition du voisiage
@param i i,j coordonnées
@param j
@param mat matrice
@return dictionnaire { classe:nombre }
"""
d = {}
x1 = max(0, i - self.taille)
y1 = max(0, j - self.taille)
x2 = min(len(self.mat), i + self.taille + 1)
y2 = min(len(self.mat), j + self.taille + 1)
for ii in range(x1, x2):
for jj in range(y1, y2):
c = mat[ii][jj]
if c not in d:
d[c] = 0
d[c] += 1
return d
def evolution(self):
"""
évolution d'une itération à l'autre
@return nb1,nb2
"""
keep = copy.deepcopy(self.mat)
self.past.append(keep)
if len(self.past) > self.delay:
del self.past[:len(self.past) - self.delay]
# def fff(x, c):
# if c not in x:
# return 0
# elif x[c] >= sum(x.values()) * self.th:
# return 1
# else:
# return 0
        # a certain proportion of the city blocks (renouvellement) is renewed,
        # drawn at random
nb1, nb2 = 0, 0
for n in range(0, int(len(self.mat) ** 2 * self.renouvellement)):
            # draw two neighbours at random
i = random.randint(0, len(self.mat) - 1)
j = random.randint(0, len(self.mat) - 1)
k = i + random.randint(-1, 1)
l_ = j + random.randint(-1, 1)
if k == i and l_ == j:
continue
x1 = max(0, k)
y1 = max(0, l_)
x2 = min(len(self.mat) - 1, k)
y2 = min(len(self.mat) - 1, l_)
if x1 != x2 or y1 != y2:
continue
            # compute the two neighbourhoods
v1 = self._voisinage(i, j, self.mat)
v2 = self._voisinage(k, l_, self.mat)
c = self.mat[i][j]
d = self.mat[k][l_]
            # c, d: their categories
if c >= 0 and d >= 0:
                # if both are inhabited
if v1.get(c, 0) < v2.get(c, 0) and v1.get(d, 0) > v2.get(d, 0):
                    # first case: if one has more neighbours resembling the other,
                    # and vice versa, they swap places
self.mat[k][l_] = c
self.mat[i][j] = d
nb1 += 1
elif v1.get(c, 0) > v2.get(d, 0) * self.th2 and (not self.riche or c > d):
                    # second case: rich scenario, the poorer neighbour can be
                    # contaminated if the class difference is large
self.mat[k][l_] = c
nb2 += 1
elif c == -1:
                # the uninhabited one takes the colour of the other
self.mat[i][j] = d
elif d == -1:
                # the uninhabited one takes the colour of the other
self.mat[k][l_] = c
return nb1, nb2
def count(self):
"""
        @return     the population
"""
d = {}
for line in self.mat:
for c in line:
if c not in d:
d[c] = 1
else:
d[c] += 1
return d
class VilleImage(Ville):
"""
Définit une ville à partir d'une image (donc non aléatoire).
"""
def __init__(self, image,
cote=100,
group=3,
taille=3,
riche=False,
th2=1.2,
renouvellement=0.15,
delay=1):
"""
constructeur
@param image nom d'une image pour définir l'initialisation
@param cote cote du carré utilisé pour la simulation
@param group nombre de catégories de gens
@param taille chaque individu regarde ses voisins à +- taille près
@param riche simulation avec riche ou non
@param th2 le voisin le plus pauvre peut-être contaminé,
si la différence de classes est importante (cl1 > cl2 * th2)
@param renouvellement à chaque itération, une certaine proportion des pâtés sont mis à jour,
cette proportion correspond à renouvellement
@param delay la simulation prend en compte la ville lors des "delay" dernières itérations
On tire au hasard la classe d'un pâté de maison dans un disque de rayon cote.
"""
Ville.__init__(self, cote, group, taille, riche,
th2, renouvellement, delay)
self._initialisation(image)
def _initialisation(self, im):
for i in range(0, len(self.mat)):
for j in range(0, len(self.mat[0])):
p = im.get_at((i, j))
mins = 1e6
best = None
for k, v in Ville.colors_grade.items():
s = 0
for z in [0, 1, 2]:
s += (v[z] - p[z]) ** 2
s = s ** 0.5
if s < mins:
mins = s
best = k
self.mat[i][j] = best
def display(self, screen, x, pygame):
"""
affichage
@param screen écran
@param x dimension d'un pâté de maison
"""
screen.fill((0, 0, 0))
if self.riche:
colors = Ville.colors_grade
else:
colors = Ville.colors
for i in range(0, len(self.mat)):
for j in range(0, len(self.mat[i])):
c = colors[self.mat[i][j]]
pygame.draw.rect(screen, c, pygame.Rect(i * x, j * x, x, x))
def pygame_simulation(pygame, first_click=False, folder=None,
x=6, nb=100, group=6, max_iter=150, th2=1.75,
image=None, flags=0, fLOG=noLOG):
"""
Simulation graphique.
Illuste la résolution du puzzle
@param pygame module pygame
@param first_click attend la pression d'un clic de souris avant de commencer
@param folder répertoire où stocker les images de la simulation
@param size taille de l'écran
@param delay delay between two tries
@param x pour l'affichage, taille d'un pâté de maison à l'écran
@param group ...
@param nb taille du carré de la simulation en nombre de pâtés de maisons
@param th2 ...
@param max_iter nombre d'itérations
@param image définition de la ville
@param flags see `pygame.display.set_mode <https://www.pygame.org/docs/ref/display.html#pygame.display.set_mode>`_
@param fLOG logging function
@return @see cl Ville
La simulation ressemble à ceci :
.. raw:: html
<video autoplay="" controls="" loop="" height="500">
<source src="http://www.xavierdupre.fr/enseignement/complements/voisinage.mp4" type="video/mp4" />
</video>
Pour lancer la simulation::
from ensae_teaching_cs.special.voisinage_evolution import pygame_simulation
import pygame
pygame_simulation(pygame)
Voir :ref:`l-simulation_voisinage`.
"""
if image is None:
this = os.path.dirname(__file__)
image = os.path.join(this, "paris_today.png")
image = pygame.image.load(image)
image = pygame.transform.scale(image, (100, 100))
pygame.init()
size = nb * x, nb * x
screen = pygame.display.set_mode(size, flags)
ville = VilleImage(image, nb, group, th2=th2, riche=True)
if first_click and pygame is not None:
wait_event(pygame)
if pygame is not None:
        ville.display(screen, x, pygame)
pygame.display.flip()
images = []
if folder is not None:
images.append(screen.copy())
fLOG(ville.count())
for i in range(0, max_iter):
nb = ville.evolution()
fLOG("iteration ", i, " ch ", nb)
if pygame is not None:
if folder is not None:
images.append(screen.copy())
            ville.display(screen, x, pygame)
pygame.display.flip()
empty_main_loop(pygame)
fLOG(ville.count())
if first_click and pygame is not None:
wait_event(pygame)
if folder is not None and pygame is not None:
images.append(screen.copy())
if folder is not None:
fLOG("saving images")
for it, screen in enumerate(images):
if it % 10 == 0:
fLOG("saving image:", it)
image = os.path.join(folder, "image_%04d.png" % it)
pygame.image.save(screen, image)
return ville
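# --- illustrative sketch (not part of the original module) ------------------
# A headless helper showing the dynamics without pygame: run a few iterations
# and return the population counts per class. Parameter values are arbitrary
# assumptions chosen for the example.
def demo_headless(cote=50, group=3, n_iter=20, fLOG=noLOG):
    """
    runs *n_iter* iterations of @see cl Ville without any display
    @return     dictionary { class: count } after the last iteration
    """
    ville = Ville(cote, group)
    for it in range(n_iter):
        nb1, nb2 = ville.evolution()
        fLOG("iteration", it, "swaps", nb1, "contaminations", nb2)
    return ville.count()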
|
{
"content_hash": "ade994f1bc26ba4cb4c26dad863dc5f4",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 133,
"avg_line_length": 36.218934911242606,
"alnum_prop": 0.5022872079725536,
"repo_name": "sdpython/ensae_teaching_cs",
"id": "b8dea50c067406c1405c86206bf5c9e51b9cea63",
"size": "12350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ensae_teaching_cs/special/voisinage_evolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C#",
"bytes": "26850"
},
{
"name": "CSS",
"bytes": "220769"
},
{
"name": "HTML",
"bytes": "44390"
},
{
"name": "JavaScript",
"bytes": "31077"
},
{
"name": "Jupyter Notebook",
"bytes": "45255629"
},
{
"name": "PostScript",
"bytes": "169142"
},
{
"name": "Python",
"bytes": "1770141"
},
{
"name": "R",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "3675"
},
{
"name": "TeX",
"bytes": "593824"
}
],
"symlink_target": ""
}
|
class batchConverter:
kits = ['vtk_kit', 'wx_kit']
cats = ['Readers','Writers','Converters']
keywords = ['batch','convert','read','write','vti','mha','gipl']
help = """Batch converts image volume files from one type to another.
Source and target types can be VTK ImageData (.vti), MetaImage (.mha),
or Guys Image Processing Lab (.gipl).
All the files in the specified directory matching the given
source extension are converted.
The user may specify whether source files should be deleted
or target files should be automatically overwritten (be careful
with these settings!)
Known bug: writing to GIPL (forced binary uchar) will result in a thrown exception.
Circumvent this by first casting to binary uchar when writing to VTI, and then
converting to GIPL in a second pass.
(Module by Francois Malan)"""
class cptBrepWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes polydata to disc in the format required by the Closest
Point Transform (CPT) driver software. Input data is put through
a triangle filter first, as that is what the CPT requires.
See the
<a href="http://www.acm.caltech.edu/~seanm/projects/cpt/cpt.html">CPT
home page</a> for more information about the algorithm and the
software.
"""
class DICOMWriter:
kits = ['vtk_kit', 'gdcm_kit']
cats = ['Writers', 'Medical', 'DICOM']
help = """Writes image data to disc as DICOM images.
This GDCM2-based module writes data to disc as one (multi-frame)
or more DICOM files. As input, it requires a special DeVIDE
datastructure containing the raw data, the medical image
properties and direction cosines (indicating the orientation of
the dataset in world / scanner space). You can create such a
datastructure by making use of the DVMedicalImageData module.
"""
class ivWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """ivWRT is an Inventor Viewer polygonal data writer devide module.
"""
class metaImageWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes VTK image data or structured points in MetaImage format.
"""
class pngWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes a volume as a series of PNG images.
Set the file pattern by making use of the file browsing dialog. Replace
the increasing index by a %d format specifier. %3d can be used for
example, in which case %d will be replaced by an integer zero padded to 3
digits, i.e. 000, 001, 002 etc. %d starts from 0.
Module by Joris van Zwieten.
"""
class MatlabPointsWriter:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes slice3dVWR world-points to an m-file.
"""
class points_writer:
# BUG: empty kits list screws up dependency checking
kits = ['vtk_kit']
cats = ['Writers']
help = """TBD
"""
class stlWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes STL format data.
"""
class vtiWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes VTK image data or structured points in the VTK XML
format. The data attribute is compressed.
This is the preferred way of saving image data in DeVIDE.
"""
class vtkPolyDataWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Module for writing legacy VTK polydata. vtpWRT should be
preferred for all VTK-compatible polydata storage.
"""
class vtkStructPtsWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Module for writing legacy VTK structured points data. vtiWRT
should be preferred for all VTK-compatible image data storage.
"""
class vtpWRT:
kits = ['vtk_kit']
cats = ['Writers']
help = """Writes VTK PolyData in the VTK XML format. The data attribute
is compressed.
This is the preferred way of saving PolyData in DeVIDE.
"""
|
{
"content_hash": "8d88419b85e59d50bba96d60cd37e5d8",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 87,
"avg_line_length": 31.04724409448819,
"alnum_prop": 0.6588891706822216,
"repo_name": "chrisidefix/devide",
"id": "04f76cab6e1d09617f899937201d55503189f48d",
"size": "3943",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/writers/module_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Diff",
"bytes": "1373"
},
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TwitterRecentEntries'
db.create_table('cmsplugin_twitterrecententries', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)),
('count', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=3)),
('link_hint', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
))
db.send_create_signal('djangocms_twitter', ['TwitterRecentEntries'])
# Adding model 'TwitterSearch'
db.create_table('cmsplugin_twittersearch', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)),
('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True)),
('count', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=3)),
))
db.send_create_signal('djangocms_twitter', ['TwitterSearch'])
def backwards(self, orm):
# Deleting model 'TwitterRecentEntries'
db.delete_table('cmsplugin_twitterrecententries')
# Deleting model 'TwitterSearch'
db.delete_table('cmsplugin_twittersearch')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 6, 23, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'djangocms_twitter.twitterrecententries': {
'Meta': {'object_name': 'TwitterRecentEntries', 'db_table': "'cmsplugin_twitterrecententries'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
'link_hint': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
'djangocms_twitter.twittersearch': {
'Meta': {'object_name': 'TwitterSearch', 'db_table': "'cmsplugin_twittersearch'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'})
}
}
complete_apps = ['djangocms_twitter']
|
{
"content_hash": "830d3aebc72ed4b1566e2940b3e1b36f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 156,
"avg_line_length": 65.31168831168831,
"alnum_prop": 0.5949492940942533,
"repo_name": "nephila/djangocms_twitter",
"id": "4c295f475057de417a7a9e66d6cb7487541cef6c",
"size": "5053",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "djangocms_twitter/south_migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "23361"
}
],
"symlink_target": ""
}
|
from django.conf import settings
import djclick as click
from md.models import Agency, Stop
from tsdata.dataset_facts import compute_dataset_facts
@click.command()
def command():
facts = compute_dataset_facts(Agency, Stop, settings.MD_KEY)
for fact in facts:
click.echo(fact)
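# Invoked as a standard Django management command, e.g.:
#   python manage.py md_dataset_facts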
|
{
"content_hash": "ca63ceb63a16b3d7cdef3b9430d24bb9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 64,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.7423728813559322,
"repo_name": "OpenDataPolicingNC/Traffic-Stops",
"id": "506f6d616868da2b80b0d611b08cb5ad94ba8245",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "md/management/commands/md_dataset_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14249"
},
{
"name": "Dockerfile",
"bytes": "1114"
},
{
"name": "Elixir",
"bytes": "40"
},
{
"name": "HCL",
"bytes": "2989"
},
{
"name": "HTML",
"bytes": "112505"
},
{
"name": "JavaScript",
"bytes": "99858"
},
{
"name": "Jupyter Notebook",
"bytes": "130974"
},
{
"name": "Makefile",
"bytes": "2662"
},
{
"name": "PLpgSQL",
"bytes": "11003"
},
{
"name": "Python",
"bytes": "261956"
},
{
"name": "SaltStack",
"bytes": "10013"
},
{
"name": "Scheme",
"bytes": "20526"
},
{
"name": "Shell",
"bytes": "250814"
}
],
"symlink_target": ""
}
|
__version__ = '2.1.0dev0'
from .vegalite import *
def load_ipython_extension(ipython):
from ._magics import vega, vegalite
ipython.register_magic_function(vega, 'cell')
ipython.register_magic_function(vegalite, 'cell')
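# A minimal usage sketch (illustrative): after `%load_ext altair` in an
# IPython/Jupyter session, the cell magics registered above become available,
# e.g. a cell starting with `%%vegalite` followed by a Vega-Lite JSON spec
# (the exact cell format is defined in `._magics`; treated as an assumption).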
|
{
"content_hash": "4a152d02de8e86b2a87128e0f40dd5e5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 53,
"avg_line_length": 29.125,
"alnum_prop": 0.7124463519313304,
"repo_name": "ellisonbg/altair",
"id": "4226f083ed2e72a5dfa5282a9288ac5500d09669",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136763"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "1150719"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from imp import reload
import alexREPO.fitting as fitting
reload(fitting)
import alexREPO.circlefinder as circlefinder
def grayscale(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def cut_out(img,x,y,r):
"""
takes x,y coordinates in terms of pixels and a radius in pixels.
    Returns a boolean array that acts as a cut-out mask on the actual image.
"""
[lenx,leny] = img.shape
xcoords = np.outer(np.array(range(lenx)),np.ones(leny))
ycoords = np.outer(np.ones(lenx),np.array(range(leny)))
distancetoXY = np.sqrt((xcoords-x)**2 + (ycoords-y)**2)
return distancetoXY < r
def histogram(img,x,y,r):
#Plot Histogram of cut-out and calculate the area
image_2 = img*cut_out(img,x,y,r)
im = image_2.ravel()
img = im[np.nonzero(im)]
n,bins,patches = plt.hist(img,100, color='black')
return n,bins
def fit_histogram(x,n):
"""
takes input array with gray scale histogram and fits a gaussian.
    returns a value that lies three standard deviations off to brighter values
"""
print('give the following parameters')
print(np.amax(n),x[np.argmax(n)])
p0,fitfunc = fitting.gauss(np.max(n),x[np.argmax(n)],10) ## entries are amp,x0,sigma
res = fitting.do_fit(x[:-1],n,p0,fitfunc)
#plt.plot(np.array(range(250)),fitfunc(np.array(range(250))),'b--') # in case you want to plot your guess
    cut_off = res['params_dict']['x0']+res['params_dict']['s']*3 # go 3 sigma above the mean of the gaussian to get the cutoff
plt.plot(np.array(range(250)),res['fitfunc'](np.array(range(250))),'r-',zorder=100)
plt.show()
print('x0 and s '+ str(res['params_dict']['x0'])+' ' + str(res['params_dict']['s']))
print('cut off found at '+str(np.round(cut_off,3)) )
return cut_off
def calculate_area(img,cut_off):
"""
    takes an array of gray scale values and a cut-off value
returns the fraction of entries that lie above the chosen cut_off.
"""
return len(img[img>cut_off])/len(img[img>0])
def master_solver(filename,xs=None,ys = None, rs = None, radmin=80, radmax=110, houghaccumulator=0.6, searchrad=190, radiusreduction=0):
"""
input: takes image
converts to gray
finds dishes
cuts out dishes
evaluates color histogram
fits gaussian to find cut-off of the background
calculates area per circle
prints results (how much area is occupied by bacteria?)
output: None
"""
img = mpimg.imread(filename)
gray_img = grayscale(img) ## gray scale
    if xs is None:
circles = circlefinder.find_circle_coords(filename, radmin, radmax, houghaccumulator, searchrad) ## find dishes
xs = circles[:,1] # Note, different convention for x and y for Norbert and James...
ys = circles[:,0]
rs = circles[:,2]
rs = rs-radiusreduction
img = gray_img
areas = np.zeros(len(xs))
for i,x,y,r in zip(range(len(xs)),xs,ys,rs):
cut = cut_out(img,x,y,r)
fig = plt.figure()
ax = plt.subplot()
plt.imshow(cut*img) ### which petri dish are we checking? Need visualization
plt.show()
[n,bins] = histogram(img,x,y,r)
brightness_cut_off = fit_histogram(bins,n)
area = calculate_area(cut*img,brightness_cut_off)
print('bacterial area is '+str(np.round(area,3)))
areas[i] = np.round(area,3)
    ### finally return the initial image and add our text at the right coordinates (centre of the circles)
fig = plt.figure()
ax = plt.subplot()
plt.imshow(img,cmap='gray')
for x,y,area in zip(xs,ys,areas):
ax.text(y,x, str(area*100)+' %', style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':1})
plt.show()
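# --- illustrative sketch (not part of the original module) ------------------
# Example invocation; "dishes.jpg" is an assumed input file and the keyword
# values simply restate the defaults of master_solver above.
if __name__ == "__main__":
    master_solver("dishes.jpg", radmin=80, radmax=110,
                  houghaccumulator=0.6, searchrad=190)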
|
{
"content_hash": "39b07572230f955118fad6429a9a66ef",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 136,
"avg_line_length": 34.964285714285715,
"alnum_prop": 0.6284473953013279,
"repo_name": "AlexProutski/Casimir-Programming-2",
"id": "d6f10a25aee8a423b447eb1f1e8939bbd0d929df",
"size": "3917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grayscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "624964"
},
{
"name": "Python",
"bytes": "9525"
}
],
"symlink_target": ""
}
|
try:
import utime as time
except ImportError:
import time
import _thread
def last(l):
return l[-1]
def thread_entry(n):
# allocate a bytearray and fill it
data = bytearray(i for i in range(128))
# run a loop which allocates a small list and uses it each iteration
lst = 8 * [0]
sum = 0
for i in range(n):
sum += last(lst)
lst = [0, 0, 0, 0, 0, 0, 0, i + 1]
# check that the bytearray still has the right data
for i, b in enumerate(data):
assert i == b
# print the result of the loop and indicate we are finished
with lock:
print(sum, lst[-1])
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 8
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(thread_entry, (10000,))
# wait for threads to finish
while n_finished < n_thread:
time.sleep(1)
|
{
"content_hash": "7154c30a1877c50f00a016a5a94a5f8b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 22.317073170731707,
"alnum_prop": 0.614207650273224,
"repo_name": "husigeza/pycom-micropython-sigfox",
"id": "00527d05980ed120f26c6bc1f08b61b2c02dfec7",
"size": "1098",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/thread/stress_heap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "32133098"
},
{
"name": "C++",
"bytes": "642137"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "104211"
},
{
"name": "Objective-C",
"bytes": "10903"
},
{
"name": "Python",
"bytes": "1000724"
},
{
"name": "Shell",
"bytes": "13441"
}
],
"symlink_target": ""
}
|
import sys
from pkg_resources import resource_stream, resource_exists, resource_isdir, \
resource_listdir
import json
import re
__version__ = '0.1.2'
cache = dict()
def fetch_resource(name):
if name not in cache:
cache[name] = json.loads(resource_stream(
__name__, name).read().decode('utf-8'))
return cache[name]
def get_categories(category=None):
if category is None:
return resource_listdir(__name__, "data")
else:
return [item for item
in resource_listdir(__name__, "data/" + category)
if resource_isdir(__name__, "data/" + category + "/" + item)]
def get_files(category):
return [re.sub(r"\.json$", "", item) for item
in resource_listdir(__name__, "data/" + category)
if not resource_isdir(__name__, "data/" + category + "/" + item)]
def get_file(*components):
return fetch_resource("/".join(["data"] + list(components)) + ".json")
class CorpusLoader(object):
def __init__(self, directory):
self.directory = directory
def __getitem__(self, key):
return self.__getattr__(key)
def __getattr__(self, attr):
file_loc = "data/" + self.directory + "/" + attr + ".json"
dir_loc = "data/" + self.directory + "/" + attr
if resource_exists(__name__, file_loc):
return fetch_resource(file_loc)
elif resource_exists(__name__, dir_loc) and \
resource_isdir(__name__, dir_loc):
return CorpusLoader(self.directory + "/" + attr)
else:
raise AttributeError("no resource named " + attr)
def get_categories(self):
return get_categories(self.directory)
def get_files(self):
return get_files(self.directory)
def get_file(self, *components):
return get_file(self.directory, *components)
module = sys.modules[__name__]
for directory in resource_listdir(__name__, "data"):
setattr(module, directory.replace("-", "_"), CorpusLoader(directory))
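# --- illustrative sketch (not part of the original module) ------------------
# Minimal usage of the loaders defined above; no assumption is made about
# which corpora are bundled, the first category found is simply inspected.
if __name__ == "__main__":
    categories = get_categories()
    print("top-level categories:", categories)
    if categories:
        first = categories[0]
        print(first, "->", get_files(first) or get_categories(first))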
|
{
"content_hash": "1e9cff346074d574f2232b0c2221c353",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 29.808823529411764,
"alnum_prop": 0.5905278737049827,
"repo_name": "hugovk/pycorpora",
"id": "90be9831da289cb2418822b01921f354f8ab4c1e",
"size": "2027",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycorpora/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9688"
}
],
"symlink_target": ""
}
|
"""Test the UniFi Protect sensor platform."""
# pylint: disable=protected-access
from __future__ import annotations
from copy import copy
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from pyunifiprotect.data import NVR, Camera, Event, Sensor
from pyunifiprotect.data.base import WifiConnectionState, WiredConnectionState
from pyunifiprotect.data.nvr import EventMetadata
from pyunifiprotect.data.types import EventType, SmartDetectObjectType
from homeassistant.components.unifiprotect.const import (
ATTR_EVENT_SCORE,
DEFAULT_ATTRIBUTION,
)
from homeassistant.components.unifiprotect.sensor import (
ALL_DEVICES_SENSORS,
CAMERA_DISABLED_SENSORS,
CAMERA_SENSORS,
MOTION_SENSORS,
NVR_DISABLED_SENSORS,
NVR_SENSORS,
OBJECT_TYPE_NONE,
SENSE_SENSORS,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
time_changed,
)
@pytest.fixture(name="sensor")
async def sensor_fixture(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
mock_sensor: Sensor,
now: datetime,
):
"""Fixture for a single sensor for testing the sensor platform."""
# disable pydantic validation so mocking can happen
Sensor.__config__.validate_assignment = False
sensor_obj = mock_sensor.copy(deep=True)
sensor_obj._api = mock_entry.api
sensor_obj.name = "Test Sensor"
sensor_obj.battery_status.percentage = 10.0
sensor_obj.light_settings.is_enabled = True
sensor_obj.humidity_settings.is_enabled = True
sensor_obj.temperature_settings.is_enabled = True
sensor_obj.alarm_settings.is_enabled = True
sensor_obj.stats.light.value = 10.0
sensor_obj.stats.humidity.value = 10.0
sensor_obj.stats.temperature.value = 10.0
sensor_obj.up_since = now
sensor_obj.bluetooth_connection_state.signal_strength = -50.0
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.sensors = {
sensor_obj.id: sensor_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
    # 2 from all, 5 from sense, 12 NVR
assert_entity_counts(hass, Platform.SENSOR, 19, 14)
yield sensor_obj
Sensor.__config__.validate_assignment = True
@pytest.fixture(name="sensor_none")
async def sensor_none_fixture(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
mock_sensor: Sensor,
now: datetime,
):
"""Fixture for a single sensor for testing the sensor platform."""
# disable pydantic validation so mocking can happen
Sensor.__config__.validate_assignment = False
sensor_obj = mock_sensor.copy(deep=True)
sensor_obj._api = mock_entry.api
sensor_obj.name = "Test Sensor"
sensor_obj.battery_status.percentage = 10.0
sensor_obj.light_settings.is_enabled = False
sensor_obj.humidity_settings.is_enabled = False
sensor_obj.temperature_settings.is_enabled = False
sensor_obj.alarm_settings.is_enabled = False
sensor_obj.up_since = now
sensor_obj.bluetooth_connection_state.signal_strength = -50.0
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.sensors = {
sensor_obj.id: sensor_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
    # 2 from all, 5 from sense, 12 NVR
assert_entity_counts(hass, Platform.SENSOR, 19, 14)
yield sensor_obj
Sensor.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
mock_camera: Camera,
now: datetime,
):
"""Fixture for a single camera for testing the sensor platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.feature_flags.has_smart_detect = True
camera_obj.is_smart_detected = False
camera_obj.wired_connection_state = WiredConnectionState(phy_rate=1000)
camera_obj.wifi_connection_state = WifiConnectionState(
signal_quality=100, signal_strength=-50
)
camera_obj.stats.rx_bytes = 100.0
camera_obj.stats.tx_bytes = 100.0
camera_obj.stats.video.recording_start = now
    camera_obj.stats.storage.used = 100.0
camera_obj.stats.storage.rate = 100.0
camera_obj.voltage = 20.0
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.nvr.system_info.storage.devices = []
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
    # 3 from all, 6 from camera, 1 from motion, 12 NVR
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
yield camera_obj
Camera.__config__.validate_assignment = True
async def test_sensor_setup_sensor(
hass: HomeAssistant, mock_entry: MockEntityFixture, sensor: Sensor
):
"""Test sensor entity setup for sensor devices."""
entity_registry = er.async_get(hass)
expected_values = ("10", "10.0", "10.0", "10.0", "none")
for index, description in enumerate(SENSE_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# BLE signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor, ALL_DEVICES_SENSORS[1]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "-50"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_sensor_none(
hass: HomeAssistant, mock_entry: MockEntityFixture, sensor_none: Sensor
):
"""Test sensor entity setup for sensor devices with no sensors enabled."""
entity_registry = er.async_get(hass)
expected_values = (
"10",
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
)
for index, description in enumerate(SENSE_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor_none, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_nvr(
hass: HomeAssistant, mock_entry: MockEntityFixture, now: datetime
):
"""Test sensor entity setup for NVR device."""
mock_entry.api.bootstrap.reset_objects()
nvr: NVR = mock_entry.api.bootstrap.nvr
nvr.up_since = now
nvr.system_info.cpu.average_load = 50.0
nvr.system_info.cpu.temperature = 50.0
nvr.storage_stats.utilization = 50.0
nvr.system_info.memory.available = 50.0
nvr.system_info.memory.total = 100.0
nvr.storage_stats.storage_distribution.timelapse_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.continuous_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.detections_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.hd_usage.percentage = 50.0
nvr.storage_stats.storage_distribution.uhd_usage.percentage = 50.0
nvr.storage_stats.storage_distribution.free.percentage = 50.0
nvr.storage_stats.capacity = 50.0
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
    # 12 NVR sensors
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
entity_registry = er.async_get(hass)
expected_values = (
now.replace(second=0, microsecond=0).isoformat(),
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50",
)
for index, description in enumerate(NVR_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
if not description.entity_registry_enabled_default:
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
expected_values = ("50.0", "50.0", "50.0")
for index, description in enumerate(NVR_DISABLED_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_nvr_missing_values(
hass: HomeAssistant, mock_entry: MockEntityFixture, now: datetime
):
"""Test NVR sensor sensors if no data available."""
mock_entry.api.bootstrap.reset_objects()
nvr: NVR = mock_entry.api.bootstrap.nvr
nvr.system_info.memory.available = None
nvr.system_info.memory.total = None
nvr.up_since = None
nvr.storage_stats.capacity = None
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
    # 12 NVR sensors
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
entity_registry = er.async_get(hass)
# Uptime
description = NVR_SENSORS[0]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
    # Recording capacity
description = NVR_SENSORS[8]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == "0"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Memory
description = NVR_DISABLED_SENSORS[2]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_camera(
hass: HomeAssistant, mock_entry: MockEntityFixture, camera: Camera, now: datetime
):
"""Test sensor entity setup for camera devices."""
entity_registry = er.async_get(hass)
expected_values = (
now.replace(microsecond=0).isoformat(),
"100",
"100.0",
"20.0",
)
for index, description in enumerate(CAMERA_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, camera, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
expected_values = ("100", "100")
for index, description in enumerate(CAMERA_DISABLED_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, camera, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Wired signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, camera, ALL_DEVICES_SENSORS[2]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "1000"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# WiFi signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, camera, ALL_DEVICES_SENSORS[3]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "-50"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Detected Object
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, camera, MOTION_SENSORS[0]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == OBJECT_TYPE_NONE
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
assert state.attributes[ATTR_EVENT_SCORE] == 0
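# The update tests below exercise the integration's websocket path: build a
# modified copy of the bootstrap and device, wrap the new event in a mock
# message, and push it through mock_entry.api.ws_subscription so the entity
# state refreshes without a reload.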
async def test_sensor_update_motion(
hass: HomeAssistant, mock_entry: MockEntityFixture, camera: Camera, now: datetime
):
"""Test sensor motion entity."""
_, entity_id = ids_from_device_description(
Platform.SENSOR, camera, MOTION_SENSORS[0]
)
event = Event(
id="test_event_id",
type=EventType.SMART_DETECT,
start=now - timedelta(seconds=1),
end=None,
score=100,
smart_detect_types=[SmartDetectObjectType.PERSON],
smart_detect_event_ids=[],
camera_id=camera.id,
api=mock_entry.api,
)
new_bootstrap = copy(mock_entry.api.bootstrap)
new_camera = camera.copy()
new_camera.is_smart_detected = True
new_camera.last_smart_detect_event_id = event.id
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = event
new_bootstrap.cameras = {new_camera.id: new_camera}
new_bootstrap.events = {event.id: event}
mock_entry.api.bootstrap = new_bootstrap
mock_entry.api.ws_subscription(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == SmartDetectObjectType.PERSON.value
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
assert state.attributes[ATTR_EVENT_SCORE] == 100
async def test_sensor_update_alarm(
hass: HomeAssistant, mock_entry: MockEntityFixture, sensor: Sensor, now: datetime
):
"""Test sensor motion entity."""
_, entity_id = ids_from_device_description(
Platform.SENSOR, sensor, SENSE_SENSORS[4]
)
event_metadata = EventMetadata(sensor_id=sensor.id, alarm_type="smoke")
event = Event(
id="test_event_id",
type=EventType.SENSOR_ALARM,
start=now - timedelta(seconds=1),
end=None,
score=100,
smart_detect_types=[],
smart_detect_event_ids=[],
metadata=event_metadata,
api=mock_entry.api,
)
new_bootstrap = copy(mock_entry.api.bootstrap)
new_sensor = sensor.copy()
new_sensor.set_alarm_timeout()
new_sensor.last_alarm_event_id = event.id
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = event
new_bootstrap.sensors = {new_sensor.id: new_sensor}
new_bootstrap.events = {event.id: event}
mock_entry.api.bootstrap = new_bootstrap
mock_entry.api.ws_subscription(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == "smoke"
await time_changed(hass, 10)
|
{
"content_hash": "4a6c9f696261223921f6e36a8139aec7",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 85,
"avg_line_length": 31.717277486910994,
"alnum_prop": 0.680147463409266,
"repo_name": "GenericStudent/home-assistant",
"id": "1f5624c30a90ab7bcca0db694a751945794c0d30",
"size": "18174",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/unifiprotect/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code here.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by
#     `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current
#     character.
import codecs
import re
try:
from .error import YAMLError, Mark
from .compat import text_type, binary_type, PY3
except (ImportError, ValueError): # for Jython
from ruamel.yaml.error import YAMLError, Mark
from ruamel.yaml.compat import text_type, binary_type, PY3
__all__ = ['Reader', 'ReaderError']
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, binary_type):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `str` object (PY2) / a `bytes` object (PY3),
# - a `unicode` object (PY2) / a `str` object (PY3),
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = u''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, text_type):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+u'\0'
elif isinstance(stream, binary_type):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in u'\n\x85\u2028\u2029' \
or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
self.line += 1
self.column = 0
elif ch != u'\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or
len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, binary_type):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile(
u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
if PY3:
character = self.raw_buffer[exc.start]
else:
character = exc.object[exc.start]
if self.stream is not None:
position = self.stream_pointer - \
len(self.raw_buffer) + exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += u'\0'
self.raw_buffer = None
break
def update_raw(self, size=None):
if size is None:
size = 4096 if PY3 else 1024
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
# try:
# import psyco
# psyco.bind(Reader)
# except ImportError:
# pass
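# A minimal usage sketch (not part of the original module): feed a byte
# string through Reader and walk it with peek()/forward(); the assertions
# below rely only on the methods defined above.
if __name__ == '__main__':
    _reader = Reader(b'- a\n- b\n')
    _chars = []
    while _reader.peek() != u'\0':
        _chars.append(_reader.peek())
        _reader.forward()
    assert u''.join(_chars) == u'- a\n- b\n'
    # After consuming both lines, index/line/column reflect the position.
    print('index=%d line=%d column=%d'
          % (_reader.index, _reader.line, _reader.column))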
|
{
"content_hash": "265670666260dccc339a2c22d99fda21",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 79,
"avg_line_length": 35.691943127962084,
"alnum_prop": 0.5406984464214579,
"repo_name": "naparuba/kunai",
"id": "376c6de8c600e1ac28a479dafdfd24402a617fac",
"size": "7548",
"binary": false,
"copies": "30",
"ref": "refs/heads/master",
"path": "opsbro/misc/internalyaml/ruamel/yaml/reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "487"
},
{
"name": "C",
"bytes": "345490"
},
{
"name": "C++",
"bytes": "29298"
},
{
"name": "CSS",
"bytes": "12718"
},
{
"name": "HTML",
"bytes": "12328"
},
{
"name": "JavaScript",
"bytes": "558040"
},
{
"name": "Makefile",
"bytes": "8523"
},
{
"name": "Python",
"bytes": "2180654"
},
{
"name": "Shell",
"bytes": "18255"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
import unittest
import sys
import pickle
from test import support
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
        3 // 0  # deliberately raises ZeroDivisionError
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class PickleTest:
# Helper to check picklability
def check_pickle(self, itorg, seq):
d = pickle.dumps(itorg)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), seq)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
self.assertFalse(seq[1:])
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
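# Illustrative round trip (a sketch, not part of the test suite): pickling an
# enumerate object preserves its position, so the restored iterator resumes
# where the original left off.
#
#     it = enumerate('abc')
#     next(it)                                # -> (0, 'a')
#     list(pickle.loads(pickle.dumps(it)))    # -> [(1, 'b'), (2, 'c')]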
class EnumerateTestCase(unittest.TestCase, PickleTest):
enum = enumerate
seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')]
def test_basicfunction(self):
self.assertEqual(type(self.enum(self.seq)), self.enum)
e = self.enum(self.seq)
self.assertEqual(iter(e), e)
self.assertEqual(list(self.enum(self.seq)), self.res)
self.enum.__doc__
def test_pickle(self):
self.check_pickle(self.enum(self.seq), self.res)
def test_getitemseqn(self):
self.assertEqual(list(self.enum(G(self.seq))), self.res)
e = self.enum(G(''))
self.assertRaises(StopIteration, next, e)
def test_iteratorseqn(self):
self.assertEqual(list(self.enum(I(self.seq))), self.res)
e = self.enum(I(''))
self.assertRaises(StopIteration, next, e)
def test_iteratorgenerator(self):
self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
e = self.enum(Ig(''))
self.assertRaises(StopIteration, next, e)
def test_noniterable(self):
self.assertRaises(TypeError, self.enum, X(self.seq))
def test_illformediterable(self):
self.assertRaises(TypeError, self.enum, N(self.seq))
def test_exception_propagation(self):
self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))
def test_argumentcheck(self):
self.assertRaises(TypeError, self.enum) # no arguments
self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments
@support.cpython_only
def test_tuple_reuse(self):
# Tests an implementation detail where tuple is reused
# whenever nothing else holds a reference to it
self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
class MyEnum(enumerate):
pass
class SubclassTestCase(EnumerateTestCase):
enum = MyEnum
class TestEmpty(EnumerateTestCase):
seq, res = '', []
class TestBig(EnumerateTestCase):
seq = range(10,20000,2)
res = list(zip(range(20000), seq))
class TestReversed(unittest.TestCase, PickleTest):
def test_simple(self):
class A:
def __getitem__(self, i):
if i < 5:
return str(i)
raise StopIteration
def __len__(self):
return 5
for data in 'abc', range(5), tuple(enumerate('abc')), A(), range(1,17,5):
self.assertEqual(list(data)[::-1], list(reversed(data)))
self.assertRaises(TypeError, reversed, {})
# don't allow keyword arguments
self.assertRaises(TypeError, reversed, [], a=1)
def test_range_optimization(self):
x = range(1)
self.assertEqual(type(reversed(x)), type(iter(x)))
@support.cpython_only
def test_len(self):
# This is an implementation detail, not an interface requirement
from test.test_iterlen import len
for s in ('hello', tuple('hello'), list('hello'), range(5)):
self.assertEqual(len(reversed(s)), len(s))
r = reversed(s)
list(r)
self.assertEqual(len(r), 0)
class SeqWithWeirdLen:
called = False
def __len__(self):
if not self.called:
self.called = True
return 10
raise ZeroDivisionError
def __getitem__(self, index):
return index
r = reversed(SeqWithWeirdLen())
self.assertRaises(ZeroDivisionError, len, r)
def test_gc(self):
class Seq:
def __len__(self):
return 10
def __getitem__(self, index):
return index
s = Seq()
r = reversed(s)
s.r = r
def test_args(self):
self.assertRaises(TypeError, reversed)
self.assertRaises(TypeError, reversed, [], 'extra')
def test_bug1229429(self):
# this bug was never in reversed, it was in
# PyObject_CallMethod, and reversed_new calls that sometimes.
if not hasattr(sys, "getrefcount"):
return
def f():
pass
r = f.__reversed__ = object()
rc = sys.getrefcount(r)
for i in range(10):
try:
reversed(f)
except TypeError:
pass
else:
self.fail("non-callable __reversed__ didn't raise!")
self.assertEqual(rc, sys.getrefcount(r))
def test_objmethods(self):
# Objects must have __len__() and __getitem__() implemented.
class NoLen(object):
def __getitem__(self): return 1
nl = NoLen()
self.assertRaises(TypeError, reversed, nl)
class NoGetItem(object):
def __len__(self): return 2
ngi = NoGetItem()
self.assertRaises(TypeError, reversed, ngi)
def test_pickle(self):
for data in 'abc', range(5), tuple(enumerate('abc')), range(1,17,5):
self.check_pickle(reversed(data), list(data)[::-1])
class EnumerateStartTestCase(EnumerateTestCase):
def test_basicfunction(self):
e = self.enum(self.seq)
self.assertEqual(iter(e), e)
self.assertEqual(list(self.enum(self.seq)), self.res)
class TestStart(EnumerateStartTestCase):
enum = lambda self, i: enumerate(i, start=11)
seq, res = 'abc', [(11, 'a'), (12, 'b'), (13, 'c')]
class TestLongStart(EnumerateStartTestCase):
enum = lambda self, i: enumerate(i, start=sys.maxsize+1)
seq, res = 'abc', [(sys.maxsize+1,'a'), (sys.maxsize+2,'b'),
(sys.maxsize+3,'c')]
def test_main(verbose=None):
support.run_unittest(__name__)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(__name__)
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
{
"content_hash": "7de7dc505a8c33803f3c1070fefb90ad",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 86,
"avg_line_length": 29.53818181818182,
"alnum_prop": 0.570232672657885,
"repo_name": "mikehulluk/ProcessManager",
"id": "2e904cf8789f07c52efffa5d075907a3d9e1e738",
"size": "8123",
"binary": false,
"copies": "28",
"ref": "refs/heads/master",
"path": "www/js/brython/Lib/test/test_enumerate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "258"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "JavaScript",
"bytes": "4545555"
},
{
"name": "Python",
"bytes": "13703493"
},
{
"name": "R",
"bytes": "2918"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_admin_state_pac import TapiCommonAdminStatePac # noqa: F401,E501
from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState # noqa: F401,E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server.models.tapi_common_capacity_pac import TapiCommonCapacityPac # noqa: F401,E501
from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_topology_cost_characteristic import TapiTopologyCostCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_latency_characteristic import TapiTopologyLatencyCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_node_edge_point import TapiTopologyNodeEdgePoint # noqa: F401,E501
from tapi_server.models.tapi_topology_node_edge_point_ref import TapiTopologyNodeEdgePointRef # noqa: F401,E501
from tapi_server.models.tapi_topology_node_rule_group import TapiTopologyNodeRuleGroup # noqa: F401,E501
from tapi_server.models.tapi_topology_topology_ref import TapiTopologyTopologyRef # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_cost_pac import TapiTopologyTransferCostPac # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_integrity_pac import TapiTopologyTransferIntegrityPac # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_timing_pac import TapiTopologyTransferTimingPac # noqa: F401,E501
from tapi_server import util
class TapiTopologyNode(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None, available_capacity=None, total_potential_capacity=None, name=None, uuid=None, cost_characteristic=None, error_characteristic=None, unavailable_time_characteristic=None, server_integrity_process_characteristic=None, delivery_order_characteristic=None, repeat_delivery_characteristic=None, loss_characteristic=None, latency_characteristic=None, layer_protocol_name=None, encap_topology=None, owned_node_edge_point=None, node_rule_group=None, aggregated_node_edge_point=None): # noqa: E501
"""TapiTopologyNode - a model defined in OpenAPI
:param operational_state: The operational_state of this TapiTopologyNode. # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiTopologyNode. # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param administrative_state: The administrative_state of this TapiTopologyNode. # noqa: E501
:type administrative_state: TapiCommonAdministrativeState
:param available_capacity: The available_capacity of this TapiTopologyNode. # noqa: E501
:type available_capacity: TapiCommonCapacity
:param total_potential_capacity: The total_potential_capacity of this TapiTopologyNode. # noqa: E501
:type total_potential_capacity: TapiCommonCapacity
:param name: The name of this TapiTopologyNode. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiTopologyNode. # noqa: E501
:type uuid: str
:param cost_characteristic: The cost_characteristic of this TapiTopologyNode. # noqa: E501
:type cost_characteristic: List[TapiTopologyCostCharacteristic]
:param error_characteristic: The error_characteristic of this TapiTopologyNode. # noqa: E501
:type error_characteristic: str
:param unavailable_time_characteristic: The unavailable_time_characteristic of this TapiTopologyNode. # noqa: E501
:type unavailable_time_characteristic: str
:param server_integrity_process_characteristic: The server_integrity_process_characteristic of this TapiTopologyNode. # noqa: E501
:type server_integrity_process_characteristic: str
:param delivery_order_characteristic: The delivery_order_characteristic of this TapiTopologyNode. # noqa: E501
:type delivery_order_characteristic: str
:param repeat_delivery_characteristic: The repeat_delivery_characteristic of this TapiTopologyNode. # noqa: E501
:type repeat_delivery_characteristic: str
:param loss_characteristic: The loss_characteristic of this TapiTopologyNode. # noqa: E501
:type loss_characteristic: str
:param latency_characteristic: The latency_characteristic of this TapiTopologyNode. # noqa: E501
:type latency_characteristic: List[TapiTopologyLatencyCharacteristic]
:param layer_protocol_name: The layer_protocol_name of this TapiTopologyNode. # noqa: E501
:type layer_protocol_name: List[TapiCommonLayerProtocolName]
:param encap_topology: The encap_topology of this TapiTopologyNode. # noqa: E501
:type encap_topology: TapiTopologyTopologyRef
:param owned_node_edge_point: The owned_node_edge_point of this TapiTopologyNode. # noqa: E501
:type owned_node_edge_point: List[TapiTopologyNodeEdgePoint]
:param node_rule_group: The node_rule_group of this TapiTopologyNode. # noqa: E501
:type node_rule_group: List[TapiTopologyNodeRuleGroup]
:param aggregated_node_edge_point: The aggregated_node_edge_point of this TapiTopologyNode. # noqa: E501
:type aggregated_node_edge_point: List[TapiTopologyNodeEdgePointRef]
"""
self.openapi_types = {
'operational_state': TapiCommonOperationalState,
'lifecycle_state': TapiCommonLifecycleState,
'administrative_state': TapiCommonAdministrativeState,
'available_capacity': TapiCommonCapacity,
'total_potential_capacity': TapiCommonCapacity,
'name': List[TapiCommonNameAndValue],
'uuid': str,
'cost_characteristic': List[TapiTopologyCostCharacteristic],
'error_characteristic': str,
'unavailable_time_characteristic': str,
'server_integrity_process_characteristic': str,
'delivery_order_characteristic': str,
'repeat_delivery_characteristic': str,
'loss_characteristic': str,
'latency_characteristic': List[TapiTopologyLatencyCharacteristic],
'layer_protocol_name': List[TapiCommonLayerProtocolName],
'encap_topology': TapiTopologyTopologyRef,
'owned_node_edge_point': List[TapiTopologyNodeEdgePoint],
'node_rule_group': List[TapiTopologyNodeRuleGroup],
'aggregated_node_edge_point': List[TapiTopologyNodeEdgePointRef]
}
self.attribute_map = {
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'administrative_state': 'administrative-state',
'available_capacity': 'available-capacity',
'total_potential_capacity': 'total-potential-capacity',
'name': 'name',
'uuid': 'uuid',
'cost_characteristic': 'cost-characteristic',
'error_characteristic': 'error-characteristic',
'unavailable_time_characteristic': 'unavailable-time-characteristic',
'server_integrity_process_characteristic': 'server-integrity-process-characteristic',
'delivery_order_characteristic': 'delivery-order-characteristic',
'repeat_delivery_characteristic': 'repeat-delivery-characteristic',
'loss_characteristic': 'loss-characteristic',
'latency_characteristic': 'latency-characteristic',
'layer_protocol_name': 'layer-protocol-name',
'encap_topology': 'encap-topology',
'owned_node_edge_point': 'owned-node-edge-point',
'node_rule_group': 'node-rule-group',
'aggregated_node_edge_point': 'aggregated-node-edge-point'
}
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._administrative_state = administrative_state
self._available_capacity = available_capacity
self._total_potential_capacity = total_potential_capacity
self._name = name
self._uuid = uuid
self._cost_characteristic = cost_characteristic
self._error_characteristic = error_characteristic
self._unavailable_time_characteristic = unavailable_time_characteristic
self._server_integrity_process_characteristic = server_integrity_process_characteristic
self._delivery_order_characteristic = delivery_order_characteristic
self._repeat_delivery_characteristic = repeat_delivery_characteristic
self._loss_characteristic = loss_characteristic
self._latency_characteristic = latency_characteristic
self._layer_protocol_name = layer_protocol_name
self._encap_topology = encap_topology
self._owned_node_edge_point = owned_node_edge_point
self._node_rule_group = node_rule_group
self._aggregated_node_edge_point = aggregated_node_edge_point
@classmethod
def from_dict(cls, dikt) -> 'TapiTopologyNode':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.topology.Node of this TapiTopologyNode. # noqa: E501
:rtype: TapiTopologyNode
"""
return util.deserialize_model(dikt, cls)
@property
def operational_state(self):
"""Gets the operational_state of this TapiTopologyNode.
:return: The operational_state of this TapiTopologyNode.
:rtype: TapiCommonOperationalState
"""
return self._operational_state
@operational_state.setter
def operational_state(self, operational_state):
"""Sets the operational_state of this TapiTopologyNode.
:param operational_state: The operational_state of this TapiTopologyNode.
:type operational_state: TapiCommonOperationalState
"""
self._operational_state = operational_state
@property
def lifecycle_state(self):
"""Gets the lifecycle_state of this TapiTopologyNode.
:return: The lifecycle_state of this TapiTopologyNode.
:rtype: TapiCommonLifecycleState
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""Sets the lifecycle_state of this TapiTopologyNode.
:param lifecycle_state: The lifecycle_state of this TapiTopologyNode.
:type lifecycle_state: TapiCommonLifecycleState
"""
self._lifecycle_state = lifecycle_state
@property
def administrative_state(self):
"""Gets the administrative_state of this TapiTopologyNode.
:return: The administrative_state of this TapiTopologyNode.
:rtype: TapiCommonAdministrativeState
"""
return self._administrative_state
@administrative_state.setter
def administrative_state(self, administrative_state):
"""Sets the administrative_state of this TapiTopologyNode.
:param administrative_state: The administrative_state of this TapiTopologyNode.
:type administrative_state: TapiCommonAdministrativeState
"""
self._administrative_state = administrative_state
@property
def available_capacity(self):
"""Gets the available_capacity of this TapiTopologyNode.
:return: The available_capacity of this TapiTopologyNode.
:rtype: TapiCommonCapacity
"""
return self._available_capacity
@available_capacity.setter
def available_capacity(self, available_capacity):
"""Sets the available_capacity of this TapiTopologyNode.
:param available_capacity: The available_capacity of this TapiTopologyNode.
:type available_capacity: TapiCommonCapacity
"""
self._available_capacity = available_capacity
@property
def total_potential_capacity(self):
"""Gets the total_potential_capacity of this TapiTopologyNode.
:return: The total_potential_capacity of this TapiTopologyNode.
:rtype: TapiCommonCapacity
"""
return self._total_potential_capacity
@total_potential_capacity.setter
def total_potential_capacity(self, total_potential_capacity):
"""Sets the total_potential_capacity of this TapiTopologyNode.
:param total_potential_capacity: The total_potential_capacity of this TapiTopologyNode.
:type total_potential_capacity: TapiCommonCapacity
"""
self._total_potential_capacity = total_potential_capacity
@property
def name(self):
"""Gets the name of this TapiTopologyNode.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiTopologyNode.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiTopologyNode.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiTopologyNode.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def uuid(self):
"""Gets the uuid of this TapiTopologyNode.
        UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. A UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6  # noqa: E501
:return: The uuid of this TapiTopologyNode.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TapiTopologyNode.
        UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. A UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6  # noqa: E501
:param uuid: The uuid of this TapiTopologyNode.
:type uuid: str
"""
self._uuid = uuid
@property
def cost_characteristic(self):
"""Gets the cost_characteristic of this TapiTopologyNode.
The list of costs where each cost relates to some aspect of the TopologicalEntity. # noqa: E501
:return: The cost_characteristic of this TapiTopologyNode.
:rtype: List[TapiTopologyCostCharacteristic]
"""
return self._cost_characteristic
@cost_characteristic.setter
def cost_characteristic(self, cost_characteristic):
"""Sets the cost_characteristic of this TapiTopologyNode.
The list of costs where each cost relates to some aspect of the TopologicalEntity. # noqa: E501
:param cost_characteristic: The cost_characteristic of this TapiTopologyNode.
:type cost_characteristic: List[TapiTopologyCostCharacteristic]
"""
self._cost_characteristic = cost_characteristic
@property
def error_characteristic(self):
"""Gets the error_characteristic of this TapiTopologyNode.
Describes the degree to which the signal propagated can be errored. Applies to TDM systems as the errored signal will be propagated and not packet as errored packets will be discarded. # noqa: E501
:return: The error_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._error_characteristic
@error_characteristic.setter
def error_characteristic(self, error_characteristic):
"""Sets the error_characteristic of this TapiTopologyNode.
Describes the degree to which the signal propagated can be errored. Applies to TDM systems as the errored signal will be propagated and not packet as errored packets will be discarded. # noqa: E501
:param error_characteristic: The error_characteristic of this TapiTopologyNode.
:type error_characteristic: str
"""
self._error_characteristic = error_characteristic
@property
def unavailable_time_characteristic(self):
"""Gets the unavailable_time_characteristic of this TapiTopologyNode.
Describes the duration for which there may be no valid signal propagated. # noqa: E501
:return: The unavailable_time_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._unavailable_time_characteristic
@unavailable_time_characteristic.setter
def unavailable_time_characteristic(self, unavailable_time_characteristic):
"""Sets the unavailable_time_characteristic of this TapiTopologyNode.
Describes the duration for which there may be no valid signal propagated. # noqa: E501
:param unavailable_time_characteristic: The unavailable_time_characteristic of this TapiTopologyNode.
:type unavailable_time_characteristic: str
"""
self._unavailable_time_characteristic = unavailable_time_characteristic
@property
def server_integrity_process_characteristic(self):
"""Gets the server_integrity_process_characteristic of this TapiTopologyNode.
Describes the effect of any server integrity enhancement process on the characteristics of the TopologicalEntity. # noqa: E501
:return: The server_integrity_process_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._server_integrity_process_characteristic
@server_integrity_process_characteristic.setter
def server_integrity_process_characteristic(self, server_integrity_process_characteristic):
"""Sets the server_integrity_process_characteristic of this TapiTopologyNode.
Describes the effect of any server integrity enhancement process on the characteristics of the TopologicalEntity. # noqa: E501
:param server_integrity_process_characteristic: The server_integrity_process_characteristic of this TapiTopologyNode.
:type server_integrity_process_characteristic: str
"""
self._server_integrity_process_characteristic = server_integrity_process_characteristic
@property
def delivery_order_characteristic(self):
"""Gets the delivery_order_characteristic of this TapiTopologyNode.
Describes the degree to which packets will be delivered out of sequence. Does not apply to TDM as the TDM protocols maintain strict order. # noqa: E501
:return: The delivery_order_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._delivery_order_characteristic
@delivery_order_characteristic.setter
def delivery_order_characteristic(self, delivery_order_characteristic):
"""Sets the delivery_order_characteristic of this TapiTopologyNode.
Describes the degree to which packets will be delivered out of sequence. Does not apply to TDM as the TDM protocols maintain strict order. # noqa: E501
:param delivery_order_characteristic: The delivery_order_characteristic of this TapiTopologyNode.
:type delivery_order_characteristic: str
"""
self._delivery_order_characteristic = delivery_order_characteristic
@property
def repeat_delivery_characteristic(self):
"""Gets the repeat_delivery_characteristic of this TapiTopologyNode.
Primarily applies to packet systems where a packet may be delivered more than once (in fault recovery for example). It can also apply to TDM where several frames may be received twice due to switching in a system with a large differential propagation delay. # noqa: E501
:return: The repeat_delivery_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._repeat_delivery_characteristic
@repeat_delivery_characteristic.setter
def repeat_delivery_characteristic(self, repeat_delivery_characteristic):
"""Sets the repeat_delivery_characteristic of this TapiTopologyNode.
Primarily applies to packet systems where a packet may be delivered more than once (in fault recovery for example). It can also apply to TDM where several frames may be received twice due to switching in a system with a large differential propagation delay. # noqa: E501
:param repeat_delivery_characteristic: The repeat_delivery_characteristic of this TapiTopologyNode.
:type repeat_delivery_characteristic: str
"""
self._repeat_delivery_characteristic = repeat_delivery_characteristic
@property
def loss_characteristic(self):
"""Gets the loss_characteristic of this TapiTopologyNode.
Describes the acceptable characteristic of lost packets where loss may result from discard due to errors or overflow. Applies to packet systems and not TDM (as for TDM errored signals are propagated unless grossly errored and overflow/underflow turns into timing slips). # noqa: E501
:return: The loss_characteristic of this TapiTopologyNode.
:rtype: str
"""
return self._loss_characteristic
@loss_characteristic.setter
def loss_characteristic(self, loss_characteristic):
"""Sets the loss_characteristic of this TapiTopologyNode.
Describes the acceptable characteristic of lost packets where loss may result from discard due to errors or overflow. Applies to packet systems and not TDM (as for TDM errored signals are propagated unless grossly errored and overflow/underflow turns into timing slips). # noqa: E501
:param loss_characteristic: The loss_characteristic of this TapiTopologyNode.
:type loss_characteristic: str
"""
self._loss_characteristic = loss_characteristic
@property
def latency_characteristic(self):
"""Gets the latency_characteristic of this TapiTopologyNode.
The effect on the latency of a queuing process. This only has significant effect for packet based systems and has a complex characteristic. # noqa: E501
:return: The latency_characteristic of this TapiTopologyNode.
:rtype: List[TapiTopologyLatencyCharacteristic]
"""
return self._latency_characteristic
@latency_characteristic.setter
def latency_characteristic(self, latency_characteristic):
"""Sets the latency_characteristic of this TapiTopologyNode.
The effect on the latency of a queuing process. This only has significant effect for packet based systems and has a complex characteristic. # noqa: E501
:param latency_characteristic: The latency_characteristic of this TapiTopologyNode.
:type latency_characteristic: List[TapiTopologyLatencyCharacteristic]
"""
self._latency_characteristic = latency_characteristic
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiTopologyNode.
none # noqa: E501
:return: The layer_protocol_name of this TapiTopologyNode.
:rtype: List[TapiCommonLayerProtocolName]
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiTopologyNode.
none # noqa: E501
:param layer_protocol_name: The layer_protocol_name of this TapiTopologyNode.
:type layer_protocol_name: List[TapiCommonLayerProtocolName]
"""
self._layer_protocol_name = layer_protocol_name
@property
def encap_topology(self):
"""Gets the encap_topology of this TapiTopologyNode.
:return: The encap_topology of this TapiTopologyNode.
:rtype: TapiTopologyTopologyRef
"""
return self._encap_topology
@encap_topology.setter
def encap_topology(self, encap_topology):
"""Sets the encap_topology of this TapiTopologyNode.
:param encap_topology: The encap_topology of this TapiTopologyNode.
:type encap_topology: TapiTopologyTopologyRef
"""
self._encap_topology = encap_topology
@property
def owned_node_edge_point(self):
"""Gets the owned_node_edge_point of this TapiTopologyNode.
none # noqa: E501
:return: The owned_node_edge_point of this TapiTopologyNode.
:rtype: List[TapiTopologyNodeEdgePoint]
"""
return self._owned_node_edge_point
@owned_node_edge_point.setter
def owned_node_edge_point(self, owned_node_edge_point):
"""Sets the owned_node_edge_point of this TapiTopologyNode.
none # noqa: E501
:param owned_node_edge_point: The owned_node_edge_point of this TapiTopologyNode.
:type owned_node_edge_point: List[TapiTopologyNodeEdgePoint]
"""
self._owned_node_edge_point = owned_node_edge_point
@property
def node_rule_group(self):
"""Gets the node_rule_group of this TapiTopologyNode.
none # noqa: E501
:return: The node_rule_group of this TapiTopologyNode.
:rtype: List[TapiTopologyNodeRuleGroup]
"""
return self._node_rule_group
@node_rule_group.setter
def node_rule_group(self, node_rule_group):
"""Sets the node_rule_group of this TapiTopologyNode.
none # noqa: E501
:param node_rule_group: The node_rule_group of this TapiTopologyNode.
:type node_rule_group: List[TapiTopologyNodeRuleGroup]
"""
self._node_rule_group = node_rule_group
@property
def aggregated_node_edge_point(self):
"""Gets the aggregated_node_edge_point of this TapiTopologyNode.
none # noqa: E501
:return: The aggregated_node_edge_point of this TapiTopologyNode.
:rtype: List[TapiTopologyNodeEdgePointRef]
"""
return self._aggregated_node_edge_point
@aggregated_node_edge_point.setter
def aggregated_node_edge_point(self, aggregated_node_edge_point):
"""Sets the aggregated_node_edge_point of this TapiTopologyNode.
none # noqa: E501
:param aggregated_node_edge_point: The aggregated_node_edge_point of this TapiTopologyNode.
:type aggregated_node_edge_point: List[TapiTopologyNodeEdgePointRef]
"""
self._aggregated_node_edge_point = aggregated_node_edge_point
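# Minimal deserialization sketch (hypothetical payload, not part of the
# generated code): from_dict delegates to util.deserialize_model, which maps
# the hyphenated wire names in attribute_map onto the typed attributes above.
#
#     node = TapiTopologyNode.from_dict({
#         'uuid': 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
#         'error-characteristic': 'none',
#     })
#     assert node.uuid == 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6'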
|
{
"content_hash": "59ed64befdcae1e70c3b56b6f3b16ea0",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 599,
"avg_line_length": 46.85880398671096,
"alnum_prop": 0.7022935942429721,
"repo_name": "OpenNetworkingFoundation/Snowmass-ONFOpenTransport",
"id": "887b4a851c53a095b80bf81095b791f92da05e17",
"size": "28226",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_topology_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "653"
},
{
"name": "D",
"bytes": "2405"
},
{
"name": "HTML",
"bytes": "137234"
},
{
"name": "Python",
"bytes": "937060"
},
{
"name": "Shell",
"bytes": "4361"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016, Marcelo Leal
Description: Simple Azure Media Services Python library
License: MIT (see LICENSE.txt file for details)
"""
import os
import sys
import json
import azurerm
import time
#import pytz
import logging
import datetime
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("ERROR: Expecting config.json in current folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
# Get the access token...
response = azurerm.get_ams_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Initialization...
print ("\n-----------------------= AMS Py =----------------------");
print ("Simple Python Library for Azure Media Services REST API");
print ("-------------------------------------------------------\n");
### list an asset access policies
print ("\n001 >>> Listing a Asset Access Policies")
response = azurerm.list_asset_accesspolicy(access_token)
if (response.status_code == 200):
resjson = response.json()
print("GET Status......................: " + str(response.status_code))
for ap in resjson['d']['results']:
print("Asset Access Policie Id.........: " + str(ap['Id']))
else:
print("GET Status: " + str(response.status_code) + " - Asset Access Policy Listing ERROR." + str(response.content))
|
{
"content_hash": "cd5505b88b68e45a00ff394371676508",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 123,
"avg_line_length": 42.3448275862069,
"alnum_prop": 0.5496742671009772,
"repo_name": "gbowerman/azurerm",
"id": "e2c0ed2c86d624c3a0c1b9e2b2759eef0508819b",
"size": "2456",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/media_services/list_access_policies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "Python",
"bytes": "235742"
}
],
"symlink_target": ""
}
|
"""Converter for DRAGNN checkpoint+master-spec files to TF SavedModels.
This script loads a DRAGNN model from a checkpoint and master-spec and saves it
to a TF SavedModel checkpoint. The checkpoint and master-spec together must
form a complete model - see the conll_checkpoint_converter.py for an example
of how to convert CONLL checkpoints, since they are not complete.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from google.protobuf import text_format
from dragnn.protos import spec_pb2
from dragnn.python import dragnn_model_saver_lib as saver_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master_spec', None, 'Path to task context with '
'inputs and parameters for feature extractors.')
flags.DEFINE_string('params_path', None, 'Path to trained model parameters.')
flags.DEFINE_string('export_path', '', 'Output path for exported servo model.')
flags.DEFINE_bool('export_moving_averages', False,
'Whether to export the moving average parameters.')
def export(master_spec_path, params_path, export_path,
export_moving_averages):
"""Restores a model and exports it in SavedModel form.
This method loads a graph specified by the spec at master_spec_path and the
params in params_path. It then saves the model in SavedModel format to the
location specified in export_path.
Args:
master_spec_path: Path to a proto-text master spec.
params_path: Path to the parameters file to export.
export_path: Path to export the SavedModel to.
export_moving_averages: Whether to export the moving average parameters.
"""
graph = tf.Graph()
master_spec = spec_pb2.MasterSpec()
with tf.gfile.FastGFile(master_spec_path) as fin:
text_format.Parse(fin.read(), master_spec)
# Remove '/' if it exists at the end of the export path, ensuring that
# path utils work correctly.
stripped_path = export_path.rstrip('/')
saver_lib.clean_output_paths(stripped_path)
short_to_original = saver_lib.shorten_resource_paths(master_spec)
saver_lib.export_master_spec(master_spec, graph)
saver_lib.export_to_graph(master_spec, params_path, stripped_path, graph,
export_moving_averages)
saver_lib.export_assets(master_spec, short_to_original, stripped_path)
def main(unused_argv):
# Run the exporter.
export(FLAGS.master_spec, FLAGS.params_path,
FLAGS.export_path, FLAGS.export_moving_averages)
tf.logging.info('Export complete.')
if __name__ == '__main__':
tf.app.run()
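# Example invocation (hypothetical paths, for illustration only):
#
#   python dragnn_model_saver.py \
#     --master_spec=/path/to/master_spec.textproto \
#     --params_path=/path/to/checkpoints/best \
#     --export_path=/path/to/saved_model \
#     --export_moving_averages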
|
{
"content_hash": "41617a48518438553a257d3512d83962",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 37.357142857142854,
"alnum_prop": 0.7269598470363289,
"repo_name": "jiaphuan/models",
"id": "bb0170d8b638c68aeecf616cf72a0afc5fea9ff9",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/syntaxnet/dragnn/python/dragnn_model_saver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
}
|
import uuid
import psycopg2.extras
from django import forms
from django.db import models
__all__ = ('CharField', 'EmailField', 'SlugField', 'URLField', 'UUIDField')
class CharField(models.Field):
def formfield(self, **kwargs):
defaults = {'widget': forms.TextInput}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
def db_type(self, connection=None):
if self.max_length:
return 'varchar(%s)' % self.max_length
return 'text'
class EmailField(CharField):
def formfield(self, **kwargs):
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class SlugField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('db_index', True)
super(SlugField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class URLField(CharField):
def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
self.verify_exists = verify_exists
super(URLField, self).__init__(verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.URLField, 'verify_exists': self.verify_exists}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
# Register the adapter so we can use UUID objects.
psycopg2.extras.register_uuid()
class UUIDField(CharField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', uuid.uuid4)
kwargs.setdefault('editable', not kwargs.get('primary_key', False))
super(UUIDField, self).__init__(*args, **kwargs)
def db_type(self, connection=None):
return 'uuid'
def get_db_prep_value(self, value):
return self.to_python(value)
def to_python(self, value):
if not value:
return None
if not isinstance(value, uuid.UUID):
value = uuid.UUID(value)
return value
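# A minimal usage sketch (hypothetical model; assumes a PostgreSQL backend,
# since UUIDField relies on the psycopg2 uuid adapter registered above):
#
#   class Article(models.Model):
#       id = UUIDField(primary_key=True)   # stored as a native 'uuid' column
#       slug = SlugField(max_length=80)    # 'varchar(80)', indexed by default
#       body = CharField()                 # no max_length, so db_type is 'text'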
|
{
"content_hash": "5f7d33b46272bd30f36a0e0675ee796a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 86,
"avg_line_length": 29.653333333333332,
"alnum_prop": 0.6371402877697842,
"repo_name": "jpwatts/django-pgfields",
"id": "d2d05c264e367ef20d3690181ff697ed37403efe",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgfields/db/models/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3103"
}
],
"symlink_target": ""
}
|
"""Contrib NDArray API of MXNet."""
from __future__ import absolute_import
import math
import numpy as np
from ..context import current_context
from ..random import uniform
from ..base import _as_list
from . import ndarray
try:
from .gen_contrib import *
except ImportError:
pass
__all__ = ["rand_zipfian", "foreach", "while_loop", "cond", "isinf", "isfinite", "isnan"]
# pylint: disable=line-too-long
def rand_zipfian(true_classes, num_sampled, range_max, ctx=None):
"""Draw random samples from an approximately log-uniform or Zipfian distribution.
    This operation randomly samples *num_sampled* candidates in the range of integers [0, range_max).
The elements of sampled_candidates are drawn with replacement from the base distribution.
The base distribution for this operator is an approximately log-uniform or Zipfian distribution:
P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
This sampler is useful when the true classes approximately follow such a distribution.
    For example, this can be the case when the classes represent words in a lexicon \
    sorted in decreasing order of frequency. If your classes are not ordered by decreasing frequency, do not use this op.
    Additionally, it returns the number of times each of the \
    true classes and the sampled classes is expected to occur.
Parameters
----------
true_classes : NDArray
A 1-D NDArray of the target classes.
num_sampled: int
The number of classes to randomly sample.
range_max: int
The number of possible classes.
ctx : Context
Device context of output. Default is current context.
Returns
-------
samples: NDArray
The sampled candidate classes in 1-D `int64` dtype.
expected_count_true: NDArray
The expected count for true classes in 1-D `float64` dtype.
expected_count_sample: NDArray
The expected count for sampled candidates in 1-D `float64` dtype.
Examples
--------
>>> true_cls = mx.nd.array([3])
>>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)
>>> samples
[1 3 3 3]
<NDArray 4 @cpu(0)>
    >>> exp_count_true
    [ 0.49815516]
    <NDArray 1 @cpu(0)>
    >>> exp_count_sample
    [ 0.90517756  0.49815516  0.49815516  0.49815516]
    <NDArray 4 @cpu(0)>
"""
if ctx is None:
ctx = current_context()
log_range = math.log(range_max + 1)
rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx)
# make sure sampled_classes are in the range of [0, range_max)
sampled_classes = (rand.exp() - 1).astype('int64') % range_max
true_cls = true_classes.as_in_context(ctx).astype('float64')
expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled
    # cast sampled classes to fp64 to avoid integer division
sampled_cls_fp64 = sampled_classes.astype('float64')
expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
expected_count_sampled = expected_prob_sampled * num_sampled
return sampled_classes, expected_count_true, expected_count_sampled
# pylint: enable=line-too-long
def _flatten(args, inout_str):
if isinstance(args, ndarray.NDArray):
return [args], int(0)
assert isinstance(args, (list, tuple)), \
"%s must be (nested) list of NDArray, " \
"but got %s of type %s"%(inout_str, str(args), str(type(args)))
flat = []
fmts = []
for i in args:
arg, fmt = _flatten(i, inout_str)
flat.extend(arg)
fmts.append(fmt)
return flat, fmts
def _regroup(args, fmt):
if isinstance(fmt, int):
if fmt == 0:
return args[0], args[1:]
return args[:fmt], args[fmt:]
assert isinstance(args, (list, tuple)), \
"output must be (nested) list of NDArray, " \
"but got %s of type %s"%(str(args), str(type(args)))
ret = []
for i in fmt:
res, args = _regroup(args, i)
ret.append(res)
return ret, args
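# A small sketch of how _flatten/_regroup round-trip a nested structure
# (a, b and c stand for NDArrays; the fmt values are what the code above produces):
#
#   flat, fmt = _flatten([a, [b, c]], "example")   # flat == [a, b, c]
#   # fmt == [0, [0, 0]] records the nesting, so the flat list can be rebuilt:
#   nested, rest = _regroup(flat, fmt)             # nested == [a, [b, c]], rest == []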
def foreach(body, data, init_states):
"""Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop and body has the computation for an iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below::
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
size as data. states is a list of NDArrays and have the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray::
states = init_states
outs = []
for i in data.shape[0]:
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or nested lists of NDArrays.
The initial values of the loop states.
Returns
-------
outputs: an NDArray or nested lists of NDArrays.
The output data concatenated from the output of all iterations.
states: an NDArray or nested lists of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
"""
def check_input(inputs, in_type, msg):
is_NDArray_or_list = True
if isinstance(inputs, list):
for i in inputs:
if not isinstance(i, in_type):
is_NDArray_or_list = False
break
else:
is_NDArray_or_list = isinstance(inputs, in_type)
assert is_NDArray_or_list, msg
flatten, _ = _flatten(data, "foreach input")
check_input(flatten, ndarray.NDArray,
"data should be an NDArray or a nested list of NDArrays")
flatten, _ = _flatten(init_states, "foreach states")
check_input(flatten, ndarray.NDArray,
"init_states should be an NDArray or a nested list of NDArrays")
not_data_list = isinstance(data, ndarray.NDArray)
num_iters = data.shape[0] if not_data_list else data[0].shape[0]
states = init_states
outputs = []
for i in range(num_iters):
if not_data_list:
eles = data[i]
else:
eles = [d[i] for d in data]
outs, states = body(eles, states)
outs, out_fmt = _flatten(outs, "foreach output")
outputs.append(outs)
outputs = zip(*outputs)
tmp_outputs = []
for out in outputs:
tmp_outputs.append(ndarray.op.stack(*out))
outputs = tmp_outputs
outputs, _ = _regroup(outputs, out_fmt)
return (outputs, states)
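# A second sketch (hypothetical data): foreach as a cumulative sum over axis 0,
# carrying the running total as the single loop state:
#
#   step = lambda x, states: (x + states[0], [x + states[0]])
#   data = ndarray.array([[1, 2], [3, 4], [5, 6]])
#   outs, states = foreach(step, data, [ndarray.zeros((2,))])
#   # outs == [[1, 2], [4, 6], [9, 12]], states == [[9, 12]]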
def while_loop(cond, func, loop_vars, max_iterations=None):
"""Run a while loop with user-defined computation and loop condition.
    This operator simulates a while loop which iteratively does customized computation
as long as the condition is satisfied.
`loop_vars` is a list of NDArrays on which the computation uses.
`cond` is a user-defined function, used as the loop condition.
It consumes `loop_vars`, and produces a scalar MXNet NDArray,
indicating the termination of the loop.
The loop ends when `cond` returns false (zero).
The `cond` is variadic, and its signature should be
`cond(*loop_vars) => NDArray`.
`func` is a user-defined function, used as the loop body.
It also consumes `loop_vars`, and produces `step_output` and `new_loop_vars` at each step.
    In each step, `step_output` should contain the same number of elements.
Through all steps, the i-th element of `step_output` should have the same shape and dtype.
Also, `new_loop_vars` should contain the same number of elements as `loop_vars`,
and the corresponding element should have the same shape and dtype.
The `func` is variadic, and its signature should be
`func(*loop_vars) =>
(NDArray or nested List[NDArray] step_output, NDArray or nested List[NDArray] new_loop_vars)`.
`max_iterations` is a scalar that defines the maximum number of iterations allowed.
This function returns two lists.
The first list has the length of `|step_output|`,
in which the i-th element are all i-th elements of
`step_output` from all steps, stacked along axis 0.
The second list has the length of `|loop_vars|`,
which represents final states of loop variables.
.. warning::
For now, the axis 0 of all NDArrays in the first list are `max_iterations`,
due to lack of dynamic shape inference.
.. warning::
When `cond` is never satisfied, we assume `step_output` is empty,
because it cannot be inferred. This is different from the symbolic version.
Parameters
----------
cond: a Python function.
The loop condition.
func: a Python function.
The loop body.
loop_vars: an NDArray or nested lists of NDArrays.
The initial values of the loop variables.
max_iterations: a python int.
Maximum number of iterations.
Returns
    -------
outputs: an NDArray or nested lists of NDArrays
stacked output from each step
states: an NDArray or nested lists of NDArrays
final state
Examples
--------
>>> cond = lambda i, s: i <= 5
>>> func = lambda i, s: ([i + s], [i + 1, s + i])
>>> loop_vars = (mx.nd.array([0], dtype="int64"), mx.nd.array([1], dtype="int64"))
>>> outputs, states = mx.nd.contrib.while_loop(cond, func, loop_vars, max_iterations=10)
>>> outputs
[
[[ 1]
[ 2]
[ 4]
[ 7]
[11]
[16]
[...] # undefined value
[...]
[...]
[...]]
    <NDArray 10x1 @cpu(0)>]
>>> states
[
[6]
<NDArray 1 @cpu(0)>,
[16]
<NDArray 1 @cpu(0)>]
"""
def _to_python_scalar(inputs, type_, name):
"""Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types,
to the given type
"""
if isinstance(inputs, ndarray.NDArray):
inputs = inputs.asscalar()
try:
inputs = type_(inputs)
        except Exception:
raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__))
return inputs
def _func_wrapper(loop_vars):
"""This wrapper unifies
"func: loop_vars -> new_loop_vars"
and "func: loop_vars -> (step_output, new_loop_vars)"
into "func: loop_vars -> (None or tuple of step_outputs, tuple of new_loop_vars)
"""
step_output, new_loop_vars = func(*loop_vars)
if step_output is None:
step_output = []
if new_loop_vars is None:
new_loop_vars = []
if isinstance(step_output, tuple):
step_output = list(step_output)
if isinstance(new_loop_vars, tuple):
new_loop_vars = list(new_loop_vars)
new_loop_vars = _as_list(new_loop_vars)
if len(loop_vars) != len(new_loop_vars):
raise ValueError("The length of loop_vars should be consistent during the loop")
return step_output, new_loop_vars
if max_iterations is None:
raise ValueError("max_iterations should be specified")
    max_iterations = _to_python_scalar(max_iterations, int, "max_iterations")
    # The loop would arguably also work with empty loop_vars, but that case is
    # semantically meaningless, so we reject it.
if len(loop_vars) == 0:
raise ValueError("loop_vars should contain at least one element")
steps = 0
outputs = []
# there might not be an iteration.
out_fmt = None
not_loop_var_list = isinstance(loop_vars, ndarray.NDArray)
loop_vars = _as_list(loop_vars)
while steps < max_iterations and \
_to_python_scalar(cond(*loop_vars), bool, "Return value of cond"): # loop condition
step_output, loop_vars = _func_wrapper(loop_vars)
step_output, out_fmt = _flatten(step_output, "while output")
outputs.append(step_output)
steps += 1
if len(outputs) != steps or len(step_output) != len(outputs[0]):
raise ValueError("Number of elements in step_output should be the same in each step")
stacked_outputs = []
for i_th, items in enumerate(zip(*outputs), 1):
        # `mx.ndarray.pad` only supports 4-D or 5-D inputs for now,
        # so we cannot use it here.
items = [x.expand_dims(0) for x in items]
if steps != max_iterations and items:
            pad_shape = [max_iterations - steps] + list(items[0].shape[1:])
pad = ndarray.empty(
shape=pad_shape,
ctx=items[0].context,
dtype=items[0].dtype,
)
items = list(items) + [pad]
try:
stacked_outputs.append(ndarray.op.concat(*items, dim=0))
except ValueError:
raise ValueError("\n".join(
["Shapes of %d-th elements in step_outputs are inconsistent, which are:" % i_th] +
[" Step %d, shape is %s" % (i, str(x.shape)) for i, x in enumerate(items)]
))
if out_fmt is not None:
stacked_outputs, _ = _regroup(stacked_outputs, out_fmt)
if not_loop_var_list:
loop_vars = loop_vars[0]
return stacked_outputs, loop_vars
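# Note on the padding above: outputs always have max_iterations rows along
# axis 0 (see the docstring warning), so callers that need only the valid rows
# can slice them off. A hedged sketch, assuming the loop counter is kept as the
# first loop variable as in the docstring example:
#
#   outputs, (i, s) = while_loop(cond, func, loop_vars, max_iterations=10)
#   valid = outputs[0][:int(i.asscalar())]   # drop the undefined padding rows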
def cond(pred, then_func, else_func):
"""Run an if-then-else using user-defined condition and computation
    This operator simulates an if-like branch which chooses to do one of
the two customized computations according to the specified condition.
`pred` is a scalar MXNet NDArray,
indicating which branch of computation should be used.
`then_func` is a user-defined function, used as computation of the then branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `then_func` should be
`then_func() => NDArray or nested List[NDArray]`.
`else_func` is a user-defined function, used as computation of the else branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `else_func` should be
`else_func() => NDArray or nested List[NDArray]`.
    The `outputs` produced by `then_func` and `else_func` should have the same number
    of elements, all of which should have the same shape, dtype and stype.
    This function returns the result of the executed branch, an NDArray or
    nested lists of NDArrays.
Parameters
----------
    pred: an MXNet NDArray representing a scalar.
The branch condition.
then_func: a Python function.
The computation to be executed if `pred` is true.
else_func: a Python function.
The computation to be executed if `pred` is false.
Returns
-------
outputs: an NDArray or nested lists of NDArrays, representing the result of computation.
Examples
--------
>>> a, b = mx.nd.array([1]), mx.nd.array([2])
>>> pred = a * b < 5
>>> then_func = lambda: (a + 5) * (b + 5)
>>> else_func = lambda: (a - 5) * (b - 5)
>>> outputs = mx.nd.contrib.cond(pred, then_func, else_func)
>>> outputs[0]
[42.]
<NDArray 1 @cpu(0)>
"""
def _to_python_scalar(inputs, type_, name):
"""Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types,
to the given type
"""
if hasattr(inputs, "asscalar"):
inputs = inputs.asscalar()
try:
inputs = type_(inputs)
        except Exception:
raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__))
return inputs
branch = _to_python_scalar(pred, bool, "pred")
if branch:
return then_func()
else:
return else_func()
def isinf(data):
"""Performs an element-wise check to determine if the NDArray contains an infinite element
or not.
Parameters
----------
input : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
        The output NDArray, with same shape as input, where 1 indicates the array element is
equal to positive or negative infinity and 0 otherwise.
Examples
--------
>>> data = mx.nd.array([np.inf, -np.inf, np.NINF, -1])
>>> output = mx.nd.contrib.isinf(data)
>>> output
[1. 1. 1. 0.]
<NDArray 4 @cpu(0)>
"""
return data.abs() == np.inf
def isfinite(data):
"""Performs an element-wise check to determine if the NDArray contains an infinite element
or not.
Parameters
----------
input : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
        The output NDArray, with same shape as input, where 1 indicates the array element is
finite i.e. not equal to positive or negative infinity and 0 in places where it is
positive or negative infinity.
Examples
--------
>>> data = mx.nd.array([np.inf, -np.inf, np.NINF, -1])
>>> output = mx.nd.contrib.isfinite(data)
>>> output
[0. 0. 0. 1.]
<NDArray 4 @cpu(0)>
"""
is_data_not_nan = data == data # pylint: disable=comparison-with-itself
is_data_not_infinite = data.abs() != np.inf
return ndarray.logical_and(is_data_not_infinite, is_data_not_nan)
def isnan(data):
"""Performs an element-wise check to determine if the NDArray contains a NaN element
or not.
Parameters
----------
input : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
        The output NDArray, with same shape as input, where 1 indicates the array element is
NaN i.e. Not a Number and 0 otherwise.
Examples
--------
>>> data = mx.nd.array([np.nan, -1])
>>> output = mx.nd.contrib.isnan(data)
>>> output
[1. 0.]
<NDArray 2 @cpu(0)>
"""
return data != data # pylint: disable=comparison-with-itself
def adamw_update(weight, grad, mean, var, rescale_grad, lr, eta, beta1=0.9, beta2=0.999,
                 epsilon=1e-8, wd=0, clip_gradient=-1, out=None, name=None, **kwargs):
    """Update function for the AdamW optimizer (Adam with decoupled weight decay)."""
    # Accept a python scalar for convenience, but the backend op expects
    # rescale_grad as a 1-element NDArray on the weight's context.
    if not isinstance(rescale_grad, ndarray.NDArray):
        rescale_grad = ndarray.full(shape=(1,), val=rescale_grad, ctx=weight.context)
    else:
        rescale_grad = rescale_grad.as_in_context(weight.context)
return ndarray._internal._adamw_update(weight=weight, grad=grad, mean=mean, var=var,
rescale_grad=rescale_grad, lr=lr, eta=eta,
beta1=beta1, beta2=beta2, epsilon=epsilon,
wd=wd, clip_gradient=clip_gradient, out=out,
name=name, **kwargs)
def mp_adamw_update(weight, grad, mean, var, weight32, rescale_grad, lr, eta, beta1=0.9,
                    beta2=0.999, epsilon=1e-8, wd=0, clip_gradient=-1, out=None,
                    name=None, **kwargs):
    """Multi-precision AdamW update; ``weight32`` holds the float32 master copy of the weights."""
    # Same scalar-to-NDArray convenience handling as in adamw_update above.
    if not isinstance(rescale_grad, ndarray.NDArray):
        rescale_grad = ndarray.full(shape=(1,), val=rescale_grad, ctx=weight.context)
    else:
        rescale_grad = rescale_grad.as_in_context(weight.context)
return ndarray._internal._mp_adamw_update(weight=weight, grad=grad, mean=mean, var=var,
weight32=weight32,
rescale_grad=rescale_grad, lr=lr, eta=eta,
beta1=beta1, beta2=beta2, epsilon=epsilon,
wd=wd, clip_gradient=clip_gradient, out=out,
name=name, **kwargs)
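# A minimal usage sketch for adamw_update (hypothetical shapes and values; the
# optimizer-state NDArrays mean/var must be zero-initialized by the caller):
#
#   w = ndarray.random.uniform(shape=(3,))
#   g = ndarray.random.uniform(shape=(3,))
#   m, v = ndarray.zeros((3,)), ndarray.zeros((3,))
#   adamw_update(w, g, m, v, rescale_grad=1.0, lr=1e-3, eta=1.0, wd=0.01, out=w)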
|
{
"content_hash": "e3611a84c563ad3536d7101e2e70dfab",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 100,
"avg_line_length": 36.846014492753625,
"alnum_prop": 0.6134519887900094,
"repo_name": "reminisce/mxnet",
"id": "601bc682db3862a8e2cae21d598baf551908457a",
"size": "21220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/ndarray/contrib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "215572"
},
{
"name": "C++",
"bytes": "7680259"
},
{
"name": "CMake",
"bytes": "99958"
},
{
"name": "Clojure",
"bytes": "622688"
},
{
"name": "Cuda",
"bytes": "970884"
},
{
"name": "Dockerfile",
"bytes": "85151"
},
{
"name": "Groovy",
"bytes": "122800"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "436326"
},
{
"name": "Jupyter Notebook",
"bytes": "3660387"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "201597"
},
{
"name": "Perl",
"bytes": "1550163"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "13786"
},
{
"name": "Python",
"bytes": "7842403"
},
{
"name": "R",
"bytes": "357807"
},
{
"name": "Scala",
"bytes": "1305036"
},
{
"name": "Shell",
"bytes": "427407"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
}
|