text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
# GX class declaration: the DATAMINE interface wrapper exposed by the API spec.
gx_class = Class('DATAMINE',
                 doc="""
:class:`DATAMINE` functions provide an interface to Datamine Software Limited files.
See also :class:`GIS` for various other Datamine-specific functions.
""",
                 notes="None.")
# Datamine file-type constants (values look flag-like, but usage is as plain
# INT32 type codes here).
gx_defines = [
    Define('GIS_DMTYPE',
           doc="Datamine file types",
           constants=[
               Constant('GIS_DMTYPE_STRING', value='2', type=Type.INT32_T),
               Constant('GIS_DMTYPE_WIREFRAME_TR', value='8', type=Type.INT32_T),
               Constant('GIS_DMTYPE_DTM', value='16', type=Type.INT32_T),
               Constant('GIS_DMTYPE_BLOCKMODEL', value='32', type=Type.INT32_T),
               Constant('GIS_DMTYPE_WIREFRAME_PT', value='64', type=Type.INT32_T),
               Constant('GIS_DMTYPE_POINTDATA', value='1024', type=Type.INT32_T)
           ])]
# Wrapped GX methods, grouped by category.
gx_methods = {
    'Miscellaneous': [
        Method('CreateVoxel_DATAMINE', module='geoengine.interoperability', version='6.3.0',
               availability=Availability.LICENSED,
               doc="Create a Geosoft Voxel file from a Datamine block model file.",
               notes="Create a Geosoft Voxel file from a Datamine block model file.",
               return_type=Type.VOID,
               parameters = [
                   Parameter('file', type=Type.STRING,
                             doc="Datamine file name"),
                   Parameter('field', type=Type.STRING,
                             doc="Field to use for data"),
                   Parameter('ipj', type="IPJ",
                             doc="Projection to set"),
                   Parameter('meta', type="META",
                             doc=":class:`META` to set"),
                   Parameter('voxel', type=Type.STRING,
                             doc="Output voxel file name")
               ]),
        Method('NumericFieldLST_DATAMINE', module='geoengine.interoperability', version='6.3.0',
               availability=Availability.LICENSED,
               doc="Return a :class:`LST` containing the non-standard numeric fields in a Datamine file.",
               notes="""
At this time, only :const:`GIS_DMTYPE_BLOCKMODEL` files are supported.
The field names go in the name part, and field indices (1 to N)
in the value part.
""",
               return_type=Type.VOID,
               parameters = [
                   Parameter('file', type=Type.STRING,
                             doc="Datamine file name"),
                   Parameter('lst', type="LST",
                             doc=":class:`LST` to populate")
               ])
    ]
}
|
{
"content_hash": "639d220d80642cb873833cf08e62a82e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 106,
"avg_line_length": 44.854838709677416,
"alnum_prop": 0.5142035239122618,
"repo_name": "GeosoftInc/gxapi",
"id": "e2cd49b38cef55f0c4f07c7bda9be2da4c4ed7b5",
"size": "2781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/core/DATAMINE.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "C++",
"bytes": "9250"
},
{
"name": "Objective-C",
"bytes": "485"
},
{
"name": "Python",
"bytes": "4111365"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1
async def sample_delete_data_labeling_job():
    """Delete a data labeling job and wait for the long-running operation."""
    # Create a client
    client = aiplatform_v1.JobServiceAsyncClient()
    # Initialize request argument(s)
    request = aiplatform_v1.DeleteDataLabelingJobRequest(
        name="name_value",
    )
    # Make the request
    # NOTE(review): on the *async* client this method is presumably a
    # coroutine, so it looks like it should be awaited here
    # ("operation = await client.delete_data_labeling_job(...)") before
    # "operation.result()" is awaited below — confirm against the upstream
    # generated sample.
    operation = client.delete_data_labeling_job(request=request)
    print("Waiting for operation to complete...")
    response = await operation.result()
    # Handle the response
    print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_async]
|
{
"content_hash": "bb0ce0c30f0ef286d193218e3fd3d23e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 26.043478260869566,
"alnum_prop": 0.7212020033388982,
"repo_name": "googleapis/python-aiplatform",
"id": "5393c948653f93ec5d913c7543bb986a318b0f3e",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
from app import app
from flask import render_template
@app.route("/")
def servicos():
    """Render the services page at the site root."""
    return render_template("servicos.html")
|
{
"content_hash": "326e77579d587002c0b3f34d6db7b99d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 18.857142857142858,
"alnum_prop": 0.7272727272727273,
"repo_name": "h01000110/gerenciador-oficina-web",
"id": "c08299378f7a69b98e6c04bd6e0b98c3533a6be8",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/controllers/servicos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9352"
},
{
"name": "HTML",
"bytes": "18902"
},
{
"name": "JavaScript",
"bytes": "5830"
},
{
"name": "Python",
"bytes": "11432"
}
],
"symlink_target": ""
}
|
from __future__ import division
import re
# This is a naive text summarization algorithm
# Created by Shlomi Babluki
# April, 2013
class SummaryTool(object):
    """Naive extractive text summarizer.

    Ranks every sentence by how much vocabulary it shares with the rest of
    the text, then keeps the highest-ranked sentence of each paragraph.

    Original algorithm by Shlomi Babluki, April 2013.
    """

    def split_content_to_sentences(self, content):
        """Naively split *content* into sentences on ". " boundaries."""
        content = content.replace("\n", ". ")
        return content.split(". ")

    def split_content_to_paragraphs(self, content):
        """Naively split *content* into paragraphs on blank lines."""
        return content.split("\n\n")

    def sentences_intersection(self, sent1, sent2):
        """Return the word overlap of two sentences, normalized by their
        average word count."""
        # Split each sentence into a set of words/tokens.
        s1 = set(sent1.split(" "))
        s2 = set(sent2.split(" "))
        # No overlap: return the int 0 (kept as-is for backward compatibility).
        if len(s1.intersection(s2)) == 0:
            return 0
        # Normalize the overlap by the average number of words per sentence.
        return len(s1.intersection(s2)) / ((len(s1) + len(s2)) / 2)

    def format_sentence(self, sentence):
        """Strip all non-word chars; the result keys the sentences dict."""
        return re.sub(r'\W+', '', sentence)

    def get_senteces_ranks(self, content):
        """Return a dict mapping each formatted sentence to its rank.

        A sentence's rank is the sum of its normalized intersections with
        every *other* sentence in *content*.  (The method name's typo is
        kept so existing callers keep working.)
        """
        sentences = self.split_content_to_sentences(content)
        n = len(sentences)
        # FIX: the original mixed xrange (Python-2-only) and range; use
        # range consistently so the class also runs on Python 3.
        # Intersection is symmetric, so each pair is computed only once.
        values = [[0 for _ in range(n)] for _ in range(n)]
        for i in range(n):
            for j in range(i, n):
                pair_score = self.sentences_intersection(sentences[i], sentences[j])
                values[i][j] = pair_score
                values[j][i] = pair_score
        # Sum each row, skipping the diagonal (a sentence vs. itself).
        sentences_dic = {}
        for i in range(n):
            score = 0
            for j in range(n):
                if i == j:
                    continue
                score += values[i][j]
            sentences_dic[self.format_sentence(sentences[i])] = score
        return sentences_dic

    def get_best_sentence(self, paragraph, sentences_dic):
        """Return the highest-ranked sentence of *paragraph*, or "" when the
        paragraph has fewer than two sentences."""
        sentences = self.split_content_to_sentences(paragraph)
        # Ignore short paragraphs.
        if len(sentences) < 2:
            return ""
        best_sentence = ""
        max_value = 0
        for s in sentences:
            strip_s = self.format_sentence(s)
            # FIX: .get() guards against a KeyError when a paragraph
            # sentence is not present in the ranking dictionary (paragraph
            # and content splitting can disagree); unknown sentences simply
            # never win.
            rank = sentences_dic.get(strip_s, 0) if strip_s else 0
            if rank > max_value:
                max_value = rank
                best_sentence = s
        return best_sentence

    def get_summary(self, title, content, sentences_dic):
        """Build the summary: the stripped title, a blank line, then the
        best sentence of each paragraph."""
        paragraphs = self.split_content_to_paragraphs(content)
        summary = [title.strip(), ""]
        for p in paragraphs:
            sentence = self.get_best_sentence(p, sentences_dic).strip()
            if sentence:
                summary.append(sentence)
        return "\n".join(summary)
# Main method, just run "python summary_tool.py"
def main():
    """Demo entry point: summarize a sample article and print statistics.

    Run with: python summary_tool.py
    """
    # Demo
    # Content from: "http://thenextweb.com/apps/2013/03/21/swayy-discover-curate-content/"
    title = """
    Swayy is a beautiful new dashboard for discovering and curating online content [Invites]
    """
    content = """
    Lior Degani, the Co-Founder and head of Marketing of Swayy, pinged me last week when I was in California to tell me about his startup and give me beta access. I heard his pitch and was skeptical. I was also tired, cranky and missing my kids – so my frame of mind wasn’t the most positive.
    I went into Swayy to check it out, and when it asked for access to my Twitter and permission to tweet from my account, all I could think was, “If this thing spams my Twitter account I am going to bitch-slap him all over the Internet.” Fortunately that thought stayed in my head, and not out of my mouth.
    One week later, I’m totally addicted to Swayy and glad I said nothing about the spam (it doesn’t send out spam tweets but I liked the line too much to not use it for this article). I pinged Lior on Facebook with a request for a beta access code for TNW readers. I also asked how soon can I write about it. It’s that good. Seriously. I use every content curation service online. It really is That Good.
    What is Swayy? It’s like Percolate and LinkedIn recommended articles, mixed with trending keywords for the topics you find interesting, combined with an analytics dashboard that shows the trends of what you do and how people react to it. I like it for the simplicity and accuracy of the content curation. Everything I’m actually interested in reading is in one place – I don’t have to skip from another major tech blog over to Harvard Business Review then hop over to another major tech or business blog. It’s all in there. And it has saved me So Much Time
    After I decided that I trusted the service, I added my Facebook and LinkedIn accounts. The content just got That Much Better. I can share from the service itself, but I generally prefer reading the actual post first – so I end up sharing it from the main link, using Swayy more as a service for discovery.
    I’m also finding myself checking out trending keywords more often (more often than never, which is how often I do it on Twitter.com).
    The analytics side isn’t as interesting for me right now, but that could be due to the fact that I’ve barely been online since I came back from the US last weekend. The graphs also haven’t given me any particularly special insights as I can’t see which post got the actual feedback on the graph side (however there are numbers on the Timeline side.) This is a Beta though, and new features are being added and improved daily. I’m sure this is on the list. As they say, if you aren’t launching with something you’re embarrassed by, you’ve waited too long to launch.
    It was the suggested content that impressed me the most. The articles really are spot on – which is why I pinged Lior again to ask a few questions:
    How do you choose the articles listed on the site? Is there an algorithm involved? And is there any IP?
    Yes, we’re in the process of filing a patent for it. But basically the system works with a Natural Language Processing Engine. Actually, there are several parts for the content matching, but besides analyzing what topics the articles are talking about, we have machine learning algorithms that match you to the relevant suggested stuff. For example, if you shared an article about Zuck that got a good reaction from your followers, we might offer you another one about Kevin Systrom (just a simple example).
    Who came up with the idea for Swayy, and why? And what’s your business model?
    Our business model is a subscription model for extra social accounts (extra Facebook / Twitter, etc) and team collaboration.
    The idea was born from our day-to-day need to be active on social media, look for the best content to share with our followers, grow them, and measure what content works best.
    Who is on the team?
    Ohad Frankfurt is the CEO, Shlomi Babluki is the CTO and Oz Katz does Product and Engineering, and I [Lior Degani] do Marketing. The four of us are the founders. Oz and I were in 8200 [an elite Israeli army unit] together. Emily Engelson does Community Management and Graphic Design.
    If you use Percolate or read LinkedIn’s recommended posts I think you’ll love Swayy.
    ➤ Want to try Swayy out without having to wait? Go to this secret URL and enter the promotion code thenextweb . The first 300 people to use the code will get access.
    Image credit: Thinkstock
    """
    # Create a SummaryTool object
    st = SummaryTool()
    # Build the sentences dictionary
    sentences_dic = st.get_senteces_ranks(content)
    # Build the summary with the sentences dictionary
    summary = st.get_summary(title, content, sentences_dic)
    # FIX: the original used Python-2-only print statements; the
    # parenthesized print(...) form below runs unchanged on Python 2 and 3.
    print(summary)
    # Print the ratio between the summary length and the original length.
    print("")
    print("Original Length %s" % (len(title) + len(content)))
    print("Summary Length %s" % len(summary))
    print("Summary Ratio: %s" % (100 - (100 * (len(summary) / (len(title) + len(content))))))
|
{
"content_hash": "686db9bbdfbbd4b5bb74bb646cd24d00",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 568,
"avg_line_length": 49.394444444444446,
"alnum_prop": 0.682712855696772,
"repo_name": "minhhahl/tinvan",
"id": "adea73034dcbedec6169cd2a8d493cdc820416d0",
"size": "8962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinvan_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15645"
}
],
"symlink_target": ""
}
|
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
# Directory containing this script; APKs are searched for beneath it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Parsed command-line options; populated by main().
PARAMETERS = None
# Name of the adb executable to invoke.
ADB_CMD = "adb"
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output line by line.

    Returns a ``(return_code, output_lines)`` tuple, where
    ``output_lines`` is the list of stripped stdout/stderr lines.
    """
    # Do not need handle timeout in this short script, let tool do it
    # FIX: parenthesized print works on both Python 2 and 3 (the original
    # print statement is a SyntaxError on Python 3).
    print('-->> "%s"' % cmd)
    output = []
    cmd_return_code = 1
    # FIX: universal_newlines=True keeps stdout as text; on Python 3
    # readline() would otherwise return bytes and .strip("\r\n") below
    # would raise TypeError.
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
        universal_newlines=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # An empty line together with a set return code means EOF.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def uninstPKGs():
    """Uninstall every APK found under SCRIPT_DIR from the target device.

    Returns False when any adb uninstall reports a Failure line,
    True otherwise; keeps going through the remaining APKs either way.
    """
    action_status = True
    for top_dir, _dirs, file_names in os.walk(SCRIPT_DIR):
        for apk in file_names:
            if not apk.endswith(".apk"):
                continue
            pkg_suffix = os.path.basename(os.path.splitext(apk)[0])
            cmd = "%s -s %s uninstall org.xwalk.%s" % (
                ADB_CMD, PARAMETERS.device, pkg_suffix)
            (return_code, output) = doCMD(cmd)
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    return action_status
def instPKGs():
    """Install every APK found under SCRIPT_DIR onto the target device,
    then make the bundled test scripts executable.

    Returns False when any adb install reports a Failure line,
    True otherwise.
    """
    action_status = True
    for top_dir, _dirs, file_names in os.walk(SCRIPT_DIR):
        for apk in file_names:
            if not apk.endswith(".apk"):
                continue
            cmd = "%s -s %s install %s" % (ADB_CMD,
                   PARAMETERS.device, os.path.join(top_dir, apk))
            (return_code, output) = doCMD(cmd)
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    os.system("chmod 777 %s/stablonglast3d/*.sh" % SCRIPT_DIR)
    return action_status
def main():
    """Parse options and install or uninstall the test APKs.

    Options:
      -s <device>  target a specific adb device (auto-detected if omitted)
      -i           install packages
      -u           uninstall packages (mutually exclusive with -i)
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        # FIX: "except Exception, e" is Python-2-only syntax (SyntaxError on
        # Python 3); "as e" works on Python 2.6+ and 3. print(...) likewise.
        print("Got wrong option: %s, exit ..." % e)
        sys.exit(1)
    if not PARAMETERS.device:
        # No device given: pick the first one reported by "adb devices".
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if "\tdevice" in line:
                PARAMETERS.device = line.split("\t")[0]
                break
    if not PARAMETERS.device:
        print("No device found")
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print("-i and -u are conflict")
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    # Explicitly report success when main() returns without exiting.
    sys.exit(0)
|
{
"content_hash": "01df02da09380854b85adb70207ea254",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 92,
"avg_line_length": 30.37735849056604,
"alnum_prop": 0.5450310559006211,
"repo_name": "yugang/crosswalk-test-suite",
"id": "10db5246010fbde79829dbb6f44ae52edb2403eb",
"size": "3243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stability/wrt-stablonglast3d-android-tests/inst.apk.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3495"
},
{
"name": "CSS",
"bytes": "1694855"
},
{
"name": "Erlang",
"bytes": "2850"
},
{
"name": "Java",
"bytes": "155590"
},
{
"name": "JavaScript",
"bytes": "32256550"
},
{
"name": "PHP",
"bytes": "43783"
},
{
"name": "Perl",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "4215706"
},
{
"name": "Shell",
"bytes": "638387"
},
{
"name": "XSLT",
"bytes": "2143471"
}
],
"symlink_target": ""
}
|
"""
DB persistence layer.
At this time, the persistence features of txOpenID are fairly dependant
on MySQL. However, every attempt has been made to keep MySQL-specific
code in this module, in hopes of making it easy to support new DB types.
"""
import time
from MySQLdb import cursors
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.enterprise import adbapi
class Connection(object):
	"""
	A representation of a database connection.
	This is pretty much just a wrapper around
	L{twisted.enterprise.adbapi.ConnectionPool}. This class
	just adds a bunch of convenience methods for the limited
	amount of DB access needed by txOpenID.
	"""
	def __init__(self, **kwargs):
		"""
		Create a new connection to the DB.
		Recognized keyword arguments (each with a txOpenID-flavored
		default): host, db, user, passwd.
		"""
		# SSDictCursor streams rows from the server and returns them as
		# dicts; cp_reconnect lets the pool recover dropped connections.
		self.conn = adbapi.ConnectionPool('MySQLdb',
			host=kwargs.get('host', 'localhost'),
			db=kwargs.get('db', 'txopenid'),
			user=kwargs.get('user', 'txopenid'),
			passwd=kwargs.get('passwd', 'txopenid'),
			cursorclass=cursors.SSDictCursor,
			cp_reconnect=True,
			cp_noisy=False,
			cp_min=3,
			cp_max=10,
		)
	@inlineCallbacks
	def checkUserIdentity(self, user, identity):
		"""
		Does the provided user have the provided identity?
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@param identity: the identity to check
		@type identity: str
		@return: True if the user owns the identity URL, else False.
		"""
		identity_query = "SELECT 1 FROM identity WHERE user_id = %s and url = %s"
		result = yield self.conn.runQuery(identity_query, [user['id'], identity])
		if(result):
			returnValue(True)
		returnValue(False)
	@inlineCallbacks
	def getUserIdentities(self, user):
		"""
		Return a list of identities for this user.
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@return: a list of identity records
		@rtype: list(dict(column => value))
		"""
		identity_query = "SELECT * FROM identity WHERE user_id = %s"
		result = yield self.conn.runQuery(identity_query, [user['id']])
		returnValue(result)
	@inlineCallbacks
	def getUserIdForIdentity(self, identity):
		"""
		Return the user who posesses the provided identity.
		@param identity: the identity to check for
		@type identity: str
		@return: the registered user's ID, or None if unregistered
		"""
		identity_query = "SELECT user_id FROM identity WHERE url = %s"
		result = yield self.conn.runQuery(identity_query, [identity])
		if(result):
			returnValue(result[0]['user_id'])
		returnValue(None)
	@inlineCallbacks
	def saveUserIdentity(self, user, identity):
		"""
		Add the provided identity to the user's list.
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@param identity: the identity to save
		@type identity: str
		@return: True if there were no errors.
		"""
		identity_operation = "INSERT INTO identity (user_id, url) VALUES (%s, %s)"
		yield self.conn.runOperation(identity_operation, [user['id'], identity])
		returnValue(True)
	@inlineCallbacks
	def removeUserIdentities(self, user, identity_ids):
		"""
		Remove these identities for this user.
		@param user: the current user
		@type user: L{txopenid.user.User}
		@param identity_ids: a list of IDs to remove (a bare int is also accepted)
		@type identity_ids: int or list(int)
		@return: True if there were no errors.
		"""
		if not(isinstance(identity_ids, (list, tuple))):
			identity_ids = [identity_ids]
		# Build one %s placeholder per ID; %%s survives the formatting as the
		# user_id placeholder.
		identity_operation = "DELETE FROM identity WHERE user_id = %%s AND id IN (%s)" % ','.join(['%s']*len(identity_ids))
		yield self.conn.runOperation(identity_operation, [user['id']] + identity_ids)
		returnValue(True)
	@inlineCallbacks
	def checkUserTrust(self, user, root):
		"""
		Does the provided user trust provided root URL?
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@param root: the root URL to check
		@type root: str
		@return: True if the user trusts the root URL, else False.
		"""
		trust_query = "SELECT 1 FROM trusted_root WHERE user_id = %s and url = %s"
		result = yield self.conn.runQuery(trust_query, [user['id'], root])
		if(result):
			returnValue(True)
		returnValue(False)
	@inlineCallbacks
	def getUserTrustedRoots(self, user):
		"""
		Return a list of trusted roots for this user.
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@return: a list of trusted_root records
		@rtype: list(dict(column => value))
		"""
		trust_query = "SELECT * FROM trusted_root WHERE user_id = %s"
		result = yield self.conn.runQuery(trust_query, [user['id']])
		returnValue(result)
	@inlineCallbacks
	def saveUserRoot(self, user, root):
		"""
		Add the provided root to the user's list.
		@param user: the user in question
		@type user: L{txopenid.user.User}
		@param root: the root to trust
		@type root: str
		@return: True if there were no errors.
		"""
		root_operation = "INSERT INTO trusted_root (user_id, url) VALUES (%s, %s)"
		yield self.conn.runOperation(root_operation, [user['id'], root])
		returnValue(True)
	@inlineCallbacks
	def removeUserRoots(self, user, root_ids):
		"""
		Remove these roots for this user.
		@param user: the current user
		@type user: L{txopenid.user.User}
		@param root_ids: a list of IDs to remove (a bare int is also accepted)
		@type root_ids: int or list(int)
		@return: True if there were no errors.
		"""
		if not(isinstance(root_ids, (list, tuple))):
			root_ids = [root_ids]
		root_operation = "DELETE FROM trusted_root WHERE user_id = %%s AND id IN (%s)" % ','.join(['%s']*len(root_ids))
		yield self.conn.runOperation(root_operation, [user['id']] + root_ids)
		returnValue(True)
	@inlineCallbacks
	def loadUser(self, user_id):
		"""
		Return the user object for this user_id.
		@param user_id: the user ID to load.
		@type user_id: int
		@return: a L{txopenid.user.User} with its pool set, or None
		"""
		user_query = "SELECT * FROM user u WHERE u.id = %s"
		result = yield self.conn.runQuery(user_query, [user_id])
		if(result):
			# Imported here (not at module level) — presumably to avoid a
			# circular import with txopenid.user; confirm before hoisting.
			from txopenid import user
			if(result[0]):
				u = user.User(result[0])
				u.pool = self
				returnValue(u)
		returnValue(None)
	@inlineCallbacks
	def loadSession(self, sid):
		"""
		Return the session record for this session ID.
		@param sid: the session ID to load.
		@type sid: str
		@return: the session record
		@rtype: L{dict}
		"""
		session_query = "SELECT * FROM session s WHERE s.id = %s"
		result = yield self.conn.runQuery(session_query, [sid])
		if(result):
			returnValue(result[0])
		returnValue(None)
	@inlineCallbacks
	def saveSession(self, session):
		"""
		Save the provided session record.
		A '_new' key in the record marks a not-yet-persisted session: it is
		removed and an INSERT is issued; otherwise an UPDATE keyed on 'id'.
		@param session: the session record to save
		@type session: dict
		@return: True if there were no errors.
		"""
		if(session.get('_new') is None):
			fields = []
			values = []
			for column, value in session.items():
				if(column != 'id'):
					fields.append('%s = %%s' % column)
					values.append(value)
			values.append(session['id'])
			save_query = "UPDATE session SET %s WHERE id = %%s" % ', '.join(fields)
		else:
			del session['_new']
			keys = session.keys()
			values = session.values()
			save_query = "INSERT INTO session (%s) VALUES (%s)" % (', '.join(keys), ', '.join(['%s'] * len(values)))
		yield self.conn.runOperation(save_query, values)
		returnValue(True)
	@inlineCallbacks
	def verifyLogin(self, username, password):
		"""
		Get the user_id for the provided username and password.
		The check re-crypts the password with MySQL's ENCRYPT() using the
		stored crypt's two-character salt.
		@param username: the login username
		@type username: str
		@param password: the login password
		@type password: str
		@return: the user_id, or None
		@rtype: int
		"""
		session_query = "SELECT id FROM user u WHERE u.username = %s AND u.crypt = ENCRYPT(%s, SUBSTRING(u.crypt, 1, 2))"
		result = yield self.conn.runQuery(session_query, [username, password])
		if(result):
			returnValue(result[0]['id'])
		returnValue(None)
	@inlineCallbacks
	def verifySession(self, sid):
		"""
		Get the user_id for the provided session ID.
		Only sessions whose accessed - created age is within their timeout
		are considered valid.
		@param sid: the login session ID
		@type sid: str
		@return: the user_id, or None
		@rtype: int
		"""
		session_query = "SELECT * FROM session s WHERE s.id = %s AND accessed - created < timeout"
		result = yield self.conn.runQuery(session_query, [sid])
		if(result):
			returnValue(result[0]['user_id'])
		returnValue(None)
	def destroySession(self, sid):
		"""
		Remove the session with the given ID from the database.
		@param sid: the session ID to delete
		@type sid: str
		"""
		destroy_query = "DELETE FROM session WHERE id = %s"
		return self.conn.runOperation(destroy_query, [sid])
	def cleanupSessions(self):
		"""
		Remove expired sessions from the database.
		"""
		cleanup_query = "DELETE FROM session WHERE timeout < (%s - accessed)"
		return self.conn.runOperation(cleanup_query, [int(time.time())])
|
{
"content_hash": "0b4a2547aa0ee44995e700a4a48085f1",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 117,
"avg_line_length": 27.861290322580643,
"alnum_prop": 0.6729188375593377,
"repo_name": "philchristensen/txOpenID",
"id": "fed93cdc76b3bb47e15d8e28f7899700262f1123",
"size": "8715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txopenid/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74677"
}
],
"symlink_target": ""
}
|
import unittest
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_CHILD_ID_REQUEST, MGMT_ACTIVE_SET_URI, MGMT_ACTIVE_GET_URI, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, ADDRESS_REGISTRATION_TLV
from pktverify.packet_verifier import PacketVerifier
# Initial radio channel and PAN IDs used by the active/pending datasets.
CHANNEL_INIT = 19
PANID_INIT = 0xface
# PAN ID the second pending dataset switches the network to.
PANID_FINAL = 0xabcd
# Node IDs in the simulated topology.
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
class Cert_9_2_15_PendingPartition(thread_cert.TestCase):
    """Thread certification test 9.2.15 (Pending Partition).

    Drives a 4-node chain topology (Commissioner - Leader - Router_1 -
    Router_2) through two MGMT_PENDING_SET dataset pushes, resets Router_2
    in between, and checks that Router_2 re-attaches and that all nodes end
    up on the final PAN ID.
    """
    SUPPORT_NCP = False
    # Allowlists form the chain: COMMISSIONER <-> LEADER <-> ROUTER1 <-> ROUTER2.
    TOPOLOGY = {
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'router_selection_jitter': 1,
            'allowlist': [LEADER]
        },
        LEADER: {
            'name': 'LEADER',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'partition_id': 0xffffffff,
            'router_selection_jitter': 1,
            'allowlist': [COMMISSIONER, ROUTER1]
        },
        ROUTER1: {
            'name': 'ROUTER_1',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'router_selection_jitter': 1,
            'allowlist': [LEADER, ROUTER2]
        },
        ROUTER2: {
            'name': 'ROUTER_2',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'router_selection_jitter': 1,
            'allowlist': [ROUTER1]
        },
    }
    def _setUpRouter2(self):
        """Re-apply Router_2's allowlist and jitter config after a reset."""
        self.nodes[ROUTER2].add_allowlist(self.nodes[ROUTER1].get_addr64())
        self.nodes[ROUTER2].enable_allowlist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)
    def test(self):
        """Form the network, push two pending datasets, verify the PAN ID switch."""
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        # First pending dataset: long delay timer, no PAN ID change.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=10,
            active_timestamp=70,
            delay_timer=600000,
            mesh_local='fd00:0db9::',
        )
        self.simulator.go(5)
        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
        # Reset Router_2; its volatile config must be re-applied.
        self.nodes[ROUTER2].reset()
        self._setUpRouter2()
        # Second pending dataset: newer timestamps and the final PAN ID.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=20,
            active_timestamp=80,
            delay_timer=200000,
            mesh_local='fd00:0db7::',
            panid=PANID_FINAL,
        )
        self.simulator.go(101)
        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
        # Let the delay timer expire so the pending dataset becomes active.
        self.simulator.go(100)
        self.assertEqual(self.nodes[COMMISSIONER].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[LEADER].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[ROUTER1].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[ROUTER2].get_panid(), PANID_FINAL)
        # Ping Router_2's first non-link-local address from the Leader.
        ipaddrs = self.nodes[ROUTER2].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break
        self.assertTrue(self.nodes[LEADER].ping(ipaddr))
    def verify(self, pv):
        """Packet-level verification of the capture (pktverify steps)."""
        pkts = pv.pkts
        pv.summary.show()
        LEADER = pv.vars['LEADER']
        COMMISSIONER = pv.vars['COMMISSIONER']
        ROUTER_1 = pv.vars['ROUTER_1']
        ROUTER_2 = pv.vars['ROUTER_2']
        _router2_pkts = pkts.filter_wpan_src64(ROUTER_2)
        # Step 1: Ensure the topology is formed correctly
        # Verify Commissioner, Leader and Router_1 are sending MLE advertisements
        pkts.copy().filter_wpan_src64(LEADER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
        pkts.filter_wpan_dst64(COMMISSIONER).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        pkts.copy().filter_wpan_src64(COMMISSIONER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
        pkts.filter_wpan_dst64(ROUTER_1).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        pkts.copy().filter_wpan_src64(ROUTER_1).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
        # Step 5: Router_2 begins attach process by sending a multicast MLE Parent Request
        # The first MLE Parent Request sent MUST NOT be sent to all routers and REEDS
        _router2_pkts.range(pkts.index).filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
                p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)
        # Step 7: Router_2 MUST send a MLE Child ID Request to Router_1
        _router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
            RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
        } < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)
        # Step 14: Router_2 begins attach process by sending a multicast MLE Parent Request
        # The first MLE Parent Request sent MUST NOT be sent to all routers and REEDS
        _router2_pkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
                p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)
        # Step 16: Router_2 MUST send a MLE Child ID Request to Router_1
        _router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
            RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
        } < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)
# Run the certification test when executed directly.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "ff5df8484bb5fff52bf28ff6b8582668",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 306,
"avg_line_length": 38.75438596491228,
"alnum_prop": 0.5937830089029726,
"repo_name": "chshu/openthread",
"id": "654393e1500419a7bf5dcf57738de42c424c1d74",
"size": "8232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_9_2_15_PendingPartition.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "959546"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4339561"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "64583"
},
{
"name": "Makefile",
"bytes": "134582"
},
{
"name": "Python",
"bytes": "2121036"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "64482"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import connections
from django.db import migrations
def forwards(_, __):
    """RunPython forward entry point (apps/schema_editor args are unused)."""
    fill_organization_table()
def fill_organization_table():
    """Insert the fallback organization row (id=1) if it is missing.

    Bulk-importing distinct dataset sources via build_insert_query() is
    intentionally left disabled, matching the original migration.
    """
    default_query = build_insert_default_query()
    execute_insert_query(default_query)
def build_insert_query():
    """Return SQL copying each distinct non-empty dataset source into the
    organization table (title only, empty description).

    Currently unused by fill_organization_table (kept for a future pass).
    """
    return """INSERT INTO aggregator_organization (title,description)
              SELECT DISTINCT source,''
              FROM aggregator_dataset
              WHERE source != ''
              """
def build_insert_default_query():
    """Return SQL inserting a blank organization with id=1, guarded by a
    NOT EXISTS check so the insert is idempotent."""
    return """INSERT INTO aggregator_organization (ID, title,description)
              SELECT 1,'',''
              WHERE NOT EXISTS
                (SELECT 1
                 FROM aggregator_organization
                 WHERE id=1)"""
def execute_insert_query(query):
    """Execute *query* on a cursor from the 'default' database connection."""
    connections['default'].cursor().execute(query)
class Migration(migrations.Migration):
    # Data migration: populate aggregator_organization (created in 0020).
    dependencies = [
        ('aggregator', '0020_organization'),
    ]
    operations = [
        # Forwards-only: no reverse callable is supplied.
        migrations.RunPython(forwards)
    ]
|
{
"content_hash": "c8b1ce460377dd338919ef883b72122a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 24.53191489361702,
"alnum_prop": 0.6209887250650477,
"repo_name": "dipapaspyros/bdo_platform",
"id": "615f52259261553092d2f51264cb34f0cd609fbd",
"size": "1223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aggregator/migrations/0021_update_organizations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176900"
},
{
"name": "HTML",
"bytes": "69066"
},
{
"name": "JavaScript",
"bytes": "10644123"
},
{
"name": "Python",
"bytes": "195457"
},
{
"name": "XSLT",
"bytes": "1521"
}
],
"symlink_target": ""
}
|
"""
Generate a Flocker package that can be deployed onto cluster nodes.
"""
import os
import platform
from setuptools import setup, find_packages
import versioneer
# Configure versioneer to derive the version from git tags (prefix-less tags
# like "1.2.3"), writing it into flocker/_version.py.
versioneer.vcs = "git"
versioneer.versionfile_source = "flocker/_version.py"
versioneer.versionfile_build = "flocker/_version.py"
versioneer.tag_prefix = ""
versioneer.parentdir_prefix = "flocker-"
cmdclass = {}
# Let versioneer hook into the various distutils commands so it can rewrite
# certain data at appropriate times.
cmdclass.update(versioneer.get_cmdclass())
# Hard linking doesn't work inside VirtualBox shared folders. This means that
# you can't use tox in a directory that is being shared with Vagrant,
# since tox relies on `python setup.py sdist` which uses hard links. As a
# workaround, disable hard-linking if setup.py is a descendant of /vagrant.
# See
# https://stackoverflow.com/questions/7719380/python-setup-py-sdist-error-operation-not-permitted
# for more details.
if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
    # Without os.link, shutil/distutils fall back to copying files.
    del os.link
# The long_description below comes straight from the README.
with open("README.rst") as readme:
    description = readme.read()
# Development-only dependencies, exposed via the "dev" extra in setup() below.
dev_requirements = [
    # flake8 is pretty critical to have around to help point out
    # obvious mistakes. It depends on PEP8, pyflakes and mccabe.
    "pyflakes==0.8.1",
    "pep8==1.5.7",
    "mccabe==0.2.1",
    "flake8==2.2.0",
    # Run the test suite:
    "tox==1.7.1",
    # versioneer is necessary in order to update (but *not* merely to
    # use) the automatic versioning tools.
    "versioneer==0.10",
    # Some of the tests use Conch:
    "PyCrypto==2.6.1",
    "pyasn1==0.1.7",
    # The acceptance tests interact with MongoDB
    "pymongo>=2.7.2",
    # The acceptance tests interact with PostgreSQL
    "pg8000==1.10.1",
    # The acceptance tests interact with MySQL
    "PyMySQL==0.6.2",
    # The acceptance tests interact with Kibana via WebKit
    "selenium==2.44.0",
    # The cloud acceptance test runner needs these
    "fabric==1.10.0",
    "apache-libcloud==0.16.0",
    # Packages are downloaded from Buildbot
    "requests==2.4.3",
    "requests-file==1.0",
    "wheel==0.24.0",
    "gitpython==1.0.0",
    "tl.eggdeps==0.4",
    "boto==2.30.0",
]
# Runtime dependencies; pinned exactly (mostly) for reproducible installs.
install_requirements = [
    # This is necessary for a release because our version scheme does not
    # adhere to PEP440.
    # See https://clusterhq.atlassian.net/browse/FLOC-1373
    "setuptools==3.6",
    "eliot == 0.7.1",
    "machinist == 0.2.0",
    "zope.interface >= 4.0.5",
    "pytz",
    "characteristic >= 14.1.0",
    "Twisted == 15.1.0",
    # TLS support libraries for Twisted:
    "service_identity == 14.0.0",
    "idna == 2.0",
    "pyOpenSSL == 0.15.1",
    "PyYAML == 3.10",
    "treq == 0.2.1",
    "psutil == 2.1.2",
    "netifaces >= 0.8",
    "ipaddr == 2.1.11",
    "docker-py == 0.7.1",
    "jsonschema == 2.4.0",
    "klein == 0.2.3",
    "pyrsistent == 0.9.2",
    "pycrypto == 2.6.1",
    "effect==0.1a13",
    "bitmath==1.2.3-4",
    "boto==2.38.0",
]
# The test suite uses network namespaces
# nomenclature can only be installed on Linux
if platform.system() == 'Linux':
    dev_requirements.extend([
        "nomenclature >= 0.1.0",
    ])
    # OpenStack client libraries are likewise only installed on Linux.
    install_requirements.extend([
        "python-cinderclient==1.1.1",
        "python-novaclient==2.24.1",
        "python-keystoneclient==1.4.0",
        "python-keystoneclient-rackspace==0.1.3",
    ])
# Single setup() invocation: all packaging metadata, data files, console
# entry points and extras are declared here.
setup(
    # This is the human-targetted name of the software being packaged.
    name="Flocker",
    # This is a string giving the version of the software being packaged. For
    # simplicity it should be something boring like X.Y.Z.
    version=versioneer.get_version(),
    # This identifies the creators of this software. This is left symbolic for
    # ease of maintenance.
    author="ClusterHQ Team",
    # This is contact information for the authors.
    author_email="support@clusterhq.com",
    # Here is a website where more information about the software is available.
    url="https://clusterhq.com/",
    # A short identifier for the license under which the project is released.
    license="Apache License, Version 2.0",
    # Some details about what Flocker is. Synchronized with the README.rst to
    # keep it up to date more easily.
    long_description=description,
    # This setuptools helper will find everything that looks like a *Python*
    # package (in other words, things that can be imported) which are part of
    # the Flocker package.
    packages=find_packages(exclude=('admin', 'admin.*')),
    package_data={
        'flocker.node.functional': [
            'sendbytes-docker/*',
            'env-docker/*',
            'retry-docker/*'
        ],
        # These data files are used by the volumes API to define input and
        # output schemas.
        'flocker.control': ['schema/*.yml'],
    },
    entry_points={
        # These are the command-line programs we want setuptools to install.
        # Don't forget to modify the omnibus packaging tool
        # (admin/packaging.py) if you make changes here.
        'console_scripts': [
            'flocker-volume = flocker.volume.script:flocker_volume_main',
            'flocker-deploy = flocker.cli.script:flocker_deploy_main',
            'flocker-container-agent = flocker.node.script:flocker_container_agent_main', # noqa
            'flocker-dataset-agent = flocker.node.script:flocker_dataset_agent_main', # noqa
            'flocker-control = flocker.control.script:flocker_control_main',
            'flocker-ca = flocker.ca._script:flocker_ca_main',
            'flocker = flocker.cli.script:flocker_cli_main',
        ],
    },
    install_requires=install_requirements, extras_require={
        # This extra allows you to build and check the documentation for
        # Flocker.
        "doc": [
            "Sphinx==1.2.2",
            "sphinx-rtd-theme==0.1.6",
            "pyenchant==1.6.6",
            "sphinxcontrib-spelling==2.1.1",
            "sphinx-prompt==1.0.0",
            "sphinxcontrib-httpdomain==1.3.0",
        ],
        # This extra is for developers who need to work on Flocker itself.
        "dev": dev_requirements,
        # This extra is for Flocker release engineers to set up their release
        # environment.
        "release": [
            "gitpython==1.0.0",
            "awscli==1.7.25",
            "wheel==0.24.0",
            "virtualenv",
            "PyCrypto",
            "pyasn1",
            "tl.eggdeps==0.4",
            "boto==2.38.0",
            # Packages are downloaded from Buildbot
            "requests==2.4.3",
            "requests-file==1.0",
            # TLS SNI Support is needed to test link redirects
            "ndg-httpsclient==0.4.0",
        ],
    },
    cmdclass=cmdclass,
    # Some "trove classifiers" which are relevant.
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
    ],
)
|
{
"content_hash": "d5263632a930010e71feecdfea30b44c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 97,
"avg_line_length": 31.852534562211982,
"alnum_prop": 0.6193576388888888,
"repo_name": "lukemarsden/flocker",
"id": "629836c9b48aa3213c6dd94b450fa1865819c556",
"size": "6974",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2366120"
},
{
"name": "Ruby",
"bytes": "6229"
},
{
"name": "Shell",
"bytes": "3418"
}
],
"symlink_target": ""
}
|
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
"""
This module implements simple helper functions for python samples
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2016 VMware, Inc. All rights reserved.'
import argparse
def build_arg_parser():
    """Return an argparse parser for the sample setup/run script.

    Boolean switches (all ``store_true``):
        -s/--testbed_setup, -t/--testbed_validate, -c/--testbed_cleanup,
        -o/--iso_cleanup, -e/--samples_setup, -r/--samples,
        -i/--samples_incremental, -l/--samples_cleanup, -v/--skipverification
    Value options (all ``store``):
        -server/--vcenterserver, -p/--vcenterpassword, -e1/--esxhost1,
        -e2/--esxhost2, -epass/--esxpassword, -n/--nfsserver
    """
    # (short flag, long flag, help text) triples for boolean switches.
    flag_options = (
        ('-s', '--testbed_setup',
         'Build the testbed. Will run cleanup before '
         'trying to build in case there is '
         'an intermediate failure'),
        ('-t', '--testbed_validate',
         'Validate if the testbed is ready for the samples'),
        ('-c', '--testbed_cleanup',
         'Tear down the testbed'),
        ('-o', '--iso_cleanup',
         'Delete iso during cleanup. '),
        ('-e', '--samples_setup',
         'Run sample setup. '),
        ('-r', '--samples',
         'Run samples. '),
        ('-i', '--samples_incremental',
         'Runs samples that incrementally updates the VM '
         'configuration. '),
        ('-l', '--samples_cleanup',
         'Clean up after sample run. '),
        ('-v', '--skipverification',
         'Verify server certificate when connecting to '
         'vcenter. '),
    )
    # (short flag, long flag, help text) triples for value-taking options.
    value_options = (
        ('-server', '--vcenterserver',
         'Vcenter server IP to prepare the testbed to run the samples.'
         'If not passed as argument, update testbed.py file'),
        ('-p', '--vcenterpassword',
         'Vcenter server password'
         'If not passed as argument, update testbed.py file'),
        ('-e1', '--esxhost1',
         'ESX HOST 1 IP to prepare the testbed to run the samples.'
         'If not passed as argument, update testbed.py file'),
        ('-e2', '--esxhost2',
         'ESX HOST 2 IP to prepare the testbed to run the samples.'
         'If not passed as argument, update testbed.py file'),
        ('-epass', '--esxpassword',
         'ESX Server password'
         'If not passed as argument, update testbed.py file'),
        ('-n', '--nfsserver',
         'NFS Server IP to setup datastore for samples run.'
         'If not passed as argument, update testbed.py file'),
    )
    parser = argparse.ArgumentParser(
        description='Arguments for running sample setup script')
    # Registration order matters for --help output: switches first, then
    # value options, matching the original hand-written sequence.
    for short_flag, long_flag, help_text in flag_options:
        parser.add_argument(short_flag, long_flag, action='store_true',
                            help=help_text)
    for short_flag, long_flag, help_text in value_options:
        parser.add_argument(short_flag, long_flag, action='store',
                            help=help_text)
    return parser
|
{
"content_hash": "a211b6425934f12d0539c0a891493410",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 91,
"avg_line_length": 37.705882352941174,
"alnum_prop": 0.5179407176287052,
"repo_name": "pgbidkar/vsphere-automation-sdk-python",
"id": "6701673654e091f1c3988cfc868d52212a3d86cf",
"size": "4487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/vsphere/vcenter/setup/setup_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1232"
},
{
"name": "Python",
"bytes": "2656"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
# This is a modified version by Davide Callegari (http://www.brokenseal.it/)
#
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
#
from StringIO import StringIO
def jsmin(js):
    """Minify JavaScript source *js* and return the result as a string.

    A single leading newline emitted by the minifier state machine is
    stripped from the result.
    """
    ins = StringIO(js)
    outs = StringIO()
    JavascriptMinify().minify(ins, outs)
    # Don't shadow the builtin `str` with the result variable.
    minified = outs.getvalue()
    if len(minified) > 0 and minified[0] == '\n':
        minified = minified[1:]
    return minified
def isAlphanum(c):
    """Return True if the character is a letter, digit, underscore,
    dollar sign, backslash, or non-ASCII character; False otherwise.

    Accepts None (returned as False) so callers may pass an absent
    lookahead character safely.
    """
    # Guard clause: the original relied on Python 2's None-vs-str ordering;
    # make the None case explicit so it also behaves under Python 3.
    if c is None:
        return False
    return ('a' <= c <= 'z' or '0' <= c <= '9' or 'A' <= c <= 'Z'
            or c == '_' or c == '$' or c == '\\' or ord(c) > 126)
class UnterminatedComment(Exception):
    """Raised when a /* ... */ comment is never closed before EOF."""
    pass
class UnterminatedStringLiteral(Exception):
    """Raised when a quoted string ends at a control character or EOF."""
    pass
class UnterminatedRegularExpression(Exception):
    """Raised when a regular expression literal is never closed."""
    pass
class JavascriptMinify(object):
    """Streaming JavaScript minifier (port of Crockford's jsmin).

    Copies ``instream`` to ``outstream`` while removing comments and most
    whitespace, using a two-character window (``theA``, ``theB``) plus a
    one-character lookahead.
    """

    def _outA(self):
        # Emit the current leading character of the window.
        self.outstream.write(self.theA)

    def _outB(self):
        # Emit the trailing character of the window.
        self.outstream.write(self.theB)

    def _get(self):
        """return the next character from stdin. Watch out for lookahead. If
        the character is a control character, translate it to a space or
        linefeed.
        """
        c = self.theLookahead
        self.theLookahead = None
        # Fix: identity comparison with None ("is None"), not "== None".
        if c is None:
            c = self.instream.read(1)
        if c >= ' ' or c == '\n':
            return c
        if c == '': # EOF
            return '\000'
        if c == '\r':
            return '\n'
        return ' '

    def _peek(self):
        self.theLookahead = self._get()
        return self.theLookahead

    def _next(self):
        """get the next character, excluding comments. peek() is used to see
        if an unescaped '/' is followed by a '/' or '*'.
        """
        c = self._get()
        if c == '/' and self.theA != '\\':
            p = self._peek()
            if p == '/':
                # Line comment: skip to end of line, return the newline.
                c = self._get()
                while c > '\n':
                    c = self._get()
                return c
            if p == '*':
                # Block comment: skip to the closing */, return a space.
                c = self._get()
                while 1:
                    c = self._get()
                    if c == '*':
                        if self._peek() == '/':
                            self._get()
                            return ' '
                    if c == '\000':
                        raise UnterminatedComment()
        return c

    def _action(self, action):
        """do something! What you do is determined by the argument:
        1 Output A. Copy B to A. Get the next B.
        2 Copy B to A. Get the next B. (Delete A).
        3 Get the next B. (Delete B).
        action treats a string as a single character. Wow!
        action recognizes a regular expression if it is preceded by ( or , or =.
        """
        if action <= 1:
            self._outA()
        if action <= 2:
            self.theA = self.theB
            # Copy string literals verbatim (no minification inside them).
            if self.theA == "'" or self.theA == '"':
                while 1:
                    self._outA()
                    self.theA = self._get()
                    if self.theA == self.theB:
                        break
                    if self.theA <= '\n':
                        raise UnterminatedStringLiteral()
                    if self.theA == '\\':
                        self._outA()
                        self.theA = self._get()
        if action <= 3:
            self.theB = self._next()
            # A '/' after one of these characters starts a regex literal;
            # copy it verbatim as well.
            if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
                                     self.theA == '=' or self.theA == ':' or
                                     self.theA == '[' or self.theA == '?' or
                                     self.theA == '!' or self.theA == '&' or
                                     self.theA == '|' or self.theA == ';' or
                                     self.theA == '{' or self.theA == '}' or
                                     self.theA == '\n'):
                self._outA()
                self._outB()
                while 1:
                    self.theA = self._get()
                    if self.theA == '/':
                        break
                    elif self.theA == '\\':
                        self._outA()
                        self.theA = self._get()
                    elif self.theA <= '\n':
                        raise UnterminatedRegularExpression()
                self._outA()
                self.theB = self._next()

    def _jsmin(self):
        """Copy the input to the output, deleting the characters which are
        insignificant to JavaScript. Comments will be removed. Tabs will be
        replaced with spaces. Carriage returns will be replaced with linefeeds.
        Most spaces and linefeeds will be removed.
        """
        self.theA = '\n'
        self._action(3)
        while self.theA != '\000':
            if self.theA == ' ':
                if isAlphanum(self.theB):
                    self._action(1)
                else:
                    self._action(2)
            elif self.theA == '\n':
                if self.theB in ['{', '[', '(', '+', '-']:
                    self._action(1)
                elif self.theB == ' ':
                    self._action(3)
                else:
                    if isAlphanum(self.theB):
                        self._action(1)
                    else:
                        self._action(2)
            else:
                if self.theB == ' ':
                    if isAlphanum(self.theA):
                        self._action(1)
                    else:
                        self._action(3)
                elif self.theB == '\n':
                    if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
                        self._action(1)
                    else:
                        if isAlphanum(self.theA):
                            self._action(1)
                        else:
                            self._action(3)
                else:
                    self._action(1)

    def minify(self, instream, outstream):
        """Minify *instream* into *outstream*; closes the input stream only."""
        self.instream = instream
        self.outstream = outstream
        self.theA = '\n'
        self.theB = None
        self.theLookahead = None
        self._jsmin()
        self.instream.close()
if __name__ == '__main__':
    # Filter mode: minify stdin to stdout.
    import sys
    jsm = JavascriptMinify()
    jsm.minify(sys.stdin, sys.stdout)
|
{
"content_hash": "56f427b78ddeb61b9980fc782d4404bd",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 110,
"avg_line_length": 34.35204081632653,
"alnum_prop": 0.41155502747660777,
"repo_name": "brokenseal/broke",
"id": "499b694e74aa2c6cdcdde12411657746c6a3a02b",
"size": "7958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/python/jsmin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1148066"
},
{
"name": "Python",
"bytes": "43971"
}
],
"symlink_target": ""
}
|
import networkzero as nw0
# Locate the "news1" publisher on the local network.
address = nw0.discover("news1")
while True:
    # Block until the next (topic, data) news item arrives, then display it.
    topic, temperature = nw0.wait_for_news_from(address)
    print("Temperature is:", temperature)
|
{
"content_hash": "0b6e1ca5c21dc341ae62b506ee681473",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7251461988304093,
"repo_name": "tjguk/networkzero",
"id": "557a3e1074e6c8b79f731e3dde7a761b170d1967",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/cookbook/simple-messaging/send_news_b.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "81"
},
{
"name": "C",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "101990"
},
{
"name": "Shell",
"bytes": "2626"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# Packaging metadata for the public "hash" stoQ plugin.
setup(
    name="hash",
    version="3.0.0",
    author="Wesley Shields <wxs@atarininja.org>, Marcus LaFerrera <@mlaferrera>",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Hash content",
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    test_suite='tests',
    tests_require=['asynctest>=0.13.0'],
)
|
{
"content_hash": "03ca9adee417641d6a4946c656b4afd9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 31.785714285714285,
"alnum_prop": 0.6764044943820224,
"repo_name": "PUNCH-Cyber/stoq-plugins-public",
"id": "19a18c5fb06b5c192bc4a2d160563e800ecb5b44",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hash/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "706"
},
{
"name": "Python",
"bytes": "199236"
},
{
"name": "Smarty",
"bytes": "1224"
},
{
"name": "YARA",
"bytes": "103573"
}
],
"symlink_target": ""
}
|
"""
None
"""
class Logger:
    """Rate limiter: a given message may be printed at most once per 10 seconds."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # Maps message -> timestamp at which it was last allowed to print.
        self.track = {}

    def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
        """
        Returns true if the message should be printed in the given timestamp, otherwise returns false.
        If this method returns false, the message will not be printed.
        The timestamp is in seconds granularity.
        """
        last_printed = self.track.get(message)
        allowed = last_printed is None or timestamp - last_printed >= 10
        if allowed:
            self.track[message] = timestamp
        return allowed
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
|
{
"content_hash": "c160e1b655c4f8122c88b6a45c21ed68",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 102,
"avg_line_length": 25.375,
"alnum_prop": 0.5948275862068966,
"repo_name": "franklingu/leetcode-solutions",
"id": "47143a03f6b8537835c12e6423692c6a80fd5a18",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/logger-rate-limiter/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
}
|
"""Speaker diarization pipelines"""
import itertools
import math
from typing import Callable, Optional, Text, Union
import numpy as np
import torch
from einops import rearrange
from pyannote.core import Annotation, SlidingWindow, SlidingWindowFeature
from pyannote.metrics.diarization import GreedyDiarizationErrorRate
from pyannote.pipeline.parameter import ParamDict, Uniform
from pyannote.audio import Audio, Inference, Model, Pipeline
from pyannote.audio.core.io import AudioFile
from pyannote.audio.pipelines.clustering import Clustering
from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
from pyannote.audio.pipelines.utils import (
PipelineModel,
SpeakerDiarizationMixin,
get_devices,
get_model,
)
from pyannote.audio.utils.signal import binarize
def batchify(iterable, batch_size: int = 32, fillvalue=None):
    """Group *iterable* into tuples of ``batch_size`` items.

    The last batch is padded with ``fillvalue`` when the iterable runs out,
    e.g. batchify('ABCDEFG', 3) --> ('A','B','C') ('D','E','F') ('G', None, None)
    """
    # The SAME iterator repeated batch_size times: zip_longest then pulls
    # consecutive items into each output tuple.
    iterators = (iter(iterable),) * batch_size
    return itertools.zip_longest(*iterators, fillvalue=fillvalue)
class SpeakerDiarization(SpeakerDiarizationMixin, Pipeline):
"""Speaker diarization pipeline
Parameters
----------
segmentation : Model, str, or dict, optional
Pretrained segmentation model. Defaults to "pyannote/segmentation@2022.07".
See pyannote.audio.pipelines.utils.get_model for supported format.
segmentation_duration: float, optional
The segmentation model is applied on a window sliding over the whole audio file.
`segmentation_duration` controls the duration of this window. Defaults to the
duration used when training the model (model.specifications.duration).
segmentation_step: float, optional
The segmentation model is applied on a window sliding over the whole audio file.
`segmentation_step` controls the step of this window, provided as a ratio of its
duration. Defaults to 0.1 (i.e. 90% overlap between two consecuive windows).
embedding : Model, str, or dict, optional
Pretrained embedding model. Defaults to "pyannote/embedding@2022.07".
See pyannote.audio.pipelines.utils.get_model for supported format.
embedding_exclude_overlap : bool, optional
Exclude overlapping speech regions when extracting embeddings.
Defaults (False) to use the whole speech.
clustering : str, optional
Clustering algorithm. See pyannote.audio.pipelines.clustering.Clustering
for available options. Defaults to "HiddenMarkovModelClustering".
segmentation_batch_size : int, optional
Batch size used for speaker segmentation. Defaults to 32.
embedding_batch_size : int, optional
Batch size used for speaker embedding. Defaults to 32.
der_variant : dict, optional
Optimize for a variant of diarization error rate.
Defaults to {"collar": 0.0, "skip_overlap": False}. This is used in `get_metric`
when instantiating the metric: GreedyDiarizationErrorRate(**der_variant).
use_auth_token : str, optional
When loading private huggingface.co models, set `use_auth_token`
to True or to a string containing your hugginface.co authentication
token that can be obtained by running `huggingface-cli login`
Usage
-----
>>> pipeline = SpeakerDiarization()
>>> diarization = pipeline("/path/to/audio.wav")
>>> diarization = pipeline("/path/to/audio.wav", num_speakers=4)
>>> diarization = pipeline("/path/to/audio.wav", min_speakers=2, max_speakers=10)
Hyper-parameters
----------------
segmentation.threshold
segmentation.min_duration_off
clustering.???
"""
    def __init__(
        self,
        segmentation: PipelineModel = "pyannote/segmentation@2022.07",
        segmentation_duration: float = None,
        segmentation_step: float = 0.1,
        embedding: PipelineModel = "speechbrain/spkrec-ecapa-voxceleb@5c0be3875fda05e81f3c004ed8c7c06be308de1e",
        embedding_exclude_overlap: bool = False,
        clustering: str = "HiddenMarkovModelClustering",
        embedding_batch_size: int = 32,
        segmentation_batch_size: int = 32,
        der_variant: dict = None,
        use_auth_token: Union[Text, None] = None,
    ):
        """Instantiate the pipeline (see class docstring for parameters)."""
        super().__init__()
        self.segmentation_model = segmentation
        model: Model = get_model(segmentation, use_auth_token=use_auth_token)
        self.segmentation_batch_size = segmentation_batch_size
        # Fall back to the window duration the model was trained with.
        self.segmentation_duration = (
            segmentation_duration or model.specifications.duration
        )
        self.segmentation_step = segmentation_step
        self.embedding = embedding
        self.embedding_batch_size = embedding_batch_size
        self.embedding_exclude_overlap = embedding_exclude_overlap
        # "klustering" keeps the raw option *string*; self.clustering (set at
        # the bottom of this method) holds the instantiated clustering object.
        self.klustering = clustering
        self.der_variant = der_variant or {"collar": 0.0, "skip_overlap": False}
        # Spread segmentation and embedding models over (up to) two devices.
        seg_device, emb_device = get_devices(needs=2)
        model.to(seg_device)
        self._segmentation = Inference(
            model,
            duration=self.segmentation_duration,
            step=self.segmentation_step * self.segmentation_duration,
            skip_aggregation=True,
            batch_size=self.segmentation_batch_size,
        )
        self._frames: SlidingWindow = self._segmentation.model.introspection.frames
        # Hyper-parameters to be optimized by pyannote.pipeline.
        self.segmentation = ParamDict(
            threshold=Uniform(0.1, 0.9),
            min_duration_off=Uniform(0.0, 1.0),
        )
        if self.klustering == "OracleClustering":
            # Oracle clustering needs no embeddings, hence no distance metric.
            metric = "not_applicable"
        else:
            self._embedding = PretrainedSpeakerEmbedding(
                self.embedding, device=emb_device, use_auth_token=use_auth_token
            )
            self._audio = Audio(sample_rate=self._embedding.sample_rate, mono=True)
            metric = self._embedding.metric
        try:
            Klustering = Clustering[clustering]
        except KeyError:
            raise ValueError(
                f'clustering must be one of [{", ".join(list(Clustering.__members__))}]'
            )
        self.clustering = Klustering.value(metric=metric)
    def default_parameters(self):
        """Return hand-tuned parameters for the stock model combination.

        Raises NotImplementedError for any other configuration.
        """
        # NOTE(review): this compares ``self.clustering`` (assigned an
        # instantiated clustering object at the end of __init__) against the
        # option *string* — presumably relies on pyannote Pipeline attribute
        # handling; confirm it should not be ``self.klustering`` instead.
        if (
            self.segmentation_model == "pyannote/segmentation@2022.07"
            and self.segmentation_duration == 5.0
            and self.segmentation_step == 0.1
            and self.embedding
            == "speechbrain/spkrec-ecapa-voxceleb@5c0be3875fda05e81f3c004ed8c7c06be308de1e"
            and self.embedding_exclude_overlap == True
            and self.clustering == "HiddenMarkovModelClustering"
        ):
            return {
                "segmentation": {
                    "threshold": 0.58,
                    "min_duration_off": 0.0,
                },
                "clustering": {
                    "single_cluster_detection": {
                        "quantile": 0.05,
                        "threshold": 1.15,
                    },
                    "covariance_type": "diag",
                    "threshold": 0.35,
                },
            }
        raise NotImplementedError()
def classes(self):
speaker = 0
while True:
yield f"SPEAKER_{speaker:02d}"
speaker += 1
    @property
    def CACHED_SEGMENTATION(self):
        # Key under which per-file segmentations are cached during training.
        return "training_cache/segmentation"
def get_segmentations(self, file) -> SlidingWindowFeature:
"""Apply segmentation model
Parameter
---------
file : AudioFile
Returns
-------
segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature
"""
if self.training:
if self.CACHED_SEGMENTATION in file:
segmentations = file[self.CACHED_SEGMENTATION]
else:
segmentations = self._segmentation(file)
file[self.CACHED_SEGMENTATION] = segmentations
else:
segmentations: SlidingWindowFeature = self._segmentation(file)
return segmentations
    def get_embeddings(
        self,
        file,
        binary_segmentations: SlidingWindowFeature,
        exclude_overlap: bool = False,
    ):
        """Extract embeddings for each (chunk, speaker) pair

        Parameters
        ----------
        file : AudioFile
        binary_segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature
            Binarized segmentation.
        exclude_overlap : bool, optional
            Exclude overlapping speech regions when extracting embeddings.
            In case non-overlapping speech is too short, use the whole speech.

        Returns
        -------
        embeddings : (num_chunks, num_speakers, dimension) array
        """
        # when optimizing the hyper-parameters of this pipeline with frozen "segmentation_onset",
        # one can reuse the embeddings from the first trial, bringing a massive speed up to
        # the optimization process (and hence allowing to use a larger search space).
        if self.training:
            # we only re-use embeddings if they were extracted based on the same value of the
            # "segmentation_onset" hyperparameter and "embedding_exclude_overlap" parameter.
            cache = file.get("training_cache/embeddings", dict())
            if (
                cache.get("segmentation.threshold", None) == self.segmentation.threshold
                and cache.get("embedding_exclude_overlap", None)
                == self.embedding_exclude_overlap
            ):
                return cache["embeddings"]
        duration = binary_segmentations.sliding_window.duration
        num_chunks, num_frames, _ = binary_segmentations.data.shape
        if exclude_overlap:
            # minimum number of samples needed to extract an embedding
            # (a lower number of samples would result in an error)
            min_num_samples = self._embedding.min_num_samples
            # corresponding minimum number of frames
            num_samples = duration * self._embedding.sample_rate
            min_num_frames = math.ceil(num_frames * min_num_samples / num_samples)
            # zero-out frames with overlapping speech
            clean_frames = 1.0 * (
                np.sum(binary_segmentations.data, axis=2, keepdims=True) < 2
            )
            clean_segmentations = SlidingWindowFeature(
                binary_segmentations.data * clean_frames,
                binary_segmentations.sliding_window,
            )
        else:
            # keep everything: "clean" masks are just the raw masks
            min_num_frames = -1
            clean_segmentations = SlidingWindowFeature(
                binary_segmentations.data, binary_segmentations.sliding_window
            )

        def iter_waveform_and_mask():
            # Yield one (waveform, mask) pair per (chunk, speaker).
            for (chunk, masks), (_, clean_masks) in zip(
                binary_segmentations, clean_segmentations
            ):
                # chunk: Segment(t, t + duration)
                # masks: (num_frames, local_num_speakers) np.ndarray
                waveform, _ = self._audio.crop(
                    file,
                    chunk,
                    duration=duration,
                    mode="pad",
                )
                # waveform: (1, num_samples) torch.Tensor
                # mask may contain NaN (in case of partial stitching)
                masks = np.nan_to_num(masks, nan=0.0).astype(np.float32)
                clean_masks = np.nan_to_num(clean_masks, nan=0.0).astype(np.float32)
                for mask, clean_mask in zip(masks.T, clean_masks.T):
                    # mask: (num_frames, ) np.ndarray
                    # prefer the overlap-free mask unless it is too short
                    if np.sum(clean_mask) > min_num_frames:
                        used_mask = clean_mask
                    else:
                        used_mask = mask
                    yield waveform[None], torch.from_numpy(used_mask)[None]
                    # w: (1, 1, num_samples) torch.Tensor
                    # m: (1, num_frames) torch.Tensor

        batches = batchify(
            iter_waveform_and_mask(),
            batch_size=self.embedding_batch_size,
            fillvalue=(None, None),
        )
        embedding_batches = []
        for batch in batches:
            # drop the (None, None) padding added by batchify
            waveforms, masks = zip(*filter(lambda b: b[0] is not None, batch))
            waveform_batch = torch.vstack(waveforms)
            # (batch_size, 1, num_samples) torch.Tensor
            mask_batch = torch.vstack(masks)
            # (batch_size, num_frames) torch.Tensor
            embedding_batch: np.ndarray = self._embedding(
                waveform_batch, masks=mask_batch
            )
            # (batch_size, dimension) np.ndarray
            embedding_batches.append(embedding_batch)
        embedding_batches = np.vstack(embedding_batches)
        embeddings = rearrange(embedding_batches, "(c s) d -> c s d", c=num_chunks)
        # caching embeddings for subsequent trials
        # (see comments at the top of this method for more details)
        if self.training:
            file["training_cache/embeddings"] = {
                "segmentation.threshold": self.segmentation.threshold,
                "embedding_exclude_overlap": self.embedding_exclude_overlap,
                "embeddings": embeddings,
            }
        return embeddings
def reconstruct(
self,
segmentations: SlidingWindowFeature,
hard_clusters: np.ndarray,
count: SlidingWindowFeature,
) -> SlidingWindowFeature:
"""Build final discrete diarization out of clustered segmentation
Parameters
----------
segmentations : (num_chunks, num_frames, num_speakers) SlidingWindowFeature
Raw speaker segmentation.
hard_clusters : (num_chunks, num_speakers) array
Output of clustering step.
count : (total_num_frames, 1) SlidingWindowFeature
Instantaneous number of active speakers.
Returns
-------
discrete_diarization : SlidingWindowFeature
Discrete (0s and 1s) diarization.
"""
num_chunks, num_frames, local_num_speakers = segmentations.data.shape
num_clusters = np.max(hard_clusters) + 1
clustered_segmentations = np.NAN * np.zeros(
(num_chunks, num_frames, num_clusters)
)
for c, (cluster, (chunk, segmentation)) in enumerate(
zip(hard_clusters, segmentations)
):
# cluster is (local_num_speakers, )-shaped
# segmentation is (num_frames, local_num_speakers)-shaped
for k in np.unique(cluster):
if k == -2:
continue
# TODO: can we do better than this max here?
clustered_segmentations[c, :, k] = np.max(
segmentation[:, cluster == k], axis=1
)
clustered_segmentations = SlidingWindowFeature(
clustered_segmentations, segmentations.sliding_window
)
return self.to_diarization(clustered_segmentations, count)
    def apply(
        self,
        file: AudioFile,
        num_speakers: Optional[int] = None,
        min_speakers: Optional[int] = None,
        max_speakers: Optional[int] = None,
        hook: Optional[Callable] = None,
    ) -> Annotation:
        """Apply speaker diarization

        Runs the full pipeline: segmentation, speaker counting, (optional)
        embedding extraction, clustering, and reconstruction of a continuous
        diarization.

        Parameters
        ----------
        file : AudioFile
            Processed file.
        num_speakers : int, optional
            Number of speakers, when known.
        min_speakers : int, optional
            Minimum number of speakers. Has no effect when `num_speakers` is provided.
        max_speakers : int, optional
            Maximum number of speakers. Has no effect when `num_speakers` is provided.
        hook : callable, optional
            Hook called after each major step of the pipeline with the following
            signature: hook("step_name", step_artefact, file=file)

        Returns
        -------
        diarization : Annotation
            Speaker diarization
        """

        # setup hook (e.g. for debugging purposes)
        hook = self.setup_hook(file, hook=hook)

        # validate/normalize the requested speaker-count constraints
        num_speakers, min_speakers, max_speakers = self.set_num_speakers(
            num_speakers=num_speakers,
            min_speakers=min_speakers,
            max_speakers=max_speakers,
        )

        segmentations = self.get_segmentations(file)
        hook("segmentation", segmentations)
        # shape: (num_chunks, num_frames, local_num_speakers)

        # estimate frame-level number of instantaneous speakers
        count = self.speaker_count(
            segmentations,
            onset=self.segmentation.threshold,
            frames=self._frames,
        )
        hook("speaker_counting", count)
        # shape: (num_frames, 1)
        # dtype: int

        # binarize segmentation
        binarized_segmentations: SlidingWindowFeature = binarize(
            segmentations,
            onset=self.segmentation.threshold,
            initial_state=False,
        )

        # oracle clustering works directly from the reference and does not
        # need embeddings at all
        if self.klustering == "OracleClustering":
            embeddings = None
        else:
            embeddings = self.get_embeddings(
                file,
                binarized_segmentations,
                exclude_overlap=self.embedding_exclude_overlap,
            )
            hook("embeddings", embeddings)
            # shape: (num_chunks, local_num_speakers, dimension)

        hard_clusters, _ = self.clustering(
            embeddings=embeddings,
            segmentations=binarized_segmentations,
            num_clusters=num_speakers,
            min_clusters=min_speakers,
            max_clusters=max_speakers,
            file=file,  # <== for oracle clustering
            frames=self._frames,  # <== for oracle clustering
        )
        # hard_clusters: (num_chunks, num_speakers)

        # reconstruct discrete diarization from raw hard clusters

        # keep track of inactive speakers: no active frame in the whole chunk
        inactive_speakers = np.sum(binarized_segmentations.data, axis=1) == 0
        # shape: (num_chunks, num_speakers)

        # mark inactive speakers with cluster index -2, which reconstruct()
        # skips when rebuilding the diarization
        hard_clusters[inactive_speakers] = -2
        discrete_diarization = self.reconstruct(
            segmentations,
            hard_clusters,
            count,
        )
        hook("discrete_diarization", discrete_diarization)

        # convert to continuous diarization
        diarization = self.to_annotation(
            discrete_diarization,
            min_duration_on=0.0,
            min_duration_off=self.segmentation.min_duration_off,
        )
        diarization.uri = file["uri"]

        # when reference is available, use it to map hypothesized speakers
        # to reference speakers (this makes later error analysis easier
        # but does not modify the actual output of the diarization pipeline)
        if "annotation" in file and file["annotation"]:
            return self.optimal_mapping(file["annotation"], diarization)

        # when reference is not available, rename hypothesized speakers
        # to human-readable SPEAKER_00, SPEAKER_01, ...
        return diarization.rename_labels(
            {
                label: expected_label
                for label, expected_label in zip(diarization.labels(), self.classes())
            }
        )
    def get_metric(self) -> GreedyDiarizationErrorRate:
        """Return the evaluation metric (greedy diarization error rate),
        instantiated with the keyword arguments stored in ``self.der_variant``."""
        return GreedyDiarizationErrorRate(**self.der_variant)
|
{
"content_hash": "da22425c8364e586a5ed9b40125d76fc",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 112,
"avg_line_length": 37.21880998080614,
"alnum_prop": 0.6024960033005002,
"repo_name": "pyannote/pyannote-audio",
"id": "348e1cf117d7eb6ea5f930512f7a474fa38371c7",
"size": "20502",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyannote/audio/pipelines/speaker_diarization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1083"
},
{
"name": "HTML",
"bytes": "2004"
},
{
"name": "JavaScript",
"bytes": "239182"
},
{
"name": "Jupyter Notebook",
"bytes": "17688"
},
{
"name": "Python",
"bytes": "517872"
}
],
"symlink_target": ""
}
|
# Chassis
# NOTE: every *_FIELDS list must stay index-aligned with its *_FIELD_LABELS
# counterpart (field i is displayed with label i).
CHASSIS_FIELDS = ['uuid', 'description', 'created_at', 'updated_at', 'extra']
CHASSIS_FIELD_LABELS = ['UUID', 'Description', 'Created At', 'Updated At',
                        'Extra']
# columns shown in tabular (list) output
CHASSIS_LIST_FIELDS = ['uuid', 'description']
CHASSIS_LIST_FIELD_LABELS = ['UUID', 'Description']

# Nodes
NODE_FIELDS = ['chassis_uuid', 'created_at', 'console_enabled', 'driver',
               'driver_info', 'driver_internal_info', 'extra',
               'instance_info', 'instance_uuid', 'last_error',
               'maintenance', 'maintenance_reason', 'power_state',
               'properties', 'provision_state', 'reservation',
               'target_power_state', 'target_provision_state',
               'updated_at', 'inspection_finished_at',
               'inspection_started_at', 'uuid', 'name']
NODE_FIELD_LABELS = ['Chassis UUID', 'Created At', 'Console Enabled', 'Driver',
                     'Driver Info', 'Driver Internal Info', 'Extra',
                     'Instance Info', 'Instance UUID', 'Last Error',
                     'Maintenance', 'Maintenance Reason', 'Power State',
                     'Properties', 'Provision State', 'Reservation',
                     'Target Power State', 'Target Provision State',
                     'Updated At', 'Inspection Finished At',
                     'Inspection Started At', 'UUID', 'Name']
# columns shown in tabular (list) output
NODE_LIST_FIELDS = ['uuid', 'name', 'instance_uuid', 'power_state',
                    'provision_state', 'maintenance']
NODE_LIST_FIELD_LABELS = ['UUID', 'Name', 'Instance UUID', 'Power State',
                          'Provision State', 'Maintenance']

# Ports
PORT_FIELDS = ['uuid', 'address', 'created_at', 'extra', 'node_uuid',
               'updated_at']
PORT_FIELD_LABELS = ['UUID', 'Address', 'Created At', 'Extra', 'Node UUID',
                     'Updated At']
# columns shown in tabular (list) output
PORT_LIST_FIELDS = ['uuid', 'address']
PORT_LIST_FIELD_LABELS = ['UUID', 'Address']
|
{
"content_hash": "570149dd9be86c268672fabfaddc8c05",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 40.020833333333336,
"alnum_prop": 0.551795939614784,
"repo_name": "rdo-management/python-ironicclient",
"id": "e54bcd541b053ad797c8f7edbe39e20c0e6dd842",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/mgt-master",
"path": "ironicclient/v1/resource_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "408651"
}
],
"symlink_target": ""
}
|
import argparse
import contextlib
import os
import signal
import tempfile
from enum import Enum
import shlex
import subprocess
from tempfile import TemporaryDirectory, mkdtemp, mkstemp
import attr
import logging
import shutil
from dmpy.objects.dm_rule import DMRule
logger = logging.getLogger(__name__)
class SchedulingEngine(Enum):
    """Job-scheduling backends understood by DMBuilder/DistributedMake."""
    # run make recipes locally, without any cluster scheduler
    none = 0
    # Slurm: recipe commands are wrapped in `srun` invocations
    slurm = 1
    # (Sun/Son of) Grid Engine: recipes are submitted via `qsub` scripts
    sge = 2
def add_dm_args_to_argparse_object(object):
    """Register the standard dmpy command-line options on *object*.

    *object* is an ``argparse.ArgumentParser`` (or anything exposing a
    compatible ``add_argument``); it is returned to allow chaining.
    """
    # (flags, kwargs) pairs, kept in the original registration order so the
    # generated --help output is unchanged
    option_specs = [
        (("-r", "--run"), {"action": "store_true"}),
        (("-j", "--jobs"), {"type": int, "default": 1}),
        (("-c", "--no-cleanup"), {"action": "store_true"}),
        (("-t", "--touch"), {"action": "store_true"}),
        (("-k", "--keep-going"), {"action": "store_true"}),
        (("-B", "--always-make"), {"action": "store_true"}),
        (("--scheduler",), {"default": SchedulingEngine.none.name}),
        (("--scheduler-args",), {"default": None}),
    ]
    for flags, kwargs in option_specs:
        object.add_argument(*flags, **kwargs)
    return object
def get_dm_arg_parser(description="dmpy powered analysis"):
    """Build an ``argparse.ArgumentParser`` pre-populated with the standard
    dmpy options (see ``add_dm_args_to_argparse_object``)."""
    return add_dm_args_to_argparse_object(
        argparse.ArgumentParser(description=description)
    )
@attr.s(slots=True)
class DMBuilder(object):
    """Collects make rules and serialises them into a makefile.

    Depending on ``scheduler``, recipe commands are optionally wrapped in
    Slurm ``srun`` calls or submitted through SGE ``qsub`` scripts.
    """
    # shell used by make for every recipe line (written as SHELL = ...)
    shell = attr.ib(default="/bin/bash -o pipefail")
    # DMRule instances, in the order they were added
    rules = attr.ib(attr.Factory(list))
    # SchedulingEngine member selecting how recipe commands are wrapped
    scheduler = attr.ib(default=SchedulingEngine.none)
    # extra command-line arguments for the scheduler (e.g. srun options)
    scheduler_args = attr.ib(default=attr.Factory(list))
    # targets already added; used to reject duplicates
    _targets = attr.ib(attr.Factory(set))
    def add(self, target, deps, cmds, opts=None, intermediate=False):
        """Add a rule that produces *target* from *deps* by running *cmds*.

        Raises ValueError when *target* is None and a plain Exception when
        the same target is added twice.
        """
        if target is None:
            raise ValueError("target may not be None type")
        if target in self._targets:
            raise Exception("Tried to add target twice: {}".format(target))
        self._targets.add(target)
        self.rules.append(DMRule(target, deps, cmds, opts, intermediate))
    def write_to_filehandle(self, fh):
        """Write the complete makefile for the collected rules to *fh*.

        NOTE(review): when a scheduler is configured this mutates
        ``rule.recipe`` in place, so calling it twice would wrap the
        commands twice -- confirm callers only invoke it once.
        """
        fh.write("SHELL = {}\n".format(self.shell))
        intermediates = []
        for rule in self.rules:
            if rule.intermediate:
                intermediates.append(rule.target)
        if len(intermediates) > 0:
            # .INTERMEDIATE targets are deleted by make once consumed
            fh.write(".INTERMEDIATE: {}\n".format(" ".join(intermediates)))
        for rule in self.rules:
            dirname = os.path.abspath(os.path.dirname(rule.target))
            fh.write("{}: {}\n".format(rule.target, ' '.join(rule.deps)))
            if self.scheduler == SchedulingEngine.slurm:
                # wrap every command in an srun invocation: srun ... bash -c '<cmd>'
                cmd_prefix = ['srun', '--quit-on-interrupt', '--job-name', rule.name]
                cmd_prefix += self.scheduler_args
                cmd_prefix += ['bash', '-c']
                cmd_prefix = ' '.join(cmd_prefix)
                rule.recipe = [cmd_prefix + ' ' + shlex.quote(cmd) for cmd in rule.recipe]
            if self.scheduler == SchedulingEngine.sge and len(rule.clusteropts) > 0:
                # write each command into a per-target shell script and submit
                # it synchronously (-sync y) through qsub with the requested
                # slots, memory, and queue taken from rule.clusteropts
                shfile = f'{rule.target}.sh'
                cmd_prefix = ['echo -e \'#!/bin/bash\\nset -xo pipefail\\n']
                cmd_suffix = [f'\' > {shfile};',
                              'qsub', '-sync y', '-cwd', '-V',
                              f'-pe smp {rule.clusteropts["threads"]}',
                              f'-l h_vmem={rule.clusteropts["h_vmem"]}G,h_stack=32M',
                              f'-q {rule.clusteropts["queue"]}',
                              f'-o {rule.target}.log.out',
                              f'-e {rule.target}.log.err',
                              f'-N {os.path.basename(rule.name)}',
                              shfile
                              ]
                cmd_prefix = ' '.join(cmd_prefix)
                cmd_suffix = ' '.join(cmd_suffix)
                rule.recipe = [cmd_prefix + '\\n' + cmd + '\\n' + cmd_suffix for cmd in rule.recipe]
            # make sure the target's output directory exists before the recipe runs
            rule.recipe.insert(0, "@test -d {0} || mkdir -p {0}".format(dirname))
            for cmd in rule.recipe:
                # escape $ so make does not expand it as a make variable
                cmd = cmd.replace("$", "$$")
                fh.write("\t{}\n".format(cmd))
        fh.write("all: {}\n".format(" ".join([r.target for r in self.rules])))
        fh.write(".DELETE_ON_ERROR:\n")
        fh.flush()
@attr.s(slots=True)
class DistributedMake(object):
    """Execute the rules collected in a DMBuilder by generating a makefile
    in a temporary directory and running GNU make on it.

    Most attributes mirror `make` command-line flags; see
    ``build_make_command`` for the exact mapping.
    """
    # actually execute recipes; when False, runs `make -n` (dry run)
    run = attr.ib(False)
    # keep going after errors (`make -k`)
    keep_going = attr.ib(False)
    # number of parallel jobs (`make -j`)
    jobs = attr.ib(1)
    # keep the temporary directory (and makefile) after execute()
    no_cleanup = attr.ib(False)
    # question mode (`make -q`): exit status reports whether targets are up to date
    question = attr.ib(False)
    # touch targets instead of running recipes (`make -t`)
    touch = attr.ib(False)
    # make debug output (`make -d`)
    debug = attr.ib(False)
    # unconditionally remake all targets (`make -B`)
    always_make = attr.ib(False)
    # shell written into the makefile's SHELL variable
    shell = attr.ib('/bin/bash -o pipefail')
    # call exit() instead of re-raising when the user presses Ctrl-C
    exit_on_keyboard_interrupt = attr.ib(True)
    # optional argparse namespace (see add_dm_args_to_argparse_object)
    args_object = attr.ib(None)
    # NOTE(review): never assigned or read by the visible code; accessing it
    # before assignment would raise AttributeError -- candidate for removal.
    _makefile_fp = attr.ib(init=False)
    _dm_builder = attr.ib(attr.Factory(DMBuilder))
    _tempdir = attr.ib(None)

    def __attrs_post_init__(self):
        self._handle_args_object()

    def _handle_args_object(self):
        """Copy settings from the argparse namespace, when one was given."""
        if self.args_object is None:
            return
        for attr_string in ['run', 'no_cleanup', 'jobs', 'touch', 'keep_going', 'always_make']:
            if attr_string in self.args_object:
                setattr(self, attr_string, getattr(self.args_object, attr_string))
        if "scheduler" in self.args_object:
            self._dm_builder.scheduler = SchedulingEngine[self.args_object.scheduler]
        if 'scheduler_args' in self.args_object and self.args_object.scheduler_args is not None:
            self._dm_builder.scheduler_args = shlex.split(self.args_object.scheduler_args)

    def add(self, target, deps, commands, opts=None):
        """Register a make rule (see DMBuilder.add)."""
        self._dm_builder.add(target, deps, commands, opts)

    def execute(self, callable=subprocess.Popen, popen_args=None):
        """Write the makefile and run `make` on it.

        Parameters
        ----------
        callable : Popen-compatible factory used to spawn the make process.
        popen_args : dict of extra keyword arguments passed to *callable*.

        Returns the result of ``process.communicate()``.
        """
        if popen_args is None:
            popen_args = {}
        with contextlib.ExitStack() as stack:
            if not self.no_cleanup:
                # remove the temp dir (and makefile) when the stack unwinds
                self.tempdir = stack.enter_context(remove_dir_on_exit(self.tempdir))
            makefile = os.path.join(self.tempdir, 'makefile')
            with open(makefile, 'wt') as makefile_fp:
                self._dm_builder.shell = self.shell
                self._dm_builder.write_to_filehandle(makefile_fp)
            makecmd = self.build_make_command(makefile)
            logger.warning(' '.join(makecmd))
            process = callable(' '.join(makecmd), shell=True, **popen_args)
            try:
                completed_process = process.communicate()
            except KeyboardInterrupt:
                # forward the interrupt to make, then exit or re-raise
                process.send_signal(signal.SIGINT)
                message = 'Exiting after keyboard interrupt'
                if self.exit_on_keyboard_interrupt:
                    logger.warning(message)
                    exit()
                raise KeyboardInterrupt(message)
            logger.warning(' '.join(makecmd))
            return completed_process

    def build_make_command(self, makefile_name):
        """Translate the configured flags into a `make` argument list."""
        makecmd = ['make', '-Werror']
        if not self.run:
            makecmd.append("-n")
        if self.keep_going:
            makecmd.append("-k")
        if self.question:
            # BUG FIX: `make -q` takes no argument. The previous code did
            # makecmd.extend(["-q", self.question]), injecting the boolean
            # flag value into the command list, which made the later
            # ' '.join(makecmd) fail with a TypeError.
            makecmd.append("-q")
        if self.touch:
            makecmd.extend(["-t"])
        if self.debug:
            makecmd.append("-d")
        if self.always_make:
            makecmd.append("-B")
        makecmd.extend(["-j", str(self.jobs)])
        makecmd.extend(["-f", makefile_name])
        makecmd.append("all")
        return makecmd

    @property
    def tempdir(self):
        """Returns the temporary dir that is used during execute()

        Cleaned up after execute() if no_cleanup=False

        >>> dm = DistributedMake()
        >>> tempdir = dm.tempdir
        >>> os.path.exists(tempdir)
        True
        >>> dm.execute()
        (...)
        >>> os.path.exists(tempdir)
        False
        """
        # created lazily on first access
        if self._tempdir is None:
            self._tempdir = mkdtemp()
        return self._tempdir

    @tempdir.setter
    def tempdir(self, new_tempdir):
        self._tempdir = new_tempdir

    def get_tempfile(self):
        """Returns a tempfile that is cleaned up after execute() is called

        :returns: returns a tuple containing an OS-level handle to an open file
        (as would be returned by os.open()) and the absolute pathname of that file, in that order

        >>> dm = DistributedMake()
        >>> file_handle, file_name = dm.get_tempfile()
        >>> os.path.exists(file_name)
        True
        >>> dm.execute()
        (...)
        >>> os.path.exists(file_name)
        False

        The temporary file is not cleaned up when no_cleanup is set to True.

        >>> dm = DistributedMake(no_cleanup=True)
        >>> file_handle, file_name = dm.get_tempfile()
        >>> os.path.exists(file_name)
        True
        >>> dm.execute()
        (...)
        >>> os.path.exists(file_name)
        True
        """
        return mkstemp(dir=self.tempdir)
@contextlib.contextmanager
def remove_dir_on_exit(dir):
    """Context manager that deletes directory *dir* (recursively) on exit.

    The removal is wrapped in try/finally so the directory is cleaned up
    even when the body raises (the original version leaked the directory
    on exceptions because rmtree ran only after a normal return).
    """
    try:
        yield dir
    finally:
        shutil.rmtree(dir)
|
{
"content_hash": "c974ed65ea49a224ac476cd5cef91665",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 100,
"avg_line_length": 35.395161290322584,
"alnum_prop": 0.5694919115971747,
"repo_name": "kvg/dmpy",
"id": "a428791bdd7aa3c52ba73e51d3bc3c825fbb03c5",
"size": "8778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dmpy/distributedmake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "24432"
}
],
"symlink_target": ""
}
|
from pkg_resources import Requirement
import pkg_resources
import sys
if __name__ == "__main__":
installed = []
for pkg in sys.stdin.readlines():
pkg = pkg.strip()
if len(pkg):
req = Requirement.parse(pkg)
try:
found = pkg_resources.working_set.find(req)
if found:
installed.append(pkg)
except pkg_resources.VersionConflict:
pass
sys.stdout.write('\n'.join(installed))
exit(0)
|
{
"content_hash": "66afa3475ffaa5a8a880d63b053d0ff2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 25.75,
"alnum_prop": 0.537864077669903,
"repo_name": "pywizard/pywizard",
"id": "ed97d905c6c50beb98c81a2e8ebbccb963617aa1",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywizard/resources/_util_package_pip_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93736"
},
{
"name": "Shell",
"bytes": "5842"
}
],
"symlink_target": ""
}
|
"""Script to parse CUPS Internet Printing Protocol (IPP) files."""
import argparse
import logging
import sys
from dtformats import cups_ipp
from dtformats import output_writers
def Main():
  """The main program function.

  Parses command-line arguments, opens the requested CUPS IPP file and
  prints information about it to stdout.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(description=(
      'Extracts information from CUPS IPP files.'))
  argument_parser.add_argument(
      '-d', '--debug', dest='debug', action='store_true', default=False,
      help='enable debug output.')
  argument_parser.add_argument(
      'source', nargs='?', action='store', metavar='PATH',
      default=None, help='path of the CUPS IPP file.')
  options = argument_parser.parse_args()

  # the source path is mandatory: print usage and bail out when missing
  if not options.source:
    print('Source file missing.')
    print('')
    argument_parser.print_help()
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')
  output_writer = output_writers.StdoutWriter()
  try:
    output_writer.Open()
  except IOError as exception:
    print(f'Unable to open output writer with error: {exception!s}')
    print('')
    return False

  cups_ipp_file = cups_ipp.CupsIppFile(
      debug=options.debug, output_writer=output_writer)
  cups_ipp_file.Open(options.source)
  # NOTE(review): nothing is explicitly read between Open() and Close() --
  # presumably parsing/output happens inside Open(); confirm in dtformats.
  print('CUPS Internet Printing Protocol (IPP) information:')
  print('')
  cups_ipp_file.Close()
  output_writer.WriteText('\n')
  output_writer.Close()
  return True
if __name__ == '__main__':
  # exit status 0 on success, 1 on failure
  sys.exit(0 if Main() else 1)
|
{
"content_hash": "afb4ada99caf7a85ab75760daadcb716",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 22.6231884057971,
"alnum_prop": 0.66944266495836,
"repo_name": "libyal/dtformats",
"id": "c34b3a58ea5ca4706025fca3a75614a1b9bed6a2",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/cups_ipp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "827"
},
{
"name": "Python",
"bytes": "700241"
},
{
"name": "Shell",
"bytes": "1139"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context
from django.core.cache import cache
from app.models import *
import urllib
import urllib2
from math import floor
from time import time
from google.appengine.api import mail
def index(request):
    """Render the desktop landing page, redirecting iPhone visitors."""
    # user is coming from iPhone. Send to iphone page
    if 'iPhone' in request.META["HTTP_USER_AGENT"]:
        return HttpResponseRedirect('/iphone')
    message = request.GET.get('message', False)
    message_type = request.GET.get('message_type', '')
    wants_manual = request.GET.get('manual_apn', '') == 'true'
    form = '_manual_apn_form.html' if wants_manual else '_carrier_select_form.html'
    context = {'form': form, 'message': message, 'message_type': message_type,
               'carriers': listed_carriers()}
    return render_to_response('index.html', context)
def index_iphone(request):
    """Render the iPhone landing page (includes a mobile ad block)."""
    message = request.GET.get('message', False)
    message_type = request.GET.get('message_type', '')
    wants_manual = request.GET.get('manual_apn', '') == 'true'
    form = '_manual_apn_form.html' if wants_manual else '_carrier_select_form.html'
    context = {'form': form, 'message': message, 'message_type': message_type,
               'carriers': listed_carriers(), 'google_ad': google_ad(request)}
    return render_to_response('index_iphone.html', context)
def get_config(request, carrier_id):
    """Serve the .mobileconfig profile for the carrier with datastore key
    *carrier_id*, logging the download."""
    # get the carrier by ID
    carrier = db.get(db.Key(carrier_id))
    # Log the download
    log = BundleDownloaded(ip = request.META["REMOTE_ADDR"], ua=request.META["HTTP_USER_AGENT"], carrier=carrier)
    log.put()
    # render the details into the template
    t = get_template('mobileconfig.xml')
    c = Context({"carrier": carrier})
    # return it as a file download with Apple's configuration-profile MIME type
    response = HttpResponse(t.render(c), mimetype='application/x-apple-aspen-config')
    response['Content-Disposition'] = 'attachment; filename=tether.mobileconfig'
    return response
# TODO: This needs to be redone with proper Django forms instead of this
# PHP-like ad-hoc parameter handling.
def submit_request(request):
    """Handle the config-request form.

    Resolves (or creates) a Carrier from the submitted parameters, then
    either redirects to the profile download or e-mails the download link,
    depending on the 'submit_action' parameter.
    """
    carrier = request.REQUEST.get('carrier', False)
    apn = request.REQUEST.get('apn', '')
    username = request.REQUEST.get('username', '')
    password = request.REQUEST.get('password', '')
    action = request.REQUEST.get('submit_action', '')
    to = request.REQUEST.get('to', '')
    carrier_id = ''
    # check to see if we have a carrier specified. If so, act on that.
    if carrier:
        carrier_id = carrier
    # otherwise build from params, try to find, if not create, then set the id
    else:
        # if apn not set, redirect home with error.
        if apn == '':
            # redirect home with error
            message = urllib.quote('You must enter an APN!')
            return HttpResponseRedirect('/?manual_apn=true&message_type=error&message=' + message)
        # otherwise, try to find, and if not found create
        query = Carrier.all().filter('apn =', apn).filter('username = ', username).filter('password = ', password)
        results = query.fetch(1)
        for result in results:
            carrier_id = str(result.key())
        if carrier_id == '':
            # no carrier found, create
            carrier = Carrier(apn = apn, username = username, password = password)
            carrier_id = str(carrier.put())
    # absolute link to the profile download for this carrier
    config_uri = request.build_absolute_uri('/get_config/' + carrier_id + '/tether.mobileconfig')
    # check the action. If 'Download', redirect the user to the download page for the id
    if action == 'Download':
        return HttpResponseRedirect(config_uri)
    # if 'send', generate a link to the thingy, and mail it. redirect home with status
    else:
        if to == '':
            # redirect home with error
            message = urllib.quote('You must enter a email address to send the details.')
            return HttpResponseRedirect('/?message_type=error&message=' + message)
        else:
            message = mail.EmailMessage(sender="TetherMe <lstoll@lstoll.net>",
                            subject="Tethering config")
            message.to = to
            message.body = """
Click on the following link to download the settings.
Once the settings open, install them - you should then be ready to go with tethering.
%s
If you have any problems, please e-mail me at the address on the site.
Enjoy!
""" % config_uri
            message.send()
            # log the send, linked to the carrier record
            log = MessageSent(to = to, carrier=db.get(db.Key(carrier_id)))
            log.put()
            statusmsg = urllib.quote('Details sent! Please check your mail on your phone.')
            return HttpResponseRedirect('/?message_type=success&message=' + statusmsg)
#end
def listed_carriers():
    """Return the publicly-listed carriers, memoised in cache for 10 minutes."""
    carriers = cache.get('listed_carriers')
    if carriers:
        return carriers
    carriers = Carrier.all().filter('listed =', True).order("name").fetch(1000)
    cache.set('listed_carriers', carriers, 10 * 60)
    return carriers
def google_ad(request, publisher_id='pub-5195662196335636', format='mobile_double'):
    """Fetch a mobile AdSense ad snippet for this request.

    Builds a query for Google's pagead endpoint from the client's request
    headers and returns the raw XHTML response body.
    (Python 2 code: uses urllib.urlencode and urllib2.urlopen.)
    """
    scheme = 'https://' if request.is_secure() else 'http://'
    # query parameters for the pagead ads endpoint, derived from the request
    params = {
        'ad_type':'text_image',
        'channel':'',
        'client':'ca-mb-' + publisher_id,
        'dt':repr(floor(1000*time())),
        'format':format,
        'https':'on' if request.is_secure() else '',
        'host':scheme + request.META.get('HTTP_HOST', ''),
        'ip':request.META.get('REMOTE_ADDR', ''),
        'markup':'xhtml',
        'oe':'utf8',
        'output':'xhtml',
        'ref':request.META.get('HTTP_REFERER', ''),
        'url':scheme + request.META.get('HTTP_HOST', '') + \
            request.META.get('PATH_INFO', ''),
        'useragent':request.META.get('HTTP_USER_AGENT', '')
    }
    # screen size: prefer the UA-Pixels header ("WxH"), falling back to
    # X-UP-DEVCAP-SCREENPIXELS ("W,H")
    screen_res = request.META.get('HTTP_UA_PIXELS', '')
    delimiter = 'x'
    if screen_res == '':
        screen_res = request.META.get('HTTP_X_UP_DEVCAP_SCREENPIXELS', '')
        delimiter = ','
    res_array = screen_res.split(delimiter)
    if len(res_array) == 2:
        params['u_w'] = res_array[0]
        params['u_h'] = res_array[1]
    # pass through the carrier device identifier header when present
    dcmguid = request.META.get('HTTP_X_DCMGUID', '')
    if dcmguid != '':
        params['dcmguid'] = dcmguid
    url = 'http://pagead2.googlesyndication.com/pagead/ads?' + urllib.urlencode(params)
    # NOTE(review): synchronous outbound HTTP on every page render -- the
    # request blocks until Google responds; confirm this is acceptable.
    return urllib2.urlopen(url).read()
|
{
"content_hash": "27eb195f8b3fce19a1fdb8173adac895",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 111,
"avg_line_length": 37.80864197530864,
"alnum_prop": 0.656,
"repo_name": "lstoll/tetherme",
"id": "86f5d0681e2913ef948bceb8c6a6319bc26f02b9",
"size": "6125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "134752"
},
{
"name": "Python",
"bytes": "149901"
}
],
"symlink_target": ""
}
|
import unittest
from pydirections.director import Director
from pydirections.route_requester import DirectionsRequest
from pydirections.exceptions import InvalidModeError, InvalidAPIKeyError, InvalidAlternativeError
from pydirections.exceptions import InvalidRouteRestrictionError
import os
MAPS_API_KEY = os.environ['MAPS_API_KEY']
class TestOptionalParameters(unittest.TestCase):
    """Validation of optional request parameters (mode, alternatives,
    route restrictions)."""
    def test_invalid_mode(self):
        """
        Tests that assigning an invalid travel mode raises InvalidModeError.
        """
        requester = DirectionsRequest(origin="San Francisco, CA", destination="Palo Alto, CA", key=MAPS_API_KEY)
        with self.assertRaises(InvalidModeError):
            requester.mode = "flying"
    def test_invalid_alternative(self):
        """
        Tests for error handling when a non-boolean value is provided to
        the set_alternatives function.
        """
        requester = DirectionsRequest(origin="San Francisco, CA", destination="Palo Alto, CA", key=MAPS_API_KEY)
        with self.assertRaises(InvalidAlternativeError):
            requester.set_alternatives('False')
    def test_invalid_restrictions(self):
        """
        Tests that an unsupported route restriction raises
        InvalidRouteRestrictionError.
        """
        requester = DirectionsRequest(origin="San Francisco, CA", destination="Palo Alto, CA", key=MAPS_API_KEY)
        with self.assertRaises(InvalidRouteRestrictionError):
            requester.set_route_restrictions("freeways", "railways")
class TestAPIKey(unittest.TestCase):
    """Validation of the API key setter."""
    def test_invalid_api_key(self):
        # a non-string key must be rejected
        requester = DirectionsRequest(origin="San Francisco, CA", destination="Palo Alto, CA", key=MAPS_API_KEY)
        invalid_key = 123456
        with self.assertRaises(InvalidAPIKeyError):
            requester.key = invalid_key
class TestBasicResponse(unittest.TestCase):
    """End-to-end request against the live API.

    NOTE(review): this performs a real network call and needs a valid
    MAPS_API_KEY environment variable.
    """
    def test_basic_valid_response(self):
        requester = DirectionsRequest(origin="San Francisco, CA", destination="Palo Alto, CA", key=MAPS_API_KEY)
        response = Director.fetch_directions(requester)
        self.assertEqual(response.status, "OK")
# run the test suite when this file is executed directly
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "45ed59dd8cdf6a7296a65b085c77174b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 106,
"avg_line_length": 38.58,
"alnum_prop": 0.7662001036806636,
"repo_name": "apranav19/pydirections",
"id": "4023c19111304b6896ea9e3044c590baf9159dc5",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_route_requester.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11973"
}
],
"symlink_target": ""
}
|
import numpy as np
from rlscore.utilities import creators
from rlscore.kernel import LinearKernel
from rlscore.utilities.adapter import SvdAdapter
from rlscore.utilities.adapter import LinearSvdAdapter
from rlscore.utilities.adapter import PreloadedKernelMatrixSvdAdapter
from rlscore.utilities import array_tools
class AbstractLearner(object):
    '''Base class for learning algorithms'''

    def __init__(self, **kwargs):
        super(AbstractLearner, self).__init__()

    @classmethod
    def createLearner(cls, **kwargs):
        """Factory method: instantiate *cls* with the given keyword arguments."""
        return cls(**kwargs)

    def train(self):
        """Trains the learning algorithm.

        After the learner is trained, one can call the method getModel
        to get the trained model
        """
        pass

    def getModel(self):
        """Returns the trained model, call this only after training.

        Returns
        -------
        model : {LinearModel, DualModel}
            prediction function
        """
        raise Exception("AbstractLearner does not have an implemented getModel function.")
class AbstractSupervisedLearner(AbstractLearner):
    '''Base class for supervised learning algorithms'''
    def __init__(self, **kwargs):
        super(AbstractSupervisedLearner, self).__init__(**kwargs)
        # training labels, coerced into a 2-D label matrix
        Y = kwargs['train_labels']
        self.Y = array_tools.as_labelmatrix(Y)
        # number of training examples and number of label columns
        self.size = self.Y.shape[0]
        self.ysize = self.Y.shape[1]
class AbstractSvdLearner(AbstractLearner):
    """Base class for singular value decomposition based learners"""
    def __init__(self, **kwargs):
        super(AbstractSvdLearner, self).__init__(**kwargs)
        #THE GREAT SVD MONOLITH!!!
        # Select the SVD adapter matching the kernel configuration:
        # - a precomputed kernel matrix uses the preloaded-matrix adapter
        # - a linear kernel uses the specialised linear adapter
        # - anything else goes through the generic SVD adapter
        # (dict.has_key is Python-2-only; this file targets Python 2.)
        if kwargs.has_key('kernel_matrix'):
            self.svdad = PreloadedKernelMatrixSvdAdapter.createAdapter(**kwargs)
        else:
            if not kwargs.has_key('kernel_obj'):
                # default to a linear kernel when none was specified
                if not kwargs.has_key("kernel"):
                    kwargs["kernel"] = "LinearKernel"
                kwargs['kernel_obj'] = creators.createKernelByModuleName(**kwargs)
            if isinstance(kwargs['kernel_obj'], LinearKernel):
                self.svdad = LinearSvdAdapter.createAdapter(**kwargs)
            else:
                self.svdad = SvdAdapter.createAdapter(**kwargs)
        # singular values and (presumably right) singular vectors exposed by
        # the adapter -- confirm exact semantics in the adapter classes
        self.svals = self.svdad.svals
        self.svecs = self.svdad.rsvecs
        #if not kwargs.has_key('regparam'):
        #    kwargs['regparam'] = 1.
        self.size = self.svecs.shape[0]
    def getModel(self):
        """Returns the trained model, call this only after training.

        Returns
        -------
        model : {LinearModel, DualModel}
            prediction function
        """
        model = self.svdad.createModel(self)
        return model
class AbstractSvdSupervisedLearner(AbstractSupervisedLearner,AbstractSvdLearner):
    """Base class for supervised singular value decomposition based learners"""
    def __init__(self, **kwargs):
        super(AbstractSvdSupervisedLearner, self).__init__(**kwargs)
        # NOTE(review): loadResources is not defined on either visible base
        # class -- confirm it exists elsewhere, otherwise these calls raise
        # AttributeError.
        AbstractSupervisedLearner.loadResources(self)
        AbstractSvdLearner.loadResources(self)
        # label count and feature-vector count must agree
        if self.size != self.svecs.shape[0]:
            tivc = str(self.svecs.shape[0])
            tlc = str(self.size)
            raise Exception('The number ' + tivc + ' of training feature vectors is different from the number ' + tlc + ' of training labels.')
    def train(self):
        """Trains the learning algorithm.

        After the learner is trained, one can call the method getModel
        to get the trained model
        """
        regparam = self.regparam
        self.solve(regparam)
    def solve(self, regparam):
        """Trains the learning algorithm, using the given regularization parameter.

        Overridden by concrete subclasses; this base implementation is a no-op.

        Parameters
        ----------
        regparam: float (regparam > 0)
            regularization parameter
        """
        pass
class CallbackFunction(object):
    """Observer interface used by AbstractIterativeLearner; both hooks are
    no-ops by default."""
    def callback(self, learner):
        # invoked via AbstractIterativeLearner.callback() during training
        pass
    def finished(self, learner):
        # invoked via AbstractIterativeLearner.finished() when training ends
        pass
class AbstractIterativeLearner(AbstractLearner):
    """Base class for iterative learners

    An optional 'callback' keyword argument supplies a CallbackFunction-like
    observer whose callback/finished hooks are invoked through the methods
    below.
    """
    def __init__(self, **kwargs):
        super(AbstractIterativeLearner, self).__init__(**kwargs)
        # `in` instead of the Python-2-only dict.has_key (valid on 2 and 3)
        if 'callback' in kwargs:
            self.callbackfun = kwargs['callback']
        else:
            self.callbackfun = None
    def callback(self):
        """Forward a per-iteration notification to the registered observer."""
        # `is not None` instead of `not ... == None` (identity check idiom)
        if self.callbackfun is not None:
            self.callbackfun.callback(self)
    def finished(self):
        """Notify the registered observer that training has finished."""
        if self.callbackfun is not None:
            self.callbackfun.finished(self)
|
{
"content_hash": "465770eedc7e7ff6962a687efa36bddd",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 143,
"avg_line_length": 30.793548387096774,
"alnum_prop": 0.6067462811648858,
"repo_name": "max291/RLScore",
"id": "49a254c1e7060fa885b7ead8f14a93621163236b",
"size": "4773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlscore/learner/abstract_learner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11240"
},
{
"name": "Python",
"bytes": "426056"
}
],
"symlink_target": ""
}
|
from .base import StreamProtocol
from struct import Struct
class MessageProtocol(StreamProtocol):
    """Frames a byte stream into length-prefixed messages.

    Each message is preceded by a 4-byte network-order unsigned length
    header (struct format "!I"). Incoming data is buffered until a full
    message is available, then dispatched to message_received().
    (Python 2 code: the buffer is a str.)
    """
    HEADER = Struct("!I")
    # compression? max size?
    def connected(self):
        # receive buffer, plus the length of the message currently being
        # read (None while we are still waiting for a complete header)
        self._buffer = ""
        self._length = None
    def disconnected(self):
        # flush any complete messages still sitting in the buffer
        self._process()
    def _process(self):
        """Extract and dispatch as many complete messages as are buffered."""
        while True:
            if self._length is None:
                if len(self._buffer) >= self.HEADER.size:
                    # BUG FIX: these two statements referenced the bare name
                    # HEADER, which only exists as a class attribute -- they
                    # raised NameError at runtime.
                    self._length, = self.HEADER.unpack(self._buffer[:self.HEADER.size])
                    self._buffer = self._buffer[self.HEADER.size:]
                else:
                    break
            elif len(self._buffer) >= self._length:
                message = self._buffer[:self._length]
                self._buffer = self._buffer[self._length:]
                self._length = None
                self.message_received(message)
            else:
                break
    def data_received(self, data):
        self._buffer += data
        self._process()
    def message_received(self, data):
        # override point for subclasses; default discards the message
        pass
class Sequential(MessageProtocol):
    """Message protocol driven by a generator-based state machine.

    Subclasses are expected to provide a ``statemachine()`` generator;
    each framed message is fed into it via send().
    """
    def connected(self):
        MessageProtocol.connected(self)
        # create the generator and prime it up to its first `yield`
        # NOTE(review): gen.next() is Python-2-only (next(gen) in Python 3)
        self.state = self.statemachine()
        self.state.next()
    def message_received(self, data):
        # resume the state machine with the next complete message
        self.state.send(data)
    def final_message_received(self, data):
        # hook for subclasses; unused by the visible code
        pass
|
{
"content_hash": "123820280461e956f4f1daa45435d86a",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 82,
"avg_line_length": 18.076923076923077,
"alnum_prop": 0.5255319148936171,
"repo_name": "tomerfiliba/tangled",
"id": "4f87687ecbbd5c3de544f5f12d1db3580324ee78",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protocols/rpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36178"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.abspath(".."))
from flask_pymongo._version import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath("."))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named "sphinx.ext.*") or your custom ones.
extensions = ["sphinx.ext.intersphinx", "sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Flask-PyMongo"
copyright = u"2011 - 2017, Dan Crosta"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# convert "x.y.z.devN" => "x.y"
version = ".".join(__version__.split(".", 2)[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ""
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "flask_small"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
sys.path.append(os.path.abspath("_themes"))
html_theme_path = ["_themes"]
# custom settings for flask theme
html_theme_options = {
"index_logo": "", #TODO
"github_fork": "dcrosta/flask-pymongo",
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Flask-PyMongodoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#"papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#"pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#"preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Flask-PyMongo.tex", u"Flask-PyMongo Documentation",
u"Dan Crosta", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "flask-pymongo", u"Flask-PyMongo Documentation",
[u"Dan Crosta"], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
("index", "Flask-PyMongo", u"Flask-PyMongo Documentation",
u"Dan Crosta", "Flask-PyMongo", "One line description of project.",
"Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: "footnote", "no", or "inline".
#texinfo_show_urls = "footnote"
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"flask": ("https://flask.palletsprojects.com/", None),
"pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
}
|
{
"content_hash": "327bd5064101502ed422f09df8d79bab",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 80,
"avg_line_length": 32.278225806451616,
"alnum_prop": 0.6984384759525296,
"repo_name": "dcrosta/flask-pymongo",
"id": "a1823a4752a19cca5a26620236919d94a0a037bb",
"size": "8429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30363"
}
],
"symlink_target": ""
}
|
"""Test wrapper to /bin/compare_results.py
"""
import os
import subprocess
from pycits.tools import NotExecutableError
from nose.tools import nottest, assert_equal
from pycits.tools import reformat_swarm_cls
INFILE = os.path.join("tests", "test_data", "compare_clusters",
"compare_list.txt")
OUTDIR = os.path.join("tests", "test_out_compare")
TARGET = os.path.join("tests", "test_targets", "compare_clusters",
"DNAMIX_S95_L001_RESULTS_cd_0.99_sw_1_BC_0.9_V_0.99.txt")
# Folder checking: create the shared output directory up front.
# exist_ok=True (Python 3) avoids the check-then-create race of the old
# `if not os.path.exists(...): os.makedirs(...)` pair.
os.makedirs(OUTDIR, exist_ok=True)
def get_sorted_list(in_file):
    """Return the sorted, meaningful lines of *in_file*.

    Blank lines and comment lines (starting with '#') are dropped.
    """
    with open(in_file) as handle:
        raw_lines = handle.read().split("\n")
    kept = [entry for entry in raw_lines
            if entry.strip() and not entry.startswith("#")]
    return sorted(kept)
def test_compare_results_exec():
    """Run compare_results.py on test data and compare output to
    precomputed target. The default options are the actual
    test data, so we only need to pass the output path and input list.
    """
    outfile = os.path.join(OUTDIR, "compare_tests.txt")
    prog = os.path.join("bin", "compare_results.py")
    temp_s = ["python3",
              prog,
              " -o",
              outfile,
              " --in_list",
              INFILE]
    cmd_s = ' '.join(temp_s)
    # check=True already raises CalledProcessError on a non-zero exit.
    subprocess.run(cmd_s, shell=True,
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE,
                   check=True)
    if not os.path.isfile(outfile):
        # BUG FIX: this previously called the undefined name `sys_exit`,
        # which would have raised NameError instead of reporting the problem.
        raise RuntimeError("outfile not generated: %s" % outfile)
    tests_data = get_sorted_list(TARGET)
    result = get_sorted_list(outfile)
    assert_equal(tests_data, result)
|
{
"content_hash": "e1358f2f0bfe2f3b4e5490d0d6d4416e",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.721311475409838,
"alnum_prop": 0.5901808785529715,
"repo_name": "widdowquinn/THAPBI-pycits",
"id": "e5f81c6767656815cc6c5854259512345364e529",
"size": "1958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_script_compare_results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "469746"
},
{
"name": "Python",
"bytes": "199487"
},
{
"name": "UnrealScript",
"bytes": "31869"
}
],
"symlink_target": ""
}
|
import base58
import json
import logging
import os
import re
import sys
import yaml
import envparse
from appdirs import user_data_dir, user_config_dir
from lbrynet.core import utils
from lbrynet.core.Error import InvalidCurrencyError
from lbrynet.androidhelpers.paths import (
android_internal_storage_dir,
android_app_internal_storage_dir
)
try:
from lbrynet.winhelpers.knownpaths import get_path, FOLDERID, UserHandle
except (ImportError, ValueError, NameError):
# Android platform: NameError: name 'c_wchar' is not defined
pass
log = logging.getLogger(__name__)
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENV_NAMESPACE = 'LBRY_'
LBRYCRD_WALLET = 'lbrycrd'
LBRYUM_WALLET = 'lbryum'
PTC_WALLET = 'ptc'
PROTOCOL_PREFIX = 'lbry'
APP_NAME = 'LBRY'
LINUX = 1
DARWIN = 2
WINDOWS = 3
ANDROID = 4
KB = 2 ** 10
MB = 2 ** 20
DEFAULT_DHT_NODES = [
('lbrynet1.lbry.io', 4444),
('lbrynet2.lbry.io', 4444),
('lbrynet3.lbry.io', 4444)
]
settings_decoders = {
'.json': json.loads,
'.yml': yaml.load
}
settings_encoders = {
'.json': json.dumps,
'.yml': yaml.safe_dump
}
def _win_path_to_bytes(path):
"""
Encode Windows paths to string. appdirs.user_data_dir()
on windows will return unicode path, unlike other platforms
which returns string. This will cause problems
because we use strings for filenames and combining them with
os.path.join() will result in errors.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path
def _get_old_directories(platform):
    """Return the legacy per-platform locations for the data, lbryum and
    download directories as a dict with keys 'data', 'lbryum', 'download'.

    Raises ValueError for an unrecognised platform constant.
    """
    if platform == WINDOWS:
        appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
        return {
            'data': os.path.join(appdata, 'lbrynet'),
            'lbryum': os.path.join(appdata, 'lbryum'),
            'download': get_path(FOLDERID.Downloads, UserHandle.current),
        }
    if platform == DARWIN:
        return {
            'data': user_data_dir('LBRY'),
            'lbryum': os.path.expanduser('~/.lbryum'),
            'download': os.path.expanduser('~/Downloads'),
        }
    if platform == LINUX:
        return {
            'data': os.path.expanduser('~/.lbrynet'),
            'lbryum': os.path.expanduser('~/.lbryum'),
            'download': os.path.expanduser('~/Downloads'),
        }
    raise ValueError('unknown platform value')
def _get_new_directories(platform):
    """Return the current (post-migration) locations for the data, lbryum and
    download directories as a dict with keys 'data', 'lbryum', 'download'.

    Raises ValueError for an unrecognised platform constant.
    """
    dirs = {}
    if platform == ANDROID:
        dirs['data'] = '%s/lbrynet' % android_app_internal_storage_dir()
        dirs['lbryum'] = '%s/lbryum' % android_app_internal_storage_dir()
        dirs['download'] = '%s/Download' % android_internal_storage_dir()
    elif platform == WINDOWS:
        dirs['data'] = user_data_dir('lbrynet', 'lbry')
        dirs['lbryum'] = user_data_dir('lbryum', 'lbry')
        dirs['download'] = get_path(FOLDERID.Downloads, UserHandle.current)
    elif platform == DARWIN:
        # BUG FIX: the return value was previously discarded, so this branch
        # returned an empty dict. macOS has no new layout; reuse the old one.
        dirs = _get_old_directories(platform)
    elif platform == LINUX:
        dirs['data'] = user_data_dir('lbry/lbrynet')
        dirs['lbryum'] = user_data_dir('lbry/lbryum')
        try:
            # Resolve the XDG download dir; entries look like "$HOME/Downloads".
            with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
                down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read()).group(1)
                down_dir = re.sub(r'\$HOME', os.getenv('HOME'), down_dir)
                dirs['download'] = re.sub(r'\"', '', down_dir)
            # NOTE(review): a missing XDG_DOWNLOAD_DIR entry raises
            # AttributeError from .group(1), which this except does not catch.
        except EnvironmentError:
            dirs['download'] = os.getenv('XDG_DOWNLOAD_DIR')
        if not dirs['download']:
            dirs['download'] = os.path.expanduser('~/Downloads')
    else:
        raise ValueError('unknown platform value')
    return dirs
# Platform detection and directory selection, executed at import time.
# The legacy ("old") layout is preferred when it already exists on disk so
# existing installs keep their data; otherwise the new layout is used.
if 'ANDROID_ARGUMENT' in os.environ:
    # https://github.com/kivy/kivy/blob/master/kivy/utils.py#L417-L421
    platform = ANDROID
    dirs = _get_new_directories(ANDROID)
elif 'darwin' in sys.platform:
    platform = DARWIN
    dirs = _get_old_directories(DARWIN)
elif 'win' in sys.platform:
    # Checked after 'darwin', which also contains the substring 'win'.
    platform = WINDOWS
    if os.path.isdir(_get_old_directories(WINDOWS)['data']) or \
            os.path.isdir(_get_old_directories(WINDOWS)['lbryum']):
        dirs = _get_old_directories(WINDOWS)
    else:
        dirs = _get_new_directories(WINDOWS)
    dirs['data'] = _win_path_to_bytes(dirs['data'])
    dirs['lbryum'] = _win_path_to_bytes(dirs['lbryum'])
    dirs['download'] = _win_path_to_bytes(dirs['download'])
else:
    platform = LINUX
    if os.path.isdir(_get_old_directories(LINUX)['data']) or \
            os.path.isdir(_get_old_directories(LINUX)['lbryum']):
        dirs = _get_old_directories(LINUX)
    else:
        dirs = _get_new_directories(LINUX)
default_data_dir = dirs['data']
default_lbryum_dir = dirs['lbryum']
default_download_dir = dirs['download']
# BUG FIX: use == rather than `is` -- the platform constants are plain ints,
# and identity comparison only worked via CPython's small-int caching.
ICON_PATH = 'icons' if platform == WINDOWS else 'app.icns'
def server_port(server_and_port):
    """Parse a 'host:port' string into a ``(host, port)`` tuple, with the
    port converted to int. Raises ValueError on malformed input."""
    host, port_text = server_and_port.split(':')
    return host, int(port_text)
class Env(envparse.Env):
    """An Env parser that automatically namespaces variables with LBRY_."""
    def __init__(self, **schema):
        self.original_schema = schema
        namespaced = {}
        for key, value in schema.items():
            namespaced[self._convert_key(key)] = self._convert_value(value)
        envparse.Env.__init__(self, **namespaced)
    def __call__(self, key, *args, **kwargs):
        # Look up the namespaced name on behalf of the caller.
        return super(Env, self).__call__(self._convert_key(key), *args, **kwargs)
    @staticmethod
    def _convert_key(key):
        # e.g. 'api_port' -> 'LBRY_API_PORT'
        return ENV_NAMESPACE + key.upper()
    @staticmethod
    def _convert_value(value):
        """Allow a value to be specified as a tuple or list of the form
        (cast, default) or (cast, default, subcast) instead of a dict."""
        if not isinstance(value, (tuple, list)):
            return value
        converted = {'cast': value[0], 'default': value[1]}
        if len(value) == 3:
            converted['subcast'] = value[2]
        return converted
TYPE_DEFAULT = 'default'
TYPE_PERSISTED = 'persisted'
TYPE_ENV = 'env'
TYPE_CLI = 'cli'
TYPE_RUNTIME = 'runtime'
FIXED_SETTINGS = {
'ANALYTICS_ENDPOINT': 'https://api.segment.io/v1',
'ANALYTICS_TOKEN': 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H=',
'API_ADDRESS': 'lbryapi',
'APP_NAME': APP_NAME,
'BLOBFILES_DIR': 'blobfiles',
'BLOB_SIZE': 2 * MB,
'CRYPTSD_FILE_EXTENSION': '.cryptsd',
'CURRENCIES': {
'BTC': {'type': 'crypto'},
'LBC': {'type': 'crypto'},
'USD': {'type': 'fiat'},
},
'DB_REVISION_FILE_NAME': 'db_revision',
'ICON_PATH': ICON_PATH,
'LOGGLY_TOKEN': 'BQEzZmMzLJHgAGxkBF00LGD0YGuyATVgAmqxAQEuAQZ2BQH4',
'LOG_FILE_NAME': 'lbrynet.log',
'LOG_POST_URL': 'https://lbry.io/log-upload',
'MAX_BLOB_REQUEST_SIZE': 64 * KB,
'MAX_HANDSHAKE_SIZE': 64 * KB,
'MAX_REQUEST_SIZE': 64 * KB,
'MAX_RESPONSE_INFO_SIZE': 64 * KB,
'MAX_BLOB_INFOS_TO_REQUEST': 20,
'PROTOCOL_PREFIX': PROTOCOL_PREFIX,
'SLACK_WEBHOOK': ('nUE0pUZ6Yl9bo29epl5moTSwnl5wo20ip2IlqzywMKZiIQSFZR5'
'AHx4mY0VmF0WQZ1ESEP9kMHZlp1WzJwWOoKN3ImR1M2yUAaMyqGZ='),
'WALLET_TYPES': [LBRYUM_WALLET, LBRYCRD_WALLET],
}
ADJUSTABLE_SETTINGS = {
# By default, daemon will block all cross origin requests
# but if this is set, this value will be used for the
# Access-Control-Allow-Origin. For example
# set to '*' to allow all requests, or set to 'http://localhost:8080'
# if you're running a test UI on that port
'allowed_origin': (str, ''),
# Changing this value is not-advised as it could potentially
# expose the lbrynet daemon to the outside world which would
# give an attacker access to your wallet and you could lose
# all of your credits.
'api_host': (str, 'localhost'),
'api_port': (int, 5279),
'cache_time': (int, 150),
'data_dir': (str, default_data_dir),
'data_rate': (float, .0001), # points/megabyte
'delete_blobs_on_remove': (bool, True),
'dht_node_port': (int, 4444),
'download_directory': (str, default_download_dir),
'download_timeout': (int, 180),
'is_generous_host': (bool, True),
'announce_head_blobs_only': (bool, False),
'known_dht_nodes': (list, DEFAULT_DHT_NODES, server_port),
'lbryum_wallet_dir': (str, default_lbryum_dir),
'max_connections_per_stream': (int, 5),
'seek_head_blob_first': (bool, False),
# TODO: writing json on the cmd line is a pain, come up with a nicer
# parser for this data structure. maybe 'USD:25'
'max_key_fee': (json.loads, {'currency': 'USD', 'amount': 50.0}),
'disable_max_key_fee': (bool, False),
'min_info_rate': (float, .02), # points/1000 infos
'min_valuable_hash_rate': (float, .05), # points/1000 infos
'min_valuable_info_rate': (float, .05), # points/1000 infos
'peer_port': (int, 3333),
'pointtrader_server': (str, 'http://127.0.0.1:2424'),
'reflector_port': (int, 5566),
# if reflect_uploads is True, reflect files on publish
'reflect_uploads': (bool, True),
# if auto_re_reflect is True, attempt to re-reflect files on startup and
# at every auto_re_reflect_interval seconds, useful if initial reflect is unreliable
'auto_re_reflect': (bool, True),
'auto_re_reflect_interval': (int, 3600),
'reflector_servers': (list, [('reflector.lbry.io', 5566)], server_port),
'run_reflector_server': (bool, False),
'sd_download_timeout': (int, 3),
'share_usage_data': (bool, True), # whether to share usage stats and diagnostic info with LBRY
'peer_search_timeout': (int, 3),
'use_auth_http': (bool, False),
'use_upnp': (bool, True),
'wallet': (str, LBRYUM_WALLET),
}
class Config(object):
    """Layered configuration store.

    Values live in five layers (runtime, cli, env, persisted, default);
    lookups walk the layers in precedence order (see ``_search_order``), so
    e.g. a runtime value overrides a CLI value, which overrides an
    environment value, and so on down to the defaults.
    """
    def __init__(self, fixed_defaults, adjustable_defaults, persisted_settings=None,
                 environment=None, cli_settings=None):
        # Lazily-computed identifiers (see get_installation_id / get_node_id).
        self._installation_id = None
        self._session_id = base58.b58encode(utils.generate_id())
        self._node_id = None
        self._fixed_defaults = fixed_defaults
        self._adjustable_defaults = adjustable_defaults
        self._data = {
            TYPE_DEFAULT: {}, # defaults
            TYPE_PERSISTED: {}, # stored settings from daemon_settings.yml (or from a db, etc)
            TYPE_ENV: {}, # settings from environment variables
            TYPE_CLI: {}, # command-line arguments
            TYPE_RUNTIME: {}, # set during runtime (using self.set(), etc)
        }
        # the order in which a piece of data is searched for. earlier types override later types
        self._search_order = (
            TYPE_RUNTIME, TYPE_CLI, TYPE_ENV, TYPE_PERSISTED, TYPE_DEFAULT
        )
        # Seed the default layer from the fixed settings plus the default
        # (second) element of each adjustable setting's spec tuple.
        self._data[TYPE_DEFAULT].update(self._fixed_defaults)
        self._data[TYPE_DEFAULT].update(
            {k: v[1] for (k, v) in self._adjustable_defaults.iteritems()})
        if persisted_settings is None:
            persisted_settings = {}
        self._validate_settings(persisted_settings)
        self._data[TYPE_PERSISTED].update(persisted_settings)
        env_settings = self._parse_environment(environment)
        self._validate_settings(env_settings)
        self._data[TYPE_ENV].update(env_settings)
        if cli_settings is None:
            cli_settings = {}
        self._validate_settings(cli_settings)
        self._data[TYPE_CLI].update(cli_settings)
    def __repr__(self):
        return self.get_current_settings_dict().__repr__()
    def __iter__(self):
        # Iterating a Config yields the valid setting names.
        for k in self._data[TYPE_DEFAULT].iterkeys():
            yield k
    def __getitem__(self, name):
        return self.get(name)
    def __setitem__(self, name, value):
        return self.set(name, value)
    def __contains__(self, name):
        return name in self._data[TYPE_DEFAULT]
    @staticmethod
    def _parse_environment(environment):
        # Collect only the env vars that are actually set (non-None).
        env_settings = {}
        if environment is not None:
            assert isinstance(environment, Env)
            for opt in environment.original_schema:
                if environment(opt) is not None:
                    env_settings[opt] = environment(opt)
        return env_settings
    def _assert_valid_data_type(self, data_type):
        # NOTE(review): the message reads "{} in is not ..." -- typo; also,
        # asserts are stripped under `python -O`, so these checks vanish there.
        assert data_type in self._data, KeyError('{} in is not a valid data type'.format(data_type))
    def get_valid_setting_names(self):
        """Return all known setting names (keys of the default layer)."""
        return self._data[TYPE_DEFAULT].keys()
    def _is_valid_setting(self, name):
        return name in self.get_valid_setting_names()
    def _assert_valid_setting(self, name):
        assert self._is_valid_setting(name), \
            KeyError('{} is not a valid setting'.format(name))
    def _validate_settings(self, data):
        # Reject any name that is not a known setting.
        invalid_settings = set(data.keys()) - set(self.get_valid_setting_names())
        if len(invalid_settings) > 0:
            raise KeyError('invalid settings: {}'.format(', '.join(invalid_settings)))
    def _assert_editable_setting(self, name):
        # Fixed settings exist but may never be overwritten.
        self._assert_valid_setting(name)
        assert name not in self._fixed_defaults, \
            ValueError('{} is not an editable setting'.format(name))
    def _validate_currency(self, currency):
        if currency not in self._fixed_defaults['CURRENCIES'].keys():
            raise InvalidCurrencyError(currency)
    def get(self, name, data_type=None):
        """Get a config value
        Args:
            name: the name of the value to get
            data_type: if given, get the value from a specific data set (see below)
        Returns: the config value for the given name
        If data_type is None, get() will search for the given name in each data set, in
        order of precedence. It will return the first value it finds. This is the "effective"
        value of a config name. For example, ENV values take precedence over DEFAULT values,
        so if a value is present in ENV and in DEFAULT, the ENV value will be returned
        """
        self._assert_valid_setting(name)
        if data_type is not None:
            self._assert_valid_data_type(data_type)
            return self._data[data_type][name]
        for possible_data_type in self._search_order:
            if name in self._data[possible_data_type]:
                return self._data[possible_data_type][name]
        # NOTE(review): reached only when a valid name has no value in any
        # layer; the message ("not a valid setting") is misleading here.
        raise KeyError('{} is not a valid setting'.format(name))
    def set(self, name, value, data_types=(TYPE_RUNTIME,)):
        """Set a config value
        Args:
            name: the name of the value to set
            value: the value
            data_types: what type(s) of data this is
        Returns: None
        By default, this sets the RUNTIME value of a config. If you wish to set other
        data types (e.g. PERSISTED values to save to a file, CLI values from parsed
        command-line options, etc), you can specify that with the data_types param
        """
        if name == "max_key_fee":
            # max_key_fee values embed a currency that must be recognised.
            currency = str(value["currency"]).upper()
            self._validate_currency(currency)
        self._assert_editable_setting(name)
        for data_type in data_types:
            self._assert_valid_data_type(data_type)
            self._data[data_type][name] = value
    def update(self, updated_settings, data_types=(TYPE_RUNTIME,)):
        """Apply many settings at once, silently skipping invalid ones."""
        for k, v in updated_settings.iteritems():
            try:
                self.set(k, v, data_types=data_types)
            except (KeyError, AssertionError):
                pass
    def get_current_settings_dict(self):
        """Return the effective value of every valid setting."""
        current_settings = {}
        for key in self.get_valid_setting_names():
            current_settings[key] = self.get(key)
        return current_settings
    def get_adjustable_settings_dict(self):
        """Like get_current_settings_dict(), restricted to adjustable keys."""
        return {
            key: val for key, val in self.get_current_settings_dict().iteritems()
            if key in self._adjustable_defaults
        }
    def save_conf_file_settings(self):
        """Write the PERSISTED layer to the conf file (format from extension)."""
        path = self.get_conf_filename()
        ext = os.path.splitext(path)[1]
        encoder = settings_encoders.get(ext, False)
        assert encoder is not False, 'Unknown settings format %s' % ext
        with open(path, 'w') as settings_file:
            settings_file.write(encoder(self._data[TYPE_PERSISTED]))
    def load_conf_file_settings(self):
        """Load the conf file into the PERSISTED layer; missing file is OK."""
        path = self.get_conf_filename()
        ext = os.path.splitext(path)[1]
        decoder = settings_decoders.get(ext, False)
        assert decoder is not False, 'Unknown settings format %s' % ext
        try:
            with open(path, 'r') as settings_file:
                data = settings_file.read()
            decoded = self._fix_old_conf_file_settings(decoder(data))
            log.info('Loaded settings file: %s', path)
            self._validate_settings(decoded)
            self._data[TYPE_PERSISTED].update(decoded)
        except (IOError, OSError) as err:
            log.info('%s: Failed to update settings from %s', err, path)
    def _fix_old_conf_file_settings(self, settings_dict):
        """Migrate renamed/removed legacy keys, then drop unknown ones."""
        if 'API_INTERFACE' in settings_dict:
            settings_dict['api_host'] = settings_dict['API_INTERFACE']
            del settings_dict['API_INTERFACE']
        if 'startup_scripts' in settings_dict:
            del settings_dict['startup_scripts']
        if 'upload_log' in settings_dict:
            settings_dict['share_usage_data'] = settings_dict['upload_log']
            del settings_dict['upload_log']
        if 'share_debug_info' in settings_dict:
            settings_dict['share_usage_data'] = settings_dict['share_debug_info']
            del settings_dict['share_debug_info']
        # NOTE(review): deleting while iterating .keys() is only safe on
        # Python 2, where keys() returns a list snapshot.
        for key in settings_dict.keys():
            if not self._is_valid_setting(key):
                log.warning('Ignoring invalid conf file setting: %s', key)
                del settings_dict[key]
        return settings_dict
    def ensure_data_dir(self):
        """Create the data directory if needed and return its path."""
        # although there is a risk of a race condition here we don't
        # expect there to be multiple processes accessing this
        # directory so the risk can be ignored
        if not os.path.isdir(self['data_dir']):
            os.makedirs(self['data_dir'])
        return self['data_dir']
    def get_log_filename(self):
        """
        Return the log file for this platform.
        Also ensure the containing directory exists.
        """
        return os.path.join(self.ensure_data_dir(), self['LOG_FILE_NAME'])
    def get_api_connection_string(self):
        """Return the daemon API URL, e.g. http://localhost:5279/lbryapi."""
        return 'http://%s:%i/%s' % (self['api_host'], self['api_port'], self['API_ADDRESS'])
    def get_ui_address(self):
        """Return the UI base URL (API host/port without the API path)."""
        return 'http://%s:%i' % (self['api_host'], self['api_port'])
    def get_db_revision_filename(self):
        return os.path.join(self.ensure_data_dir(), self['DB_REVISION_FILE_NAME'])
    def get_conf_filename(self):
        """Return the settings file path, preferring an existing .yml over
        .json; defaults to the .yml path when neither exists."""
        data_dir = self.ensure_data_dir()
        yml_path = os.path.join(data_dir, 'daemon_settings.yml')
        json_path = os.path.join(data_dir, 'daemon_settings.json')
        if os.path.isfile(yml_path):
            return yml_path
        elif os.path.isfile(json_path):
            return json_path
        else:
            return yml_path
    def get_installation_id(self):
        """Return the persistent installation id, creating it on first use."""
        install_id_filename = os.path.join(self.ensure_data_dir(), "install_id")
        if not self._installation_id:
            if os.path.isfile(install_id_filename):
                with open(install_id_filename, "r") as install_id_file:
                    self._installation_id = install_id_file.read()
        if not self._installation_id:
            self._installation_id = base58.b58encode(utils.generate_id())
            with open(install_id_filename, "w") as install_id_file:
                install_id_file.write(self._installation_id)
        return self._installation_id
    def get_node_id(self):
        """Return the persistent DHT node id (raw bytes; stored base58)."""
        node_id_filename = os.path.join(self.ensure_data_dir(), "node_id")
        if not self._node_id:
            if os.path.isfile(node_id_filename):
                with open(node_id_filename, "r") as node_id_file:
                    self._node_id = base58.b58decode(node_id_file.read())
        if not self._node_id:
            self._node_id = utils.generate_id()
            with open(node_id_filename, "w") as node_id_file:
                node_id_file.write(base58.b58encode(self._node_id))
        return self._node_id
    def get_session_id(self):
        """Return the per-process session id generated in __init__."""
        return self._session_id
# type: Config
settings = None
def get_default_env():
    """Build an Env whose schema mirrors ADJUSTABLE_SETTINGS, with every
    default set to None so unset env vars are distinguishable."""
    schema = {}
    for name, spec in ADJUSTABLE_SETTINGS.iteritems():
        # spec is (cast, default[, subcast]); keep the optional subcast.
        if len(spec) == 3:
            schema[name] = (spec[0], None, spec[2])
        else:
            schema[name] = (spec[0], None)
    return Env(**schema)
def initialize_settings(load_conf_file=True):
    """Create the module-level `settings` singleton (idempotent).

    Args:
        load_conf_file: also merge persisted settings from the conf file.
    """
    global settings
    if settings is not None:
        return
    settings = Config(FIXED_SETTINGS, ADJUSTABLE_SETTINGS,
                      environment=get_default_env())
    settings.installation_id = settings.get_installation_id()
    settings.node_id = settings.get_node_id()
    if load_conf_file:
        settings.load_conf_file_settings()
|
{
"content_hash": "50571620ff5fc8edc0a6677a55829ade",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 100,
"avg_line_length": 36.9492119089317,
"alnum_prop": 0.612806901128069,
"repo_name": "zestyr/lbry",
"id": "9fde690adc7c4169f6dd934001470999853b10fb",
"size": "21098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbrynet/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "1034464"
},
{
"name": "Ruby",
"bytes": "309"
},
{
"name": "Shell",
"bytes": "2881"
}
],
"symlink_target": ""
}
|
def identity(x):
    """Return *x* unchanged (default leaf/shared transformation)."""
    return x
def cyclist(tree):
    """Branch coroutine that rebuilds a (possibly cyclic) tree as nested lists.

    First yields the (initially empty) result list so the caller can memoize
    it before recursing, then receives the transformed children and extends
    the same list in place -- that in-place update is what lets cycles
    resolve to the very object being built.
    """
    res = []
    reschildren = yield res
    res.extend(reschildren)
    yield
def foldcyc(tree, branch=cyclist, leaf=identity, shared=identity, getchildren=iter):
    """Fold a tree that may contain shared subtrees or cycles.

    Args:
        tree: the root node.
        branch: coroutine factory for internal nodes (see cyclist).
        leaf: applied to nodes whose getchildren() call raises.
        shared: applied to the memoized result when a node is revisited.
        getchildren: returns an iterable of children or raises to mark a leaf.
    Returns:
        The folded result for the root.
    """
    mem = dict()
    def _fold(tree):
        if id(tree) in mem:
            # Node already folded (or being folded): reuse the partial result.
            return shared(mem[id(tree)])
        try:
            children = getchildren(tree)
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; any ordinary failure of
            # getchildren() still marks the node as a leaf.
            res = leaf(tree)
            mem[id(tree)] = res
            return res
        coroutine = branch(tree)
        # FIX: builtin next() replaces the Python-2-only .next() method,
        # keeping the recipe working on both Python 2.6+ and Python 3.
        res = next(coroutine)
        mem[id(tree)] = res  # memoize BEFORE recursing so cycles terminate
        reschildren = [_fold(child) for child in children]
        coroutine.send(reschildren)
        return res
    return _fold(tree)
|
{
"content_hash": "0325a818b20e1e76097c84079db8d6fb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 27.392857142857142,
"alnum_prop": 0.5215123859191656,
"repo_name": "ActiveState/code",
"id": "c6465f4f64bcc205e39481a17b9e97ea7df31347",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578118_Cycleaware_tree_transformations/recipe-578118.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser, Namespace
from typing import List
import numpy
from keras import Model
from PIL import Image
from pprint import pprint
from acquisition.ebay_downloader_io import EbayDownloaderIO
from data_sets.contains_images import ContainsImages
from utils.with_verbose import WithVerbose
from train import TrainingRunner
SAVE_FOLDER = 'data'
DEFAULT_SIZE = 139
def parse_command_line() -> Namespace:
    """Define and parse the CLI options for the style-prediction script."""
    parser = ArgumentParser(
        description="Train neural networks recognizing style from liked eBay items"
    )
    parser.add_argument(
        '--verbose', '-v', action='store_true', help="Print info about extracted tags"
    )
    parser.add_argument(
        '--save-folder', default=SAVE_FOLDER,
        help='Folder under which to store items, images and weights'
    )
    parser.add_argument(
        '--weights-file', '-w', default=None, help='HDF5 file from which to load precomputed set of weights'
    )
    parser.add_argument(
        '--images-file', default=None, help='Pickle file from which to load precomputed image data set'
    )
    parser.add_argument(
        '--image-size', '-s', type=int, default=DEFAULT_SIZE,
        help='Size (both width and height) to which images are resized'
    )
    parser.add_argument(
        '--demo', type=int, default=0, help='Number of images to try to predict as demo'
    )
    # append-style: may be given multiple times; defaults to None if absent.
    parser.add_argument(
        '--predict-image', action='append',
        help='Image which is evaluated'
    )
    parser.add_argument(
        '--predict-item-url', action='append',
        help='URL of eBay item which is evaluated'
    )
    parser.add_argument(
        '--type', default='inception', help='Type of neural network used',
        choices=list(TrainingRunner.NETWORK_TYPES.keys())
    )
    return parser.parse_args()
def get_items(item_urls: List[str]) -> None:
    """Placeholder for fetching eBay items from URLs -- not implemented yet
    (the --predict-item-url option is parsed but currently unused)."""
    pass
class Predictor(WithVerbose, ContainsImages):
    """Loads a trained network plus weights and predicts labels for images."""
    def __init__(self, args: Namespace) -> None:
        self.io = EbayDownloaderIO(
            args.save_folder, args.image_size, weights_file=args.weights_file, verbose=args.verbose
        )
        self.verbose = args.verbose
        # Images are square: (width, height) both equal to args.image_size.
        self.size = (args.image_size, args.image_size)
        self.demo = args.demo
        self.neural_network_type = TrainingRunner.decode_network_name(args.type)
        self.model = self.setup_model()
    def setup_model(self, loss_function: str='mean_squared_error', optimizer: str='sgd') -> Model:
        """Build the network, compile it, and load pretrained weights."""
        # classes is hard-coded to 2 -- presumably binary like/dislike;
        # confirm against image_data.num_classes used during training.
        model = self.neural_network_type(
            input_shape=(*self.size, 3), classes=2  # image_data.num_classes
        )
        model.compile(loss=loss_function, optimizer=optimizer, metrics=['accuracy'])
        self._print_status('Model compiled')
        self.io.load_weights(model)
        return model
    def predict(self, image_files: List[str]) -> None:
        """Load the given image files, display them, and print the model's
        raw prediction vector for each."""
        images = numpy.asarray([self.downscale(Image.open(file).convert('RGB')) for file in image_files])
        for image in images:
            self.show_image(image)
        predictions = self.model.predict(images, batch_size=len(images), verbose=1)
        for prediction in predictions:
            # pprint(
            #     [
            #         (label, prob)
            #         for label, prob in image_data.labels_sorted_by_probability(prediction).items()
            #         if prob > 0.01
            #     ]
            # )
            pprint(prediction)
if __name__ == '__main__':
    # Parse CLI options once, then run predictions on the requested images.
    cli_args = parse_command_line()
    Predictor(cli_args).predict(cli_args.predict_image)
|
{
"content_hash": "3c9eeebc12b191de4d5633e343bbebfb",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 108,
"avg_line_length": 34.349514563106794,
"alnum_prop": 0.6353872244205766,
"repo_name": "lene/style-scout",
"id": "bd711bd661e73b1f4f3ffae0a623b01fb416e5e9",
"size": "3538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "predict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134896"
}
],
"symlink_target": ""
}
|
from baselines.bench.benchmarks import *
from baselines.bench.monitor import *
|
{
"content_hash": "d027012c0021762d77d4009ab8958ae2",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 39.5,
"alnum_prop": 0.8227848101265823,
"repo_name": "dsbrown1331/CoRL2019-DREX",
"id": "4cbd5bba3d615bbd65f3c0ca4070da96a695553c",
"size": "79",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "drex-mujoco/learner/baselines/baselines/bench/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "591968"
},
{
"name": "Jupyter Notebook",
"bytes": "1160596"
},
{
"name": "Python",
"bytes": "1438389"
}
],
"symlink_target": ""
}
|
from functools import reduce
import gc
import io
import locale # system locale module, not tornado.locale
import logging
import operator
import textwrap
import sys
import unittest
import warnings
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.netutil import Resolver
from tornado.options import define, add_parse_callback, options
# Dotted module names collected into one suite by all() below; includes
# both doctest wrapper modules (*.doctests) and regular test modules.
TEST_MODULES = [
    "tornado.httputil.doctests",
    "tornado.iostream.doctests",
    "tornado.util.doctests",
    "tornado.test.asyncio_test",
    "tornado.test.auth_test",
    "tornado.test.autoreload_test",
    "tornado.test.concurrent_test",
    "tornado.test.curl_httpclient_test",
    "tornado.test.escape_test",
    "tornado.test.gen_test",
    "tornado.test.http1connection_test",
    "tornado.test.httpclient_test",
    "tornado.test.httpserver_test",
    "tornado.test.httputil_test",
    "tornado.test.import_test",
    "tornado.test.ioloop_test",
    "tornado.test.iostream_test",
    "tornado.test.locale_test",
    "tornado.test.locks_test",
    "tornado.test.netutil_test",
    "tornado.test.log_test",
    "tornado.test.options_test",
    "tornado.test.process_test",
    "tornado.test.queues_test",
    "tornado.test.routing_test",
    "tornado.test.simple_httpclient_test",
    "tornado.test.tcpclient_test",
    "tornado.test.tcpserver_test",
    "tornado.test.template_test",
    "tornado.test.testing_test",
    "tornado.test.twisted_test",
    "tornado.test.util_test",
    "tornado.test.web_test",
    "tornado.test.websocket_test",
    "tornado.test.wsgi_test",
]
def all():
    """Return a test suite containing every module listed in TEST_MODULES.

    NOTE: intentionally shadows the builtin ``all`` — the name is part of
    the unittest discovery convention for this script and cannot change.
    """
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
def test_runner_factory(stderr):
class TornadoTextTestRunner(unittest.TextTestRunner):
def __init__(self, *args, **kwargs):
kwargs["stream"] = stderr
super().__init__(*args, **kwargs)
def run(self, test):
result = super().run(test)
if result.skipped:
skip_reasons = set(reason for (test, reason) in result.skipped)
self.stream.write( # type: ignore
textwrap.fill(
"Some tests were skipped because: %s"
% ", ".join(sorted(skip_reasons))
)
)
self.stream.write("\n") # type: ignore
return result
return TornadoTextTestRunner
class LogCounter(logging.Filter):
    """Tally log records by severity band (INFO, WARNING, ERROR-or-higher).

    Always returns True from filter() so no record is suppressed — the
    counts are purely observational.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.info_count = 0
        self.warning_count = 0
        self.error_count = 0

    def filter(self, record):
        level = record.levelno
        if level >= logging.ERROR:
            self.error_count += 1
        elif level >= logging.WARNING:
            self.warning_count += 1
        elif level >= logging.INFO:
            self.info_count += 1
        return True
class CountingStderr(io.IOBase):
    """Wrap a real stream, forwarding writes while counting bytes written.

    ``byte_count`` is inspected by main() to detect stray output that
    bypassed the logging system.
    """

    def __init__(self, real):
        self.real = real
        self.byte_count = 0

    def write(self, data):
        # Count before delegating so the tally reflects attempted writes.
        size = len(data)
        self.byte_count += size
        return self.real.write(data)

    def flush(self):
        return self.real.flush()
def main():
    """Configure and run the Tornado test suite.

    Installs strict warning filters, defines the suite's command-line
    options, attaches counters for log records and raw stderr bytes, then
    delegates to tornado.testing.main().  When --fail-if-logs is set, any
    log output at INFO or above (or any bytes written directly to stderr)
    during the run causes a non-zero exit.
    """
    # Be strict about most warnings (This is set in our test running
    # scripts to catch import-time warnings, but set it again here to
    # be sure). This also turns on warnings that are ignored by
    # default, including DeprecationWarnings and python 3.2's
    # ResourceWarnings.
    warnings.filterwarnings("error")
    # setuptools sometimes gives ImportWarnings about things that are on
    # sys.path even if they're not being used.
    warnings.filterwarnings("ignore", category=ImportWarning)
    # Tornado generally shouldn't use anything deprecated, but some of
    # our dependencies do (last match wins).
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning, module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings(
        "error", category=PendingDeprecationWarning, module=r"tornado\..*"
    )
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2
    warnings.filterwarnings(
        "ignore", category=DeprecationWarning, message="Please use assert.* instead"
    )
    warnings.filterwarnings(
        "ignore",
        category=PendingDeprecationWarning,
        message="Please use assert.* instead",
    )
    # Twisted 15.0.0 triggers some warnings on py3 with -bb.
    warnings.filterwarnings("ignore", category=BytesWarning, module=r"twisted\..*")
    if (3,) < sys.version_info < (3, 6):
        # Prior to 3.6, async ResourceWarnings were rather noisy
        # and even
        # `python3.4 -W error -c 'import asyncio; asyncio.get_event_loop()'`
        # would generate a warning.
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, module=r"asyncio\..*"
        )
    # This deprecation warning is introduced in Python 3.8 and is
    # triggered by pycurl. Unforunately, because it is raised in the C
    # layer it can't be filtered by module and we must match the
    # message text instead (Tornado's C module uses PY_SSIZE_T_CLEAN
    # so it's not at risk of running into this issue).
    warnings.filterwarnings(
        "ignore",
        category=DeprecationWarning,
        message="PY_SSIZE_T_CLEAN will be required",
    )
    # Silence request access logs so they don't count against the
    # log-output check in the finally block below.
    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)
    # Options allowing the suite to swap in alternate client/server/resolver
    # implementations and enable gc debugging flags.
    define(
        "httpclient",
        type=str,
        default=None,
        callback=lambda s: AsyncHTTPClient.configure(
            s, defaults=dict(allow_ipv6=False)
        ),
    )
    define("httpserver", type=str, default=None, callback=HTTPServer.configure)
    define("resolver", type=str, default=None, callback=Resolver.configure)
    define(
        "debug_gc",
        type=str,
        multiple=True,
        help="A comma-separated list of gc module debug constants, "
        "e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
        callback=lambda values: gc.set_debug(
            reduce(operator.or_, (getattr(gc, v) for v in values))
        ),
    )
    define(
        "fail-if-logs",
        default=True,
        help="If true, fail the tests if any log output is produced (unless captured by ExpectLog)",
    )
    def set_locale(x):
        locale.setlocale(locale.LC_ALL, x)
    define("locale", type=str, default=None, callback=set_locale)
    # Attach the counting filter to the root logger's first handler once
    # option parsing (which configures logging) has completed.
    log_counter = LogCounter()
    add_parse_callback(lambda: logging.getLogger().handlers[0].addFilter(log_counter))
    # Certain errors (especially "unclosed resource" errors raised in
    # destructors) go directly to stderr instead of logging. Count
    # anything written by anything but the test runner as an error.
    orig_stderr = sys.stderr
    counting_stderr = CountingStderr(orig_stderr)
    sys.stderr = counting_stderr  # type: ignore
    import tornado.testing
    kwargs = {}
    # HACK: unittest.main will make its own changes to the warning
    # configuration, which may conflict with the settings above
    # or command-line flags like -bb. Passing warnings=False
    # suppresses this behavior, although this looks like an implementation
    # detail. http://bugs.python.org/issue15626
    kwargs["warnings"] = False
    kwargs["testRunner"] = test_runner_factory(orig_stderr)
    try:
        tornado.testing.main(**kwargs)
    finally:
        # The tests should run clean; consider it a failure if they
        # logged anything at info level or above.
        if (
            log_counter.info_count > 0
            or log_counter.warning_count > 0
            or log_counter.error_count > 0
            or counting_stderr.byte_count > 0
        ):
            logging.error(
                "logged %d infos, %d warnings, %d errors, and %d bytes to stderr",
                log_counter.info_count,
                log_counter.warning_count,
                log_counter.error_count,
                counting_stderr.byte_count,
            )
            if options.fail_if_logs:
                sys.exit(1)
if __name__ == "__main__":
    # Entry point: run the suite with the configuration assembled in main().
    main()
|
{
"content_hash": "f9761d0e1cf2789ce5706f5865968157",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 100,
"avg_line_length": 34.63900414937759,
"alnum_prop": 0.6436272160996646,
"repo_name": "tornadoweb/tornado",
"id": "6075b1e2bd981a45fa6b902d9162ba36c5f1da72",
"size": "8348",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tornado/test/runtests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1524"
},
{
"name": "Cython",
"bytes": "780"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1557339"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
"""
Django settings for population project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; it should be
# rotated and loaded from the environment before any production deploy.
SECRET_KEY = '7%)4ui#rsj5l#fwq5(q9+6!^q-1_l%_u(662w6-8xg(+@7vdsg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True; must list served hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'population','query',  # project-local apps
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-1.10 setting name; it will
# presumably need renaming to MIDDLEWARE on a Django upgrade — confirm.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'population.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# NOTE(review): TEMPLATE_CONTEXT_PROCESSORS is a legacy (pre-TEMPLATES)
# setting; presumably ignored given the TEMPLATES config above — verify.
TEMPLATE_CONTEXT_PROCESSORS = (
    # other context processors....
    'django.core.context_processors.static',
    # other context processors....
)
WSGI_APPLICATION = 'population.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Serve gzip'd, hashed static files via whitenoise.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
{
"content_hash": "8e2b5d76dc52385bbc75045e0d3ecfac",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 91,
"avg_line_length": 26.246268656716417,
"alnum_prop": 0.6912141029286324,
"repo_name": "Chromium97/lnmiithackathon",
"id": "370512ef8e8d8bef233e57f04f7f8e9ef94a4fde",
"size": "3517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "query/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "350776"
},
{
"name": "HTML",
"bytes": "18345"
},
{
"name": "JavaScript",
"bytes": "633219"
},
{
"name": "Python",
"bytes": "22029"
}
],
"symlink_target": ""
}
|
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
# Placeholder SDK version used in the sdk_moniker user-agent string below;
# presumably replaced by the package's real version at build time — confirm.
VERSION = "unknown"
class AzureCommunicationJobRouterServiceConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for AzureCommunicationJobRouterService.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param endpoint: The endpoint of the Azure Communication resource. Required.
    :type endpoint: str
    :keyword api_version: Api Version. Default value is "2022-07-18-preview". Note that overriding
        this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        api_version: str = kwargs.pop("api_version", "2022-07-18-preview")

        self.endpoint = endpoint
        self.api_version = api_version
        kwargs.setdefault("sdk_moniker", f"communication-jobrouter/{VERSION}")
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        """Install pipeline policies, honoring any caller-supplied overrides."""
        get = kwargs.get
        self.user_agent_policy = get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        # No default: authentication must be supplied by the caller.
        self.authentication_policy = get("authentication_policy")
|
{
"content_hash": "d58d34f50423783554af7749ded34542",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 117,
"avg_line_length": 50.651162790697676,
"alnum_prop": 0.7171717171717171,
"repo_name": "Azure/azure-sdk-for-python",
"id": "17be90fca9d5115c50a9d6ce96c9eb40eacd52c9",
"size": "2646",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-communication-jobrouter/azure/communication/jobrouter/_generated/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from google.cloud.bigquery import analyticshub_v1
def sample_list_org_data_exchanges():
    """List every data exchange in an organization and print each one."""
    # Create a client
    client = analyticshub_v1.AnalyticsHubServiceClient()

    # Build the request for the target organization
    request = analyticshub_v1.ListOrgDataExchangesRequest(
        organization="organization_value",
    )

    # Iterate the paged response directly, printing each exchange
    for response in client.list_org_data_exchanges(request=request):
        print(response)
# [END analyticshub_v1_generated_AnalyticsHubService_ListOrgDataExchanges_sync]
|
{
"content_hash": "1bc6af74fda2fc021823039313baf184",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 28.5,
"alnum_prop": 0.7350877192982456,
"repo_name": "googleapis/python-bigquery-analyticshub",
"id": "e59812baebb967eed66ac64cf0262a5b1a9170a7",
"size": "1994",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticshub_v1_generated_analytics_hub_service_list_org_data_exchanges_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "530070"
},
{
"name": "Shell",
"bytes": "30705"
}
],
"symlink_target": ""
}
|
import mongoengine as me
from st2common import log as logging
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.constants.types import ResourceType
# Public API of this module.
__all__ = [
    'RunnerTypeDB',
]
# Module-level logger.
LOG = logging.getLogger(__name__)
# Separator character for pack-qualified names (unused in this chunk —
# verify usage elsewhere in the package).
PACK_SEPARATOR = '.'
class RunnerTypeDB(stormbase.StormBaseDB, stormbase.UIDFieldMixin):
    """
    The representation of an RunnerType in the system. An RunnerType
    has a one-to-one mapping to a particular ActionRunner implementation.
    Attributes:
        id: See StormBaseAPI
        name: See StormBaseAPI
        description: See StormBaseAPI
        enabled: A flag indicating whether the runner for this type is enabled.
        runner_module: The python module that implements the action runner for this type.
        runner_parameters: The specification for parameters for the action runner.
        query_module: The python module that implements the query module for this runner.
    """
    RESOURCE_TYPE = ResourceType.RUNNER_TYPE
    # The UID is derived from the 'name' field alone (see UIDFieldMixin).
    UID_FIELDS = ['name']
    enabled = me.BooleanField(
        required=True, default=True,
        help_text='A flag indicating whether the runner for this type is enabled.')
    runner_module = me.StringField(
        required=True,
        help_text='The python module that implements the action runner for this type.')
    runner_parameters = me.DictField(
        help_text='The specification for parameters for the action runner.')
    query_module = me.StringField(
        required=False,
        help_text='The python module that implements the query module for this runner.')
    meta = {
        'indexes': stormbase.UIDFieldMixin.get_indexes()
    }
    def __init__(self, *args, **values):
        super(RunnerTypeDB, self).__init__(*args, **values)
        # Eagerly populate self.uid at construction time via the mixin.
        self.uid = self.get_uid()
# specialized access objects
# Database accessor for RunnerTypeDB documents.
runnertype_access = MongoDBAccess(RunnerTypeDB)
# Models exported by this module for registration.
MODELS = [RunnerTypeDB]
|
{
"content_hash": "3e38902ad7f12c14a119a260e14e5fe2",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 89,
"avg_line_length": 31.775862068965516,
"alnum_prop": 0.6983179598480738,
"repo_name": "pixelrebel/st2",
"id": "ecdc42f515a46f1dcca87e4d12563743909a90bb",
"size": "2623",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "st2common/st2common/models/db/runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "41838"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3734646"
},
{
"name": "Shell",
"bytes": "40304"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from .models import Cat, Dog
from relatedentities.models import RelatedEntity
from relatedentities.utils import add_related
class RelatedEntityModelTests(TestCase):
def test_delete_removes_both_sides_of_relationship(self):
cat = Cat.objects.create(name='Test Cat')
dog = Dog.objects.create(name='Test Dog')
related = add_related(cat, dog)
rev_related = add_related(dog, cat)
related.delete()
self.assertFalse(RelatedEntity.objects.filter(pk=related.pk).exists())
self.assertFalse(RelatedEntity.objects.filter(pk=rev_related.pk).exists())
def test_is_related_to(self):
cat = Cat.objects.create(name='Test Cat')
dog = Dog.objects.create(name='Test Dog')
related = add_related(cat, dog)
self.assertTrue(cat.is_related_to(dog))
def test_is_related_to_works_with_reverse_relationship(self):
cat = Cat.objects.create(name='Test Cat')
dog = Dog.objects.create(name='Test Dog')
related = add_related(cat, dog)
self.assertTrue(dog.is_related_to(cat))
def test_is_related_to_works_with_reverse_relationship(self):
cat = Cat.objects.create(name='Test Cat')
dog = Dog.objects.create(name='Test Dog')
self.assertFalse(cat.is_related_to(dog))
|
{
"content_hash": "ab994a461d1e2c4c743684ed6c7d8fdd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 32.51219512195122,
"alnum_prop": 0.673668417104276,
"repo_name": "grantmcconnaughey/django-related-entities",
"id": "b1653d27d5b942d798f00b4e66cd9d4862fe17d1",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1268"
},
{
"name": "Python",
"bytes": "13651"
}
],
"symlink_target": ""
}
|
from pkg_resources import require
require('pypes')
# import the Dataflow module
from pypes.pipeline import Dataflow
# import the Component Interface
from pypes.component import Component
# import the built-in ConsoleOutputWriter
from pypes.filters import ConsoleOutputWriter
class HelloWorld(Component):
__metatype__ = 'ADAPTER'
def __init__(self):
Component.__init__(self)
def run(self):
while True:
for data in self.receive_all('in'):
message = 'Hello %s' % data
self.send('out', message)
self.yield_ctrl()
hello = HelloWorld() # our custom component
printer = ConsoleOutputWriter() # writes to console (STDOUT)
Network = {
hello: {printer:('out','in')}
}
if __name__ == '__main__':
# create a new data flow
p = Dataflow(Network)
# send some data through the data flow
for name in ['Tom', 'Dick', 'Harry']:
p.send(name)
# shut down the data flow
p.close()
|
{
"content_hash": "b1aab08aabf5738c59d6872fe64dc429",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 60,
"avg_line_length": 26.342105263157894,
"alnum_prop": 0.6223776223776224,
"repo_name": "rasata/pypes",
"id": "a6078dc54ed4c563fe02916e834cfc0799194e20",
"size": "1028",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "core/examples/HelloWorldExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231488"
},
{
"name": "HTML",
"bytes": "1352"
},
{
"name": "JavaScript",
"bytes": "7537787"
},
{
"name": "Makefile",
"bytes": "55"
},
{
"name": "Mako",
"bytes": "4866"
},
{
"name": "Python",
"bytes": "736113"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
}
|
from CTFd.models import Users
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_award,
login_as_user,
register_user,
)
def test_accessing_hidden_users():
    """Hidden users should not give any data from /users or /api/v1/users"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="visible_user", email="visible_user@ctfd.io")  # ID 2
        register_user(app, name="hidden_user", email="hidden_user@ctfd.io")  # ID 3
        register_user(app, name="banned_user", email="banned_user@ctfd.io")  # ID 4

        # Flag user 3 as hidden and user 4 as banned.
        for name, flag in (("hidden_user", "hidden"), ("banned_user", "banned")):
            account = Users.query.filter_by(name=name).first()
            setattr(account, flag, True)
            app.db.session.commit()

        with login_as_user(app, name="visible_user") as client:
            # Every page and API endpoint for both users must 404.
            for uid in (3, 4):
                for url in (
                    "/users/{}".format(uid),
                    "/api/v1/users/{}".format(uid),
                    "/api/v1/users/{}/solves".format(uid),
                    "/api/v1/users/{}/fails".format(uid),
                    "/api/v1/users/{}/awards".format(uid),
                ):
                    assert client.get(url).status_code == 404
    destroy_ctfd(app)
def test_hidden_user_visibility():
    """Hidden users should not show up on /users or /api/v1/users or /api/v1/scoreboard"""
    app = create_ctfd()
    with app.app_context():
        register_user(app, name="hidden_user")
        with login_as_user(app, name="hidden_user") as client:
            target = Users.query.filter_by(id=2).first()
            target_id = target.id
            target_name = target.name
            target.hidden = True
            app.db.session.commit()

            # The hidden user must be absent from the user listings.
            body = client.get("/users").get_data(as_text=True)
            assert target_name not in body
            payload = client.get("/api/v1/users").get_json()
            assert target_name not in payload

            # Even with an award (i.e. a score), the scoreboard omits them.
            gen_award(app.db, target_id)
            body = client.get("/scoreboard").get_data(as_text=True)
            assert target_name not in body
            payload = client.get("/api/v1/scoreboard").get_json()
            assert target_name not in payload

            # User should re-appear after disabling hiding
            # Use an API call to cause a cache clear
            with login_as_user(app, name="admin") as admin:
                resp = admin.patch("/api/v1/users/2", json={"hidden": False})
                assert resp.status_code == 200

            assert target_name in client.get("/users").get_data(as_text=True)
            assert target_name in client.get("/api/v1/users").get_data(as_text=True)
            assert target_name in client.get("/api/v1/scoreboard").get_data(as_text=True)
    destroy_ctfd(app)
|
{
"content_hash": "d8db790fe124482f93f8ed98a694a693",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 90,
"avg_line_length": 38.23863636363637,
"alnum_prop": 0.575334323922734,
"repo_name": "LosFuzzys/CTFd",
"id": "d9a0288b2aa901bb9d3215a8d39ba3fd1b717ab1",
"size": "3412",
"binary": false,
"copies": "1",
"ref": "refs/heads/losctf",
"path": "tests/users/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1956"
},
{
"name": "Dockerfile",
"bytes": "932"
},
{
"name": "HTML",
"bytes": "314286"
},
{
"name": "JavaScript",
"bytes": "646022"
},
{
"name": "Makefile",
"bytes": "841"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "933857"
},
{
"name": "SCSS",
"bytes": "40023"
},
{
"name": "Shell",
"bytes": "2759"
},
{
"name": "Vue",
"bytes": "25361"
}
],
"symlink_target": ""
}
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module has some examples of function calls for queries from
small part of innings dataset. The query is also mentioned.
"""
import pandas
import slice_compare
from util.enums import *
def test_1():
    """
    An example from the IPL dataset
    question : compare total runs of 'Mumbai indians' and
    'Chennai Super Kings' by season.
    """
    df = pandas.read_csv('data/ipl_innings.csv')
    result = slice_compare.slice_compare(
        df, 'total_runs', ['batsman_team', 'season'], ['total_runs'],
        'batsman_team', 'Mumbai Indians', 'Chennai Super Kings',
        SummaryOperators.SUM, dimensions=['season'])
    print(result)
    expected_result = """ season batsman_team SUM of total_runs
0 2008 Chennai Super Kings 868
1 2008 Mumbai Indians 346"""
    expected_suggestion = """[]"""
    assert expected_result == result[0].to_string()
    assert expected_suggestion == str(result[1])
def test_2():
    """
    An example from the IPL dataset
    question : compare total salary of 'A' and 'B' for year 2019.
    """
    df = pandas.read_csv('data/salary_list_modified.csv')
    result = slice_compare.slice_compare(
        df, 'salary', ['Person name', 'year', 'month'], ['salary'],
        'Person name', 'A', 'B', SummaryOperators.SUM,
        slices=[('Person name', Filters.IN, ['A', 'B'])],
        dimensions=['year'])
    print(result)
    expected_result = """ year Person name SUM of salary
0 2019 A 10239
1 2019 B 8190"""
    expected_suggestion = """[{'suggestion': 'the relation between slices might changed a lot if you will consider month in grouping.', 'oversight': <Oversights.SIMPSONS_PARADOX: 8>, 'is_row_level_suggestion': True, 'row_list': [{'row': 1, 'confidence_score': 100}, {'row': 2, 'confidence_score': 100}]}]"""
    assert expected_result == result[0].to_string()
    assert expected_suggestion == str(result[1])
def test_3():
    """
    An example from the IPL dataset
    question : compare total run scored in 1st innings and second innings by batsman_teams.
    """
    df = pandas.read_csv('data/ipl_innings.csv')
    result = slice_compare.slice_compare(
        df, 'total_runs', ['batsman_team', 'innings'], ['total_runs'],
        'innings', '1st', '2nd', SummaryOperators.SUM,
        dimensions=['batsman_team'],
        slices=[('innings', Filters.IN, ['1st', '2nd'])])
    print(result)
    expected_output = """ batsman_team innings SUM of total_runs
0 Chennai Super Kings 1st 544
1 Chennai Super Kings 2nd 324
2 Deccan Chargers 1st 40
3 Deccan Chargers 2nd 102
4 Delhi Daredevils 1st 248
5 Delhi Daredevils 2nd 342
6 Gujarat Lions 1st 100
7 Gujarat Lions 2nd 4
8 Kings XI Punjab 1st 448
9 Kings XI Punjab 2nd 522
10 Kolkata Knight Riders 1st 338
11 Kolkata Knight Riders 2nd 708
12 Mumbai Indians 1st 330
13 Mumbai Indians 2nd 16
14 Pune Warriors 1st 12
15 Pune Warriors 2nd 158
16 Rajasthan Royals 1st 368
17 Rajasthan Royals 2nd 608
18 Royal Challengers Bangalore 1st 866
19 Royal Challengers Bangalore 2nd 136
20 Sunrisers Hyderabad 1st 63
21 Sunrisers Hyderabad 2nd 331"""
    expected_suggestion = """[]"""
    assert expected_output == result[0].to_string()
    assert expected_suggestion == str(result[1])
def test_4():
    """
    question : compare average hour work per day of A and rest all interns.
    """
    df = pandas.read_csv('data/intern_performance.csv')
    result = slice_compare.slice_compare(
        df, 'avg_hour_of_work', ['intern_name'],
        ['avg_hour_of_work', 'lines_of_code'],
        'intern_name', 'A', '*', SummaryOperators.MEAN)
    print(result)
    expected_output = """ intern_name avg_hour_of_work
0 A 8.5
1 ALL 5.5"""
    expected_suggestion = "[{'suggestion': 'A looks different from others on avg_hour_of_work. You might also want to look at lines_of_code since A also looks different on this.', 'oversight': <Oversights.BENCHMARK_SET_TOO_DIFFERENT: 10>, 'is_row_level_suggestion': True, 'row_list': [{'row': 1, 'confidence_score': 100}, {'row': 2, 'confidence_score': 100}]}]"
    assert expected_output == result[0].to_string()
    assert expected_suggestion == str(result[1])
def test_5():
    """
    question : compare average score of A and B by class.
    """
    # Compare students 'A' and 'B' on mean marks, sliced per class.
    table = pandas.read_csv('data/student_score1.csv')
    query_result = slice_compare.slice_compare(
        table, 'marks',
        ['class', 'student_name', 'subject'], ['marks'],
        'student_name', 'A', 'B', SummaryOperators.MEAN,
        dimensions=['class'])
    print(query_result)
    expected_output = """ class student_name MEAN of marks
0 7th A 75
1 7th B 75
2 8th A 75
3 8th B 75"""
    expected_suggestion = "[{'suggestion': 'Some values are similar here but will vary if we add subject for grouping ', 'oversight': <Oversights.TOP_DOWN_ERROR: 9>, 'is_row_level_suggestion': True, 'row_list': [{'row': 1, 'confidence_score': 100}, {'row': 2, 'confidence_score': 100}, {'row': 3, 'confidence_score': 100}, {'row': 4, 'confidence_score': 100}]}, {'suggestion': 'the relation between slices might changed a lot if you will consider subject in grouping.', 'oversight': <Oversights.SIMPSONS_PARADOX: 8>, 'is_row_level_suggestion': True, 'row_list': [{'row': 1, 'confidence_score': 100}, {'row': 2, 'confidence_score': 100}]}]"
    # Both the rendered table and the oversight suggestions must match exactly.
    assert query_result[0].to_string() == expected_output
    assert str(query_result[1]) == expected_suggestion
# Run every scenario in order, announcing each comparison before executing it.
for description, test_case in (
        ("compare total runs of 'Mumbai indians' and 'Chennai Super Kings'", test_1),
        ("compare total salary of 'A' and 'B' for year 2019.", test_2),
        ("compare total run scored in 1st innings and second innings by batsman_teams.", test_3),
        ("compare average hour work per day of A and rest all interns.", test_4),
        ("compare average score of A and B by class.", test_5),
):
    print("\n" + description)
    test_case()
print("\nTest cases completed")
|
{
"content_hash": "d44c683134b02dad0ad97f30862deb99",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 639,
"avg_line_length": 49.92215568862275,
"alnum_prop": 0.5284874655151733,
"repo_name": "google/debaised-analysis",
"id": "434ace1ed0772654b472efe99797a9cca03d4e2a",
"size": "8337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intents/test_slice_compare.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "129"
},
{
"name": "HTML",
"bytes": "584541"
},
{
"name": "JavaScript",
"bytes": "229958"
},
{
"name": "Python",
"bytes": "357832"
},
{
"name": "Shell",
"bytes": "7483"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from holoviews import Tiles
from holoviews.element.comparison import ComparisonTestCase
class TestCoordinateConversion(ComparisonTestCase):
    """Tests for the ``Tiles`` helpers converting between lon/lat
    (EPSG:4326) and Web Mercator easting/northing (EPSG:3857)."""

    def test_spot_check_lonlat_to_eastingnorthing(self):
        """Anchor lon/lat -> easting/northing with hard-coded known values."""
        # Anchor implementation with a few hard-coded known values.
        # Generated ad-hoc from https://epsg.io/transform#s_srs=4326&t_srs=3857
        easting, northing = Tiles.lon_lat_to_easting_northing(0, 0)
        self.assertAlmostEqual(easting, 0)
        self.assertAlmostEqual(northing, 0)
        easting, northing = Tiles.lon_lat_to_easting_northing(20, 10)
        self.assertAlmostEqual(easting, 2226389.82, places=2)
        self.assertAlmostEqual(northing, 1118889.97, places=2)
        easting, northing = Tiles.lon_lat_to_easting_northing(-33, -18)
        self.assertAlmostEqual(easting, -3673543.20, places=2)
        self.assertAlmostEqual(northing, -2037548.54, places=2)
        easting, northing = Tiles.lon_lat_to_easting_northing(85, -75)
        self.assertAlmostEqual(easting, 9462156.72, places=2)
        self.assertAlmostEqual(northing, -12932243.11, places=2)
        easting, northing = Tiles.lon_lat_to_easting_northing(180, 85)
        self.assertAlmostEqual(easting, 20037508.34, places=2)
        self.assertAlmostEqual(northing, 19971868.88, places=2)

    def test_spot_check_eastingnorthing_to_lonlat(self):
        """Anchor easting/northing -> lon/lat with hard-coded known values."""
        # Anchor implementation with a few hard-coded known values.
        # Generated ad-hoc from https://epsg.io/transform#s_srs=3857&t_srs=4326
        lon, lat = Tiles.easting_northing_to_lon_lat(0, 0)
        self.assertAlmostEqual(lon, 0)
        self.assertAlmostEqual(lat, 0)
        lon, lat = Tiles.easting_northing_to_lon_lat(1230020, -432501)
        self.assertAlmostEqual(lon, 11.0494578, places=2)
        self.assertAlmostEqual(lat, -3.8822487, places=2)
        lon, lat = Tiles.easting_northing_to_lon_lat(-2130123, 1829312)
        self.assertAlmostEqual(lon, -19.1352205, places=2)
        self.assertAlmostEqual(lat, 16.2122187, places=2)
        lon, lat = Tiles.easting_northing_to_lon_lat(-1000000, 5000000)
        self.assertAlmostEqual(lon, -8.9831528, places=2)
        self.assertAlmostEqual(lat, 40.9162745, places=2)
        lon, lat = Tiles.easting_northing_to_lon_lat(-20037508.34, 20037508.34)
        self.assertAlmostEqual(lon, -180.0, places=2)
        self.assertAlmostEqual(lat, 85.0511288, places=2)

    def test_check_lonlat_to_eastingnorthing_identity(self):
        """Round-tripping lon/lat through easting/northing is the identity
        (to two decimal places) across the valid Mercator latitude range."""
        for lon in np.linspace(-180, 180, 100):
            for lat in np.linspace(-85, 85, 100):
                easting, northing = Tiles.lon_lat_to_easting_northing(lon, lat)
                new_lon, new_lat = Tiles.easting_northing_to_lon_lat(easting, northing)
                self.assertAlmostEqual(lon, new_lon, places=2)
                self.assertAlmostEqual(lat, new_lat, places=2)

    def test_check_eastingnorthing_to_lonlat_identity(self):
        """Round-tripping easting/northing through lon/lat is the identity
        (to two decimal places) across the Web Mercator extent."""
        for easting in np.linspace(-20037508.34, 20037508.34, 100):
            for northing in np.linspace(-20037508.34, 20037508.34, 100):
                lon, lat = Tiles.easting_northing_to_lon_lat(easting, northing)
                new_easting, new_northing = Tiles.lon_lat_to_easting_northing(lon, lat)
                self.assertAlmostEqual(easting, new_easting, places=2)
                self.assertAlmostEqual(northing, new_northing, places=2)

    def check_array_type_preserved(self, constructor, array_type, check):
        """Helper: convert a grid of lon/lat values held in ``array_type``
        (built by ``constructor``) both ways and assert the container type is
        preserved at every step; ``check`` compares round-tripped values."""
        lons, lats = np.meshgrid(
            np.linspace(-180, 180, 100), np.linspace(-85, 85, 100)
        )
        lons = lons.flatten()
        lats = lats.flatten()
        array_lons = constructor(lons)
        array_lats = constructor(lats)
        self.assertIsInstance(array_lons, array_type)
        self.assertIsInstance(array_lats, array_type)
        eastings, northings = Tiles.lon_lat_to_easting_northing(
            array_lons, array_lats
        )
        self.assertIsInstance(eastings, array_type)
        self.assertIsInstance(northings, array_type)
        new_lons, new_lats = Tiles.easting_northing_to_lon_lat(
            eastings, northings
        )
        self.assertIsInstance(new_lons, array_type)
        self.assertIsInstance(new_lats, array_type)
        check(array_lons, new_lons)
        check(array_lats, new_lats)

    def test_check_numpy_array(self):
        """numpy arrays survive conversion with values intact."""
        self.check_array_type_preserved(
            np.array, np.ndarray,
            lambda a, b: np.testing.assert_array_almost_equal(a, b, decimal=2)
        )

    def test_pandas_series(self):
        """pandas Series survive conversion with values intact."""
        self.check_array_type_preserved(
            pd.Series, pd.Series,
            # check_less_precise was deprecated in pandas 1.1 and removed in
            # 2.0; rtol=1e-3 reproduces its old "about 3 digits" tolerance.
            lambda a, b: pd.testing.assert_series_equal(
                a, b, check_exact=False, rtol=1e-3,
            )
        )
|
{
"content_hash": "ba2cfc2aba721236844070b232e0a279",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 87,
"avg_line_length": 42.14912280701754,
"alnum_prop": 0.6522372528616025,
"repo_name": "ioam/holoviews",
"id": "ecdbfa0bc9232e7ca92c5ec0e570a425f87ecfa1",
"size": "4805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/tests/element/test_tiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1546"
},
{
"name": "HTML",
"bytes": "18997"
},
{
"name": "JavaScript",
"bytes": "20747"
},
{
"name": "Jupyter Notebook",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "3241652"
}
],
"symlink_target": ""
}
|
"""
This module is deprecated. Please use `airflow.providers.google.cloud.operators.local_to_gcs`.
"""
import warnings
from airflow.providers.google.cloud.operators.local_to_gcs import LocalFilesystemToGCSOperator
# Emit the module-level deprecation notice once, at import time.
# (Fixed: the message previously ended with a stray comma instead of the
# period used by the module docstring.)
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.operators.local_to_gcs`.",
    DeprecationWarning, stacklevel=2
)
class FileToGoogleCloudStorageOperator(LocalFilesystemToGCSOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.local_to_gcs.LocalFilesystemToGCSOperator`.
    """
    def __init__(self, *args, **kwargs):
        # Deprecation shim: warn at the caller's call site (stacklevel=2)
        # every time this class is instantiated, then delegate unchanged to
        # the replacement operator's constructor.
        warnings.warn(
            """This class is deprecated.
            Please use
            `airflow.providers.google.cloud.operators.local_to_gcs.LocalFilesystemToGCSOperator`.""",
            DeprecationWarning, stacklevel=2
        )
        super().__init__(*args, **kwargs)
|
{
"content_hash": "fa8adb32e71df18684fa88e9eafee3d2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 101,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.7032608695652174,
"repo_name": "mtagle/airflow",
"id": "6bf4986390c3ed4a96e964e05fb55a9dd73cf232",
"size": "1707",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/file_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
""" Test the quoted APOGEE uncertainties from individual (rebinned) spectra. """
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from glob import glob
from itertools import combinations
def get_differences(apStar_filename):
    """Collect sigma-normalised flux differences between every pair of visit
    spectra in an apStar file.

    Returns a flat numpy array of (F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)
    values over all usable pixels of all visit pairs.
    """
    image = fits.open(apStar_filename)
    n_visits = image[0].header["NVISITS"]
    # HDU layout used below: 1 = flux, 2 = uncertainty, 3 = pixel mask.
    flux_hdu = image[1]
    error_hdu = image[2]
    mask_hdu = image[3]
    deltas = []
    # Visit spectra start at row 2 of each HDU; compare every visit pair once.
    for first, second in combinations(range(n_visits), 2):
        flux_a = flux_hdu.data[first + 2, :]
        flux_b = flux_hdu.data[second + 2, :]
        sigma = np.sqrt(error_hdu.data[first + 2, :] ** 2
                        + error_hdu.data[second + 2, :] ** 2)
        # Keep only positive, finite pixels that are unmasked in both visits.
        usable = ((flux_a > 0) & (flux_b > 0)
                  & np.isfinite(flux_a * flux_b * sigma)
                  & (mask_hdu.data[first + 2, :] == 0)
                  & (mask_hdu.data[second + 2, :] == 0))
        deltas.extend(((flux_a - flux_b) / sigma)[usable])
    return np.array(deltas).flatten()
def plot_differences(differences):
    """Histogram the normalised visit-to-visit differences and overlay a
    Gaussian scaled to the histogram area.

    If the quoted uncertainties are accurate, the normalised differences
    should follow a standard normal distribution.
    """
    fig, ax = plt.subplots(1)
    y_bin, x_bin, _ = ax.hist(differences, bins=100, facecolor="#666666")
    x = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
    # Standard normal PDF across the visible x-range.
    y = np.exp(-0.5*x**2)/np.sqrt(2*np.pi)
    # Scale the PDF by the trapezoidal integral of the bin counts.
    # NOTE(review): y already carries a 1/sqrt(2*pi) factor, so the extra
    # /np.sqrt(2*np.pi) below looks like a double division -- confirm the
    # intended normalisation before relying on the overlay amplitude.
    ax.plot(x, y*np.trapz(y_bin, x=x_bin[1:])/np.sqrt(2*np.pi), lw=2, c="r")
    ax.set_title("mu = {0:.1f}, sigma(|d|) = {1:.1f}".format(
        np.median(differences), np.std(np.abs(differences))))
    ax.set_xlabel("(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)")
    return fig
if __name__ == "__main__":
    # Build one diagnostic plot per apStar file, then a combined plot over
    # every file's differences.
    all_differences = []
    for filename in glob("APOGEE/*.fits"):
        differences = get_differences(filename)
        if len(differences) == 0:
            continue
        fig = plot_differences(differences)
        stem = filename.split("/")[-1].split(".")[0]
        fig.savefig("APOGEE/{0}.png".format(stem))
        plt.close("all")
        print(filename)
        all_differences.extend(differences)
    fig = plot_differences(np.array(all_differences))
    fig.savefig("APOGEE/all.png")
|
{
"content_hash": "b80a0e97747a04009a6661e9a3331133",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 87,
"avg_line_length": 29.041095890410958,
"alnum_prop": 0.5872641509433962,
"repo_name": "andycasey/luminosity-cannon",
"id": "79083f742da3f3eee14c296fe653dcc7712b69ac",
"size": "2167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/check_apogee_spectra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "669"
},
{
"name": "Python",
"bytes": "101687"
},
{
"name": "TeX",
"bytes": "59380"
}
],
"symlink_target": ""
}
|
import datetime
from django.contrib.auth.models import User
from django.http import HttpRequest
from eav.models import Attribute
from education.models import Role, School, UserProfile, EmisReporter
from poll.models import Poll
from rapidsms.contrib.locations.models import LocationType, Location, Point
from rapidsms.models import Backend
from rapidsms_httprouter.router import get_router
from script.models import ScriptSession
def create_user_with_group(username, group=None, location=None):
    """Create and save a ``User``, optionally adding it to ``group`` and
    giving it a ``UserProfile`` at ``location`` (with ``group`` as role)."""
    new_user = User.objects.create(username=username)
    new_user.set_password('password')
    if group is not None:
        new_user.groups.add(group)
    if location is not None:
        # The profile reuses the username as display name and the group as role.
        UserProfile.objects.create(user=new_user, name=username,
                                   role=group, location=location)
    new_user.save()
    return new_user
def create_group(group_name):
    """Create and return a ``Role`` named ``group_name``."""
    new_role = Role.objects.create(name=group_name)
    return new_role
def create_location(location_name, location_type, point=None, **kwargs):
    """Create a ``Location``.

    A ``point`` dict, if given, is first saved as a ``Point`` object.
    Remaining keyword arguments are passed through and take precedence over
    the positional name/type.
    """
    if point is not None:
        kwargs['point'] = Point.objects.create(**point)
    # Later keys win in the literal below, so kwargs may override name/type
    # (same precedence as the original dict.update call).
    merged = {"name": location_name, "type": location_type, **kwargs}
    return Location.objects.create(**merged)
def create_location_type(location_type):
    """Create a ``LocationType`` whose slug equals its name."""
    return LocationType.objects.create(slug=location_type,
                                       name=location_type)
def create_school(name, location):
    """Create a ``School`` called ``name`` at ``location``."""
    return School.objects.create(location=location, name=name)
def create_emis_reporters(name, reporting_location, school, identity, group):
    """Create an ``EmisReporter`` with an optional school, a group, and a
    connection (``identity``) on a shared fake backend."""
    new_reporter = EmisReporter.objects.create(
        name=name, reporting_location=reporting_location)
    if school is not None:
        new_reporter.schools.add(school)
    new_reporter.groups.add(group)
    # All test reporters share a single fake backend.
    fake_backend, _ = Backend.objects.get_or_create(name='fake_backend')
    new_reporter.connection_set.create(identity=identity, backend=fake_backend)
    new_reporter.save()
    return new_reporter
def create_poll_with_reporters(name, question, type, user, contacts):
    """Create a ``Poll`` asking ``question`` and register ``contacts`` on it."""
    new_poll = Poll.objects.create(
        name=name,
        question=question,
        type=type,
        user=user,
        default_response="",
        response_type=Poll.RESPONSE_TYPE_ALL,
    )
    new_poll.contacts.add(*contacts)
    new_poll.save()
    return new_poll
def create_view(class_name, user, poll, group, **kwargs):
    """Instantiate ``class_name`` wired to ``poll``/``group``, with a minimal
    HttpRequest authenticated as ``user`` and ``kwargs`` as URL kwargs."""
    fake_request = HttpRequest()
    fake_request.user = user
    view = class_name(poll_name=poll.name, restrict_group=group.name)
    view.request = fake_request
    view.kwargs = kwargs
    return view
def create_attribute():
    """Return the ``poll_number_value`` EAV attribute, creating it on demand.

    Bug fix: the old guard (``Attribute.objects.exists() or ...create(...)``)
    only checked that *some* Attribute row existed, so the following
    ``get(slug=...)`` raised ``DoesNotExist`` whenever a different attribute
    was already present. Keying ``get_or_create`` on the slug makes the
    helper idempotent for this specific attribute.
    """
    defaults = {
        "description": "A response value for a Poll with expected numeric responses",
        "datatype": "float",
        "enum_group": None,
        "required": False,
        "type": None,
        "name": "Number",
    }
    attribute, _ = Attribute.objects.get_or_create(
        slug="poll_number_value", defaults=defaults)
    return attribute
def fake_incoming(message, reporter):
    """Inject ``message`` into the router as if ``reporter`` had sent it from
    their default connection."""
    connection = reporter.default_connection
    return get_router().handle_incoming(
        connection.backend.name, connection.identity, message)
|
{
"content_hash": "3201186e7ab214dd7994d749442831b4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 92,
"avg_line_length": 34.6,
"alnum_prop": 0.7026332691072575,
"repo_name": "unicefuganda/edtrac",
"id": "2f32b6bc1e9bc9560327f6f04fb1771ae7f54da3",
"size": "3158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edtrac_project/rapidsms_edtrac/education/test/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "277434"
},
{
"name": "JavaScript",
"bytes": "190346"
},
{
"name": "Python",
"bytes": "2621572"
},
{
"name": "Shell",
"bytes": "4755"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from .._serialization import Deserializer, Serializer
from ._configuration import NetworkManagementClientConfiguration
from .operations import (
ApplicationGatewaysOperations,
BgpServiceCommunitiesOperations,
ExpressRouteCircuitAuthorizationsOperations,
ExpressRouteCircuitPeeringsOperations,
ExpressRouteCircuitsOperations,
ExpressRouteServiceProvidersOperations,
LoadBalancersOperations,
LocalNetworkGatewaysOperations,
NetworkInterfacesOperations,
NetworkManagementClientOperationsMixin,
NetworkSecurityGroupsOperations,
NetworkWatchersOperations,
PacketCapturesOperations,
PublicIPAddressesOperations,
RouteFilterRulesOperations,
RouteFiltersOperations,
RouteTablesOperations,
RoutesOperations,
SecurityRulesOperations,
SubnetsOperations,
UsagesOperations,
VirtualNetworkGatewayConnectionsOperations,
VirtualNetworkGatewaysOperations,
VirtualNetworkPeeringsOperations,
VirtualNetworksOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class NetworkManagementClient(
    NetworkManagementClientOperationsMixin
):  # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
    """Network Client.

    :ivar network_interfaces: NetworkInterfacesOperations operations
    :vartype network_interfaces:
     azure.mgmt.network.v2016_12_01.operations.NetworkInterfacesOperations
    :ivar application_gateways: ApplicationGatewaysOperations operations
    :vartype application_gateways:
     azure.mgmt.network.v2016_12_01.operations.ApplicationGatewaysOperations
    :ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations
     operations
    :vartype express_route_circuit_authorizations:
     azure.mgmt.network.v2016_12_01.operations.ExpressRouteCircuitAuthorizationsOperations
    :ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
    :vartype express_route_circuit_peerings:
     azure.mgmt.network.v2016_12_01.operations.ExpressRouteCircuitPeeringsOperations
    :ivar express_route_circuits: ExpressRouteCircuitsOperations operations
    :vartype express_route_circuits:
     azure.mgmt.network.v2016_12_01.operations.ExpressRouteCircuitsOperations
    :ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
    :vartype express_route_service_providers:
     azure.mgmt.network.v2016_12_01.operations.ExpressRouteServiceProvidersOperations
    :ivar load_balancers: LoadBalancersOperations operations
    :vartype load_balancers: azure.mgmt.network.v2016_12_01.operations.LoadBalancersOperations
    :ivar network_security_groups: NetworkSecurityGroupsOperations operations
    :vartype network_security_groups:
     azure.mgmt.network.v2016_12_01.operations.NetworkSecurityGroupsOperations
    :ivar security_rules: SecurityRulesOperations operations
    :vartype security_rules: azure.mgmt.network.v2016_12_01.operations.SecurityRulesOperations
    :ivar network_watchers: NetworkWatchersOperations operations
    :vartype network_watchers: azure.mgmt.network.v2016_12_01.operations.NetworkWatchersOperations
    :ivar packet_captures: PacketCapturesOperations operations
    :vartype packet_captures: azure.mgmt.network.v2016_12_01.operations.PacketCapturesOperations
    :ivar public_ip_addresses: PublicIPAddressesOperations operations
    :vartype public_ip_addresses:
     azure.mgmt.network.v2016_12_01.operations.PublicIPAddressesOperations
    :ivar route_filters: RouteFiltersOperations operations
    :vartype route_filters: azure.mgmt.network.v2016_12_01.operations.RouteFiltersOperations
    :ivar route_filter_rules: RouteFilterRulesOperations operations
    :vartype route_filter_rules:
     azure.mgmt.network.v2016_12_01.operations.RouteFilterRulesOperations
    :ivar route_tables: RouteTablesOperations operations
    :vartype route_tables: azure.mgmt.network.v2016_12_01.operations.RouteTablesOperations
    :ivar routes: RoutesOperations operations
    :vartype routes: azure.mgmt.network.v2016_12_01.operations.RoutesOperations
    :ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
    :vartype bgp_service_communities:
     azure.mgmt.network.v2016_12_01.operations.BgpServiceCommunitiesOperations
    :ivar usages: UsagesOperations operations
    :vartype usages: azure.mgmt.network.v2016_12_01.operations.UsagesOperations
    :ivar virtual_networks: VirtualNetworksOperations operations
    :vartype virtual_networks: azure.mgmt.network.v2016_12_01.operations.VirtualNetworksOperations
    :ivar subnets: SubnetsOperations operations
    :vartype subnets: azure.mgmt.network.v2016_12_01.operations.SubnetsOperations
    :ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
    :vartype virtual_network_peerings:
     azure.mgmt.network.v2016_12_01.operations.VirtualNetworkPeeringsOperations
    :ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
    :vartype virtual_network_gateways:
     azure.mgmt.network.v2016_12_01.operations.VirtualNetworkGatewaysOperations
    :ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations
     operations
    :vartype virtual_network_gateway_connections:
     azure.mgmt.network.v2016_12_01.operations.VirtualNetworkGatewayConnectionsOperations
    :ivar local_network_gateways: LocalNetworkGatewaysOperations operations
    :vartype local_network_gateways:
     azure.mgmt.network.v2016_12_01.operations.LocalNetworkGatewaysOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription credentials which uniquely identify the Microsoft
     Azure subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2016-12-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = NetworkManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Every operation group is constructed with the same four positional
        # arguments, so share them through a single tuple.
        ops_args = (self._client, self._config, self._serialize, self._deserialize)
        self.network_interfaces = NetworkInterfacesOperations(*ops_args)
        self.application_gateways = ApplicationGatewaysOperations(*ops_args)
        self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(*ops_args)
        self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(*ops_args)
        self.express_route_circuits = ExpressRouteCircuitsOperations(*ops_args)
        self.express_route_service_providers = ExpressRouteServiceProvidersOperations(*ops_args)
        self.load_balancers = LoadBalancersOperations(*ops_args)
        self.network_security_groups = NetworkSecurityGroupsOperations(*ops_args)
        self.security_rules = SecurityRulesOperations(*ops_args)
        self.network_watchers = NetworkWatchersOperations(*ops_args)
        self.packet_captures = PacketCapturesOperations(*ops_args)
        self.public_ip_addresses = PublicIPAddressesOperations(*ops_args)
        self.route_filters = RouteFiltersOperations(*ops_args)
        self.route_filter_rules = RouteFilterRulesOperations(*ops_args)
        self.route_tables = RouteTablesOperations(*ops_args)
        self.routes = RoutesOperations(*ops_args)
        self.bgp_service_communities = BgpServiceCommunitiesOperations(*ops_args)
        self.usages = UsagesOperations(*ops_args)
        self.virtual_networks = VirtualNetworksOperations(*ops_args)
        self.subnets = SubnetsOperations(*ops_args)
        self.virtual_network_peerings = VirtualNetworkPeeringsOperations(*ops_args)
        self.virtual_network_gateways = VirtualNetworkGatewaysOperations(*ops_args)
        self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(*ops_args)
        self.local_network_gateways = LocalNetworkGatewaysOperations(*ops_args)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Work on a copy so the caller's request object is never mutated.
        prepared = deepcopy(request)
        prepared.url = self._client.format_url(prepared.url)
        return self._client.send_request(prepared, **kwargs)

    def close(self) -> None:
        self._client.close()

    def __enter__(self) -> "NetworkManagementClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
|
{
"content_hash": "ca2038d7b14fcb37c7adbf09d8485b05",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 119,
"avg_line_length": 52.30172413793103,
"alnum_prop": 0.7422943794297017,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7b6dc19eae83bc7e951aabdd60845b2805c43b8f",
"size": "12602",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/_network_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import inferi
class Tests(TestCase):
    """Integration tests for inferi's combinatorics helpers and its
    probability / sample-space model."""

    def test_combinatorics(self):
        """Exercise the counting helpers (permutations, combinations,
        multiplications) and the generators that enumerate them."""
        # How many ways of arranging six objects are there?
        self.assertEqual(inferi.permutations(6), 720)
        # How many ways of arranging three of six objects are there?
        self.assertEqual(inferi.permutations(6, 3), 120)
        # How many combinations of six objects are there?
        self.assertEqual(inferi.combinations(6), 1)
        self.assertEqual(inferi.combinations(6, 5), 6)
        self.assertEqual(inferi.combinations(6, 4), 15)
        self.assertEqual(inferi.combinations(6, 3), 20)
        self.assertEqual(inferi.combinations(6, 2), 15)
        # How many outcomes are there of a multi-stage experiment?
        self.assertEqual(inferi.multiplications(6), 6)
        self.assertEqual(inferi.multiplications(6, 3), 18)
        self.assertEqual(inferi.multiplications(6, 6, 3), 108)
        # These permutations and combinations etc. can actually be produced
        options = ["A", "B", "C", "D", "E"]
        self.assertEqual(set(inferi.permutate(options, 2)), set((
            ("A", "B"), ("B", "A"), ("A", "C"), ("C", "A"), ("A", "D"),
            ("D", "A"), ("A", "E"), ("E", "A"), ("B", "C"), ("C", "B"),
            ("B", "D"), ("D", "B"), ("B", "E"), ("E", "B"), ("C", "D"),
            ("D", "C"), ("C", "E"), ("E", "C"), ("D", "E"), ("E", "D")
        )))
        combinations = tuple(inferi.combine(options, 2))
        self.assertEqual(len(combinations), 10)
        for set_ in (
            set(["A", "B"]), set(["A", "C"]), set(["A", "D"]), set(["A", "E"]),
            set(["B", "C"]), set(["B", "D"]), set(["B", "E"]), set(["C", "D"]),
            set(["C", "E"]), set(["D", "E"])
        ):
            self.assertIn(set_, combinations)
        self.assertEqual(tuple(inferi.multiply(options, options)), (
            ("A", "A"), ("A", "B"), ("A", "C"), ("A", "D"), ("A", "E"),
            ("B", "A"), ("B", "B"), ("B", "C"), ("B", "D"), ("B", "E"),
            ("C", "A"), ("C", "B"), ("C", "C"), ("C", "D"), ("C", "E"),
            ("D", "A"), ("D", "B"), ("D", "C"), ("D", "D"), ("D", "E"),
            ("E", "A"), ("E", "B"), ("E", "C"), ("E", "D"), ("E", "E")
        ))

    def test_events(self):
        """Exercise SampleSpace/event behaviour: fair and weighted dice,
        unions/intersections/complements, conditional probability and
        (in)dependence."""
        # Rolling a die
        sample_space = inferi.SampleSpace(1, 2, 3, 4, 5, 6)
        self.assertEqual(len(sample_space.simple_events), 6)
        self.assertEqual(sample_space.outcomes(), set(range(1, 7)))
        self.assertIn(4, sample_space)
        self.assertNotIn(4.5, sample_space)
        for event in sample_space.simple_events:
            self.assertEqual(event.probability(), 1 / 6)
        # Distinct simple events of a die are pairwise mutually exclusive;
        # an event is never mutually exclusive with itself.
        for event1 in sample_space.simple_events:
            for event2 in sample_space.simple_events:
                if event1 is event2:
                    self.assertFalse(event1.mutually_exclusive_with(event2))
                    self.assertFalse(event2.mutually_exclusive_with(event1))
                else:
                    self.assertTrue(event1.mutually_exclusive_with(event2))
                    self.assertTrue(event2.mutually_exclusive_with(event1))
        self.assertEqual(sample_space.chances_of(0), 0)
        self.assertEqual(sample_space.chances_of(1), 1 / 6)
        self.assertEqual(sample_space.chances_of(6), 1 / 6)
        self.assertEqual(sample_space.chances_of(7), 0)
        self.assertIn(sample_space.event(2), sample_space.simple_events)
        self.assertEqual(sample_space.event(5).outcome, 5)
        for i in range(1000):
            self.assertIn(sample_space.experiment(), range(1, 7))
        # Compound events from explicit outcomes or from a predicate.
        event1 = sample_space.event(2, 5, name="2 or 5")
        self.assertEqual(event1.name, "2 or 5")
        self.assertEqual(event1.probability(), 1 / 3)
        event2 = sample_space.event(lambda o: o % 2 == 0, name="even")
        self.assertEqual(event2.probability(), 1 / 2)
        self.assertEqual(len(event2.simple_events), 3)
        self.assertTrue(sample_space.event(1).mutually_exclusive_with(event2))
        self.assertFalse(sample_space.event(2).mutually_exclusive_with(event2))
        self.assertTrue(event2.mutually_exclusive_with(sample_space.event(1)))
        self.assertFalse(event2.mutually_exclusive_with(sample_space.event(2)))
        self.assertFalse(event1.mutually_exclusive_with(event2))
        combined = event1 | event2
        self.assertEqual(combined.outcomes(), {2, 4, 5, 6})
        combined = event1 & event2
        self.assertEqual(combined.outcomes(), {2})
        odd = event2.complement
        self.assertEqual(odd.probability(), 0.5)
        self.assertEqual(odd.outcomes(), {1, 3, 5})
        self.assertEqual(odd.probability(given=event2), 0)
        self.assertEqual(
            event2.probability(given=sample_space.event(1, 2, 3)), 1 / 3
        )
        self.assertTrue(event2.dependent_on(sample_space.event(1, 2, 3)))
        self.assertFalse(event2.independent_of(sample_space.event(1, 2, 3)))
        # Unfair die
        sample_space = inferi.SampleSpace(1, 2, 3, 4, 5, 6, p={4: 0.3})
        self.assertEqual(len(sample_space.simple_events), 6)
        self.assertEqual(sample_space.chances_of(6), 0.14)
        self.assertEqual(sample_space.chances_of(5), 0.14)
        self.assertEqual(sample_space.chances_of(4), 0.3)
        outcomes = [sample_space.experiment() for _ in range(1000)]
        self.assertGreaterEqual(outcomes.count(4), 200)
        event = sample_space.event(lambda o: o % 2 == 0, name="even")
        self.assertEqual(event.probability(), 2.9 / 5)
        self.assertEqual(len(event.simple_events), 3)
        # Rolling two dice
        dice = [1, 2, 3, 4, 5, 6]
        sample_space = inferi.SampleSpace(*inferi.multiply(dice, dice))
        self.assertEqual(len(sample_space.simple_events), 36)
        self.assertEqual(sample_space.chances_of((6, 6)), 1 / 36)
        event = sample_space.event(lambda o: sum(o) == 10, name="ten")
        self.assertEqual(event.probability(), 3 / 36)
        # Picking cards
        cards = inferi.multiply(["H", "D", "S", "C"], range(13))
        sample_space = inferi.SampleSpace(*cards)
        self.assertEqual(sample_space.chances_of(("S", 0)), 1 / 52)
        self.assertAlmostEqual(
            sample_space.event(lambda o: o[0] == "H").probability(), 1 / 4, delta=0.0000001
        )
        ace = sample_space.event(lambda o: o[1] == 0)
        spade = sample_space.event(lambda o: o[0] == "S")
        self.assertEqual(spade.probability(), 0.25)
        self.assertEqual(spade.probability(given=ace), 0.25)
        self.assertFalse(spade.dependent_on(ace))
        self.assertTrue(spade.independent_of(ace))
        # colour blind
        space = inferi.SampleSpace(p={
            ("M", True): 0.04, ("M", False): 0.47,
            ("F", True): 0.002, ("F", False): 0.488
        })
        male = space.event(lambda o: o[0] == "M")
        female = male.complement
        colour_blind = space.event(lambda o: o[1])
        self.assertEqual(male.probability(), 0.51)
        self.assertEqual(female.probability(), 0.49)
        self.assertEqual(colour_blind.probability(), 0.042)
        self.assertEqual(colour_blind.probability(given=male), 0.04 / 0.51)
        self.assertTrue(colour_blind.dependent_on(male))
|
{
"content_hash": "0439c7cf7cb26cf02e604738cf986f38",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 88,
"avg_line_length": 49.321917808219176,
"alnum_prop": 0.571726149145952,
"repo_name": "samirelanduk/inferi",
"id": "ba216f6ad2d3c453bb7f3acfa1a1df7528c584d0",
"size": "7201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_probability.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80716"
}
],
"symlink_target": ""
}
|
'''
TODO Add a proper introduction of the package.
'''
from pkg_resources import resource_filename # @UnresolvedImport # pylint: disable=E0611
# S-W direction parameters
# (presumably Smith-Waterman traceback directions -- TODO confirm against the
# alignment code that consumes these constants)
UPPER_LEFT_DIRECTION = 1
UPPER_DIRECTION = 2
LEFT_DIRECTION = 3
# NO_DIRECTION and STOP_DIRECTION occupy distinct high bits (2**5 and 2**6)
# so they can be combined; IN_ALIGNMENT is exactly their union.
NO_DIRECTION = 2**5
STOP_DIRECTION = 2**6
IN_ALIGNMENT = 2**5 + 2**6
def read_file(filename):
    """Read a file and return its contents as a single string.

    :param filename: path of the file to read
    :returns: the full file contents as one string
    """
    # file.read() returns the whole contents directly; joining readlines()
    # built the same string via an intermediate list for no benefit.
    with open(filename) as contents:
        return contents.read()
|
{
"content_hash": "3a8fb3a043e9775c5cca166347177c3b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 89,
"avg_line_length": 26.88888888888889,
"alnum_prop": 0.7024793388429752,
"repo_name": "swarris/pyPaSWAS",
"id": "a9b75fc15b43d15e528ee1c9f934037f68f6f84e",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyPaSWAS/Core/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "55486"
},
{
"name": "Cuda",
"bytes": "39887"
},
{
"name": "Dockerfile",
"bytes": "4587"
},
{
"name": "Makefile",
"bytes": "1520"
},
{
"name": "Python",
"bytes": "235631"
},
{
"name": "Shell",
"bytes": "14043"
}
],
"symlink_target": ""
}
|
"""
Utility for caching master images.
"""
import os
import tempfile
import time
import uuid
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import fileutils
import six
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _LI, _LW
from ironic.common import image_service
from ironic.common import images
from ironic.common import utils
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
# This would contain a sorted list of instances of ImageCache to be
# considered for cleanup. This list will be kept sorted in non-increasing
# order of priority.
_cache_cleanup_list = []
class ImageCache(object):
    """Class handling access to cache for master images.

    Master copies live in ``master_dir``; consumer copies are hard links to
    the master file (see :meth:`fetch_image`), so a master with link count 1
    is unused and is the only kind of file clean-up will delete.
    """
    def __init__(self, master_dir, cache_size, cache_ttl):
        """Constructor.

        :param master_dir: cache directory to work on; a value of None
                           disables image caching entirely
        :param cache_size: desired maximum cache size in bytes
        :param cache_ttl: cache entity TTL in seconds
        """
        self.master_dir = master_dir
        self._cache_size = cache_size
        self._cache_ttl = cache_ttl
        if master_dir is not None:
            fileutils.ensure_tree(master_dir)
    def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
        """Fetch image by given href to the destination path.

        Does nothing if destination path exists and is up to date with cache
        and href contents.

        Only creates a hard link (dest_path) to cached image if requested
        image is already in cache and up to date with href contents.

        Otherwise downloads an image, stores it in cache and creates a hard
        link (dest_path) to it.

        :param href: image UUID or href to fetch
        :param dest_path: destination file path
        :param ctx: context
        :param force_raw: boolean value, whether to convert the image to raw
                          format
        """
        img_download_lock_name = 'download-image'
        if self.master_dir is None:
            # Caching disabled: fetch straight to dest_path, optionally
            # serializing all downloads behind one global lock.
            # NOTE(ghe): We don't share images between instances/hosts
            if not CONF.parallel_image_downloads:
                with lockutils.lock(img_download_lock_name):
                    _fetch(ctx, href, dest_path, force_raw)
            else:
                _fetch(ctx, href, dest_path, force_raw)
            return
        # TODO(ghe): have hard links and counts the same behaviour in all fs
        # NOTE(vdrok): File name is converted to UUID if it's not UUID already,
        # so that two images with same file names do not collide
        if service_utils.is_glance_image(href):
            master_file_name = service_utils.parse_image_ref(href)[0]
        else:
            # NOTE(vdrok): Doing conversion of href in case it's unicode
            # string, UUID cannot be generated for unicode strings on python 2.
            href_encoded = href.encode('utf-8') if six.PY2 else href
            master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                              href_encoded))
        master_path = os.path.join(self.master_dir, master_file_name)
        if CONF.parallel_image_downloads:
            # Per-image lock name so unrelated images can download in
            # parallel while two fetches of the same image still serialize.
            img_download_lock_name = 'download-image:%s' % master_file_name
        # TODO(dtantsur): lock expiration time
        with lockutils.lock(img_download_lock_name):
            # NOTE(vdrok): After rebuild requested image can change, so we
            # should ensure that dest_path and master_path (if exists) are
            # pointing to the same file and their content is up to date
            cache_up_to_date = _delete_master_path_if_stale(master_path, href,
                                                            ctx)
            dest_up_to_date = _delete_dest_path_if_stale(master_path,
                                                         dest_path)
            if cache_up_to_date and dest_up_to_date:
                LOG.debug("Destination %(dest)s already exists "
                          "for image %(href)s",
                          {'href': href, 'dest': dest_path})
                return
            if cache_up_to_date:
                # NOTE(dtantsur): ensure we're not in the middle of clean up
                with lockutils.lock('master_image'):
                    os.link(master_path, dest_path)
                LOG.debug("Master cache hit for image %(href)s",
                          {'href': href})
                return
            LOG.info(_LI("Master cache miss for image %(href)s, "
                         "starting download"),
                     {'href': href})
            self._download_image(
                href, master_path, dest_path, ctx=ctx, force_raw=force_raw)
        # NOTE(dtantsur): we increased cache size - time to clean up
        self.clean_up()
    def _download_image(self, href, master_path, dest_path, ctx=None,
                        force_raw=True):
        """Download image by href and store at a given path.

        This method should be called with uuid-specific lock taken.

        :param href: image UUID or href to fetch
        :param master_path: destination master path
        :param dest_path: destination file path
        :param ctx: context
        :param force_raw: boolean value, whether to convert the image to raw
                          format
        """
        # TODO(ghe): timeout and retry for downloads
        # TODO(ghe): logging when image cannot be created
        # Stage inside master_dir so os.link below stays on one filesystem.
        tmp_dir = tempfile.mkdtemp(dir=self.master_dir)
        tmp_path = os.path.join(tmp_dir, href.split('/')[-1])
        try:
            _fetch(ctx, href, tmp_path, force_raw)
            # NOTE(dtantsur): no need for global lock here - master_path
            # will have link count >1 at any moment, so won't be cleaned up
            os.link(tmp_path, master_path)
            os.link(master_path, dest_path)
        finally:
            utils.rmtree_without_raise(tmp_dir)
    @lockutils.synchronized('master_image')
    def clean_up(self, amount=None):
        """Clean up directory with images, keeping cache of the latest images.

        Files with link count >1 are never deleted.
        Protected by global lock, so that no one messes with master images
        after we get listing and before we actually delete files.

        :param amount: if present, amount of space to reclaim in bytes,
                       cleaning will stop, if this goal was reached,
                       even if it is possible to clean up more files
        """
        if self.master_dir is None:
            return
        LOG.debug("Starting clean up for master image cache %(dir)s",
                  {'dir': self.master_dir})
        amount_copy = amount
        listing = _find_candidates_for_deletion(self.master_dir)
        # Stage 1 drops TTL-expired files; stage 2 enforces the size cap.
        survived, amount = self._clean_up_too_old(listing, amount)
        if amount is not None and amount <= 0:
            return
        amount = self._clean_up_ensure_cache_size(survived, amount)
        if amount is not None and amount > 0:
            LOG.warning(
                _LW("Cache clean up was unable to reclaim %(required)d "
                    "MiB of disk space, still %(left)d MiB required"),
                {'required': amount_copy / 1024 / 1024,
                 'left': amount / 1024 / 1024})
    def _clean_up_too_old(self, listing, amount):
        """Clean up stage 1: drop images that are older than TTL.

        This method removes files all files older than TTL seconds
        unless 'amount' is non-None. If 'amount' is non-None,
        it starts removing files older than TTL seconds,
        oldest first, until the required 'amount' of space is reclaimed.

        :param listing: list of tuples (file name, last used time, stat)
        :param amount: if not None, amount of space to reclaim in bytes,
                       cleaning will stop, if this goal was reached,
                       even if it is possible to clean up more files
        :returns: tuple (list of files left after clean up,
                  amount still to reclaim)
        """
        threshold = time.time() - self._cache_ttl
        survived = []
        for file_name, last_used, stat in listing:
            if last_used < threshold:
                try:
                    os.unlink(file_name)
                except EnvironmentError as exc:
                    LOG.warning(_LW("Unable to delete file %(name)s from "
                                    "master image cache: %(exc)s"),
                                {'name': file_name, 'exc': exc})
                else:
                    if amount is not None:
                        amount -= stat.st_size
                        if amount <= 0:
                            # Reclaimed the requested space; stop early.
                            amount = 0
                            break
            else:
                survived.append((file_name, last_used, stat))
        return survived, amount
    def _clean_up_ensure_cache_size(self, listing, amount):
        """Clean up stage 2: try to ensure cache size < threshold.

        Try to delete the oldest files until conditions is satisfied
        or no more files are eligible for deletion.

        :param listing: list of tuples (file name, last used time, stat)
        :param amount: amount of space to reclaim, if possible.
                       if amount is not None, it has higher priority than
                       cache size in settings
        :returns: amount of space still required after clean up
        """
        # NOTE(dtantsur): Sort listing to delete the oldest files first
        listing = sorted(listing,
                         key=lambda entry: entry[1],
                         reverse=True)
        # The size total counts every file in master_dir, including
        # hard-linked masters that are not candidates for deletion.
        total_listing = (os.path.join(self.master_dir, f)
                         for f in os.listdir(self.master_dir))
        total_size = sum(os.path.getsize(f)
                        for f in total_listing)
        while listing and (total_size > self._cache_size or
                           (amount is not None and amount > 0)):
            # pop() takes from the end of the reverse-sorted list,
            # i.e. the oldest remaining file.
            file_name, last_used, stat = listing.pop()
            try:
                os.unlink(file_name)
            except EnvironmentError as exc:
                LOG.warning(_LW("Unable to delete file %(name)s from "
                                "master image cache: %(exc)s"),
                            {'name': file_name, 'exc': exc})
            else:
                total_size -= stat.st_size
                if amount is not None:
                    amount -= stat.st_size
        if total_size > self._cache_size:
            LOG.info(_LI("After cleaning up cache dir %(dir)s "
                         "cache size %(actual)d is still larger than "
                         "threshold %(expected)d"),
                     {'dir': self.master_dir, 'actual': total_size,
                      'expected': self._cache_size})
        return max(amount, 0) if amount is not None else 0
def _find_candidates_for_deletion(master_dir):
"""Find files eligible for deletion i.e. with link count ==1.
:param master_dir: directory to operate on
:returns: iterator yielding tuples (file name, last used time, stat)
"""
for filename in os.listdir(master_dir):
filename = os.path.join(master_dir, filename)
stat = os.stat(filename)
if not os.path.isfile(filename) or stat.st_nlink > 1:
continue
# NOTE(dtantsur): Detect most recently accessed files,
# seeing atime can be disabled by the mount option
# Also include ctime as it changes when image is linked to
last_used_time = max(stat.st_mtime, stat.st_atime, stat.st_ctime)
yield filename, last_used_time, stat
def _free_disk_space_for(path):
"""Get free disk space on a drive where path is located."""
stat = os.statvfs(path)
return stat.f_frsize * stat.f_bavail
def _fetch(context, image_href, path, force_raw=False):
    """Fetch image and convert to raw format if needed.

    The download lands in a ``.part`` staging file first; the final *path*
    is produced either by a raw conversion or by a simple rename.
    """
    staging_path = path + '.part'
    images.fetch(context, image_href, staging_path, force_raw=False)
    if not force_raw:
        os.rename(staging_path, path)
        return
    # Notes(yjiang5): If glance can provide the virtual size information,
    # then we can firstly clean cache and then invoke images.fetch().
    required_space = images.converted_size(staging_path)
    _clean_up_caches(os.path.dirname(staging_path), required_space)
    images.image_to_raw(image_href, path, staging_path)
def _clean_up_caches(directory, amount):
    """Explicitly cleanup caches based on their priority (if required).

    :param directory: the directory (of the cache) to be freed up.
    :param amount: amount of space to reclaim.
    :raises: InsufficientDiskSpace exception, if we cannot free up enough space
             after trying all the caches.
    """
    free = _free_disk_space_for(directory)
    if amount < free:
        # Enough space already available; nothing to clean.
        return
    # NOTE(dtantsur): filter caches, whose directory is on the same device
    st_dev = os.stat(directory).st_dev
    # Each registry entry is (priority, cls); x[1]() instantiates the
    # registered cache class (registration happens via the cleanup()
    # decorator). The registry is kept sorted highest-priority first.
    caches_to_clean = [x[1]() for x in _cache_cleanup_list]
    caches = (c for c in caches_to_clean
              if os.stat(c.master_dir).st_dev == st_dev)
    for cache_to_clean in caches:
        cache_to_clean.clean_up(amount=(amount - free))
        free = _free_disk_space_for(directory)
        if amount < free:
            break
    else:
        # for/else: reached only when no cache freed enough space (the loop
        # never hit the break above).
        raise exception.InsufficientDiskSpace(path=directory,
                                              required=amount / 1024 / 1024,
                                              actual=free / 1024 / 1024,
                                              )
def clean_up_caches(ctx, directory, images_info):
    """Explicitly cleanup caches based on their priority (if required).

    This cleans up the caches to free up the amount of space required for
    the images in images_info. The caches are cleaned up one after the
    other in the order of their priority. If we still cannot free up
    enough space after trying all the caches, this method throws exception.

    :param ctx: context
    :param directory: the directory (of the cache) to be freed up.
    :param images_info: a list of tuples of the form (image_uuid, path)
                        for which space is to be created in cache.
    :raises: InsufficientDiskSpace exception, if we cannot free up enough
             space after trying all the caches.
    """
    required = 0
    for image_uuid, _path in images_info:
        required += images.download_size(ctx, image_uuid)
    _clean_up_caches(directory, required)
def cleanup(priority):
    """Decorator method for adding cleanup priority to a class."""
    def _register(cls):
        # Keep the global registry sorted by priority, highest first.
        _cache_cleanup_list.append((priority, cls))
        _cache_cleanup_list.sort(reverse=True, key=lambda pair: pair[0])
        return cls
    return _register
def _delete_master_path_if_stale(master_path, href, ctx):
    """Delete image from cache if it is not up to date with href contents.

    :param master_path: path to an image in master cache
    :param href: image href
    :param ctx: context to use
    :returns: True if master_path is up to date with href contents,
              False if master_path was stale and was deleted or it didn't
              exist
    """
    if service_utils.is_glance_image(href):
        # Glance image contents cannot be updated without changing image's UUID
        return os.path.exists(master_path)
    if os.path.exists(master_path):
        img_service = image_service.get_image_service(href, context=ctx)
        img_mtime = img_service.show(href).get('updated_at')
        if not img_mtime:
            # This means that href is not a glance image and doesn't have an
            # updated_at attribute
            LOG.warning(_LW("Image service couldn't determine last "
                            "modification time of %(href)s, considering "
                            "cached image up to date."), {'href': href})
            return True
        master_mtime = utils.unix_file_modification_datetime(master_path)
        if img_mtime <= master_mtime:
            return True
        # Delete image from cache as it is outdated.
        # FIX: the original message had an unbalanced quote around
        # %(cached_file)s ('"...s since' with no closing quote).
        LOG.info(_LI('Image %(href)s was last modified at %(remote_time)s. '
                     'Deleting the cached copy "%(cached_file)s" since it '
                     'was last modified at %(local_time)s and may be '
                     'outdated.'),
                 {'href': href, 'remote_time': img_mtime,
                  'local_time': master_mtime, 'cached_file': master_path})
        os.unlink(master_path)
    return False
def _delete_dest_path_if_stale(master_path, dest_path):
"""Delete dest_path if it does not point to cached image.
:param master_path: path to an image in master cache
:param dest_path: hard link to an image
:returns: True if dest_path points to master_path, False if dest_path was
stale and was deleted or it didn't exist
"""
dest_path_exists = os.path.exists(dest_path)
if not dest_path_exists:
# Image not cached, re-download
return False
master_path_exists = os.path.exists(master_path)
if (not master_path_exists or
os.stat(master_path).st_ino != os.stat(dest_path).st_ino):
# Image exists in cache, but dest_path out of date
os.unlink(dest_path)
return False
return True
|
{
"content_hash": "95e12f2ae5d8eca38abeb4f6117bd7e6",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 79,
"avg_line_length": 42.12771084337349,
"alnum_prop": 0.5914888749070526,
"repo_name": "ruyang/ironic",
"id": "11702a5b7325199bef12b049b8cd9cb315f37c9e",
"size": "18148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/image_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5133461"
},
{
"name": "Shell",
"bytes": "107097"
}
],
"symlink_target": ""
}
|
"""
QUESTION:
Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the
original list.
For example,
Given 1->2->3->3->4->4->5, return 1->2->5.
Given 1->1->1->2->3, return 2->3.
ANSWER:
Iterate list and compare
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Drop every value that occurs more than once in a sorted linked list."""

    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        """Return the list with all duplicated values removed entirely."""
        sentinel = ListNode(0)
        sentinel.next = head
        prev = sentinel
        while prev.next:
            runner = prev.next
            # Advance runner to the last node of the run of equal values.
            while runner.next and prev.next.val == runner.next.val:
                runner = runner.next
            if runner is prev.next:
                # Run of length one: the value is unique, keep the node.
                prev = runner
            else:
                # Duplicated value: splice the entire run out.
                prev.next = runner.next
        return sentinel.next
if __name__ == '__main__':
    # Placeholder entry point: under Python 2 the bare ``print`` statement
    # emits a blank line; no test cases are actually exercised here.
    print
|
{
"content_hash": "2f2beb00f2f30c407d2bacb3c254a01c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 112,
"avg_line_length": 23.82051282051282,
"alnum_prop": 0.527448869752422,
"repo_name": "tktrungna/leetcode",
"id": "73129d20d84e8c3dc4c23d9eef9ee17f8bb63e0b",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/remove-duplicates-from-sorted-list-ii.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410956"
}
],
"symlink_target": ""
}
|
"""Switching v2 features on and off."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.data.experimental.ops import readers as exp_readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
# Metrics to track the status of v2_behavior
# The "enable"/"disable" cells are flipped to True by enable_v2_behavior()
# and disable_v2_behavior() respectively.
_v2_behavior_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/version/v2_behavior",
    "whether v2_behavior is enabled or disabled", "status")
@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before
  `Tensors`, `Graphs` or other structures have been created, and before
  devices have been initialized). It switches all global behaviors that are
  different between TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, user
  should not need to call it, except during complex migrations.
  """
  _v2_behavior_usage_gauge.get_cell("enable").set(True)
  # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or
  # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case,
  # the modules below independently check if tf2.enabled().
  tf2.enable()
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
  ops.enable_tensor_equality()
  # Enables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.enable_control_flow_v2()
  # Make sure internal uses of tf.data symbols map to V2 versions.
  dataset_ops.Dataset = dataset_ops.DatasetV2
  readers.FixedLengthRecordDataset = readers.FixedLengthRecordDatasetV2
  readers.TFRecordDataset = readers.TFRecordDatasetV2
  readers.TextLineDataset = readers.TextLineDatasetV2
  counter.Counter = counter.CounterV2
  interleave_ops.choose_from_datasets = interleave_ops.choose_from_datasets_v2
  interleave_ops.sample_from_datasets = interleave_ops.sample_from_datasets_v2
  random_ops.RandomDataset = random_ops.RandomDatasetV2
  exp_readers.CsvDataset = exp_readers.CsvDatasetV2
  exp_readers.SqlDataset = exp_readers.SqlDatasetV2
  exp_readers.make_batched_features_dataset = (
      exp_readers.make_batched_features_dataset_v2)
  exp_readers.make_csv_dataset = exp_readers.make_csv_dataset_v2
@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
  """Disables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before
  `Tensors`, `Graphs` or other structures have been created, and before
  devices have been initialized). It switches all global behaviors that are
  different between TensorFlow 1.x and 2.x to behave as intended for 1.x.

  User can call this function to disable 2.x behavior during complex
  migrations.
  """
  _v2_behavior_usage_gauge.get_cell("disable").set(True)
  tf2.disable()
  ops.disable_eager_execution()
  tensor_shape.disable_v2_tensorshape()  # Also switched by tf2
  variable_scope.disable_resource_variables()
  ops.disable_tensor_equality()
  # Disables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.disable_control_flow_v2()
  # Make sure internal uses of tf.data symbols map to V1 versions.
  dataset_ops.Dataset = dataset_ops.DatasetV1
  readers.FixedLengthRecordDataset = readers.FixedLengthRecordDatasetV1
  readers.TFRecordDataset = readers.TFRecordDatasetV1
  readers.TextLineDataset = readers.TextLineDatasetV1
  counter.Counter = counter.CounterV1
  interleave_ops.choose_from_datasets = interleave_ops.choose_from_datasets_v1
  interleave_ops.sample_from_datasets = interleave_ops.sample_from_datasets_v1
  random_ops.RandomDataset = random_ops.RandomDatasetV1
  exp_readers.CsvDataset = exp_readers.CsvDatasetV1
  exp_readers.SqlDataset = exp_readers.SqlDatasetV1
  exp_readers.make_batched_features_dataset = (
      exp_readers.make_batched_features_dataset_v1)
  exp_readers.make_csv_dataset = exp_readers.make_csv_dataset_v1
|
{
"content_hash": "9911233ba87001ccabae615508cafbd5",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 46.505050505050505,
"alnum_prop": 0.7821459600347523,
"repo_name": "aam-at/tensorflow",
"id": "eac841fb2fee928b0503ba79afaaef9848938dd4",
"size": "5293",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/compat/v2_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
import dbus

# Shared D-Bus handles for the BlueZ test scripts: the system bus, the
# introspection proxy, the BlueZ manager, and (when present) the default
# adapter.
bus = dbus.SystemBus()
dummy = dbus.Interface(bus.get_object('org.bluez', '/'), 'org.freedesktop.DBus.Introspectable')
#print dummy.Introspect()
manager = dbus.Interface(bus.get_object('org.bluez', '/'), 'org.bluez.Manager')
try:
    adapter = dbus.Interface(bus.get_object('org.bluez', manager.DefaultAdapter()), 'org.bluez.Adapter')
except Exception:
    # DefaultAdapter() fails when no Bluetooth adapter is available; leave
    # ``adapter`` undefined in that case. The original bare ``except:`` also
    # swallowed SystemExit/KeyboardInterrupt -- narrowed to Exception.
    pass
|
{
"content_hash": "b1a0a969c4cba8c935c012f60804002f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 101,
"avg_line_length": 22.8125,
"alnum_prop": 0.7123287671232876,
"repo_name": "sergecodd/FireFox-OS",
"id": "5af61535ba4d37477213760a4862545136c00a15",
"size": "365",
"binary": false,
"copies": "105",
"ref": "refs/heads/master",
"path": "B2G/external/bluetooth/bluez/test/dbusdef.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
}
|
"""Configure py.test."""
import pytest
from pyvizio.const import DEVICE_CLASS_SPEAKER, MAX_VOLUME
from .const import (
ACCESS_TOKEN,
APP_LIST,
CH_TYPE,
CURRENT_APP_CONFIG,
CURRENT_EQ,
CURRENT_INPUT,
EQ_LIST,
INPUT_LIST,
INPUT_LIST_WITH_APPS,
MODEL,
RESPONSE_TOKEN,
UNIQUE_ID,
VERSION,
MockCompletePairingResponse,
MockStartPairingResponse,
)
from tests.async_mock import patch
class MockInput:
    """Mock Vizio device input."""

    def __init__(self, name):
        """Store *name* as both the display name and the metadata name."""
        self.name = name
        self.meta_name = name
def get_mock_inputs(input_list):
    """Return a list of MockInput objects, one per name in *input_list*."""
    # Loop variable renamed from ``input`` to avoid shadowing the builtin.
    return [MockInput(name) for name in input_list]
@pytest.fixture(name="skip_notifications", autouse=True)
def skip_notifications_fixture():
    """Skip notification calls.

    Autouse: silences persistent-notification create/dismiss in every test.
    """
    with patch("homeassistant.components.persistent_notification.async_create"), patch(
        "homeassistant.components.persistent_notification.async_dismiss"
    ):
        yield
@pytest.fixture(name="vizio_connect")
def vizio_connect_fixture():
    """Mock valid vizio device and entry setup.

    Config-flow validation succeeds and the device reports UNIQUE_ID.
    """
    with patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.validate_ha_config",
        return_value=True,
    ), patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.get_unique_id",
        return_value=UNIQUE_ID,
    ):
        yield
@pytest.fixture(name="vizio_complete_pairing")
def vizio_complete_pairing_fixture():
    """Mock complete vizio pairing workflow.

    start_pair returns a challenge (CH_TYPE, RESPONSE_TOKEN) and pair
    completes with ACCESS_TOKEN.
    """
    with patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
        return_value=MockStartPairingResponse(CH_TYPE, RESPONSE_TOKEN),
    ), patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.pair",
        return_value=MockCompletePairingResponse(ACCESS_TOKEN),
    ):
        yield
@pytest.fixture(name="vizio_start_pairing_failure")
def vizio_start_pairing_failure_fixture():
    """Mock vizio start pairing failure (start_pair returns None)."""
    with patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
        return_value=None,
    ):
        yield
@pytest.fixture(name="vizio_invalid_pin_failure")
def vizio_invalid_pin_failure_fixture():
    """Mock vizio failure due to invalid pin.

    Pairing starts successfully but pair() returns None.
    """
    with patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
        return_value=MockStartPairingResponse(CH_TYPE, RESPONSE_TOKEN),
    ), patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.pair", return_value=None,
    ):
        yield
@pytest.fixture(name="vizio_bypass_setup")
def vizio_bypass_setup_fixture():
    """Mock component setup so config entries succeed without a device."""
    with patch("homeassistant.components.vizio.async_setup_entry", return_value=True):
        yield
@pytest.fixture(name="vizio_bypass_update")
def vizio_bypass_update_fixture():
    """Mock component update: connectivity check passes, update is a no-op."""
    with patch(
        "homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
        return_value=True,
    ), patch("homeassistant.components.vizio.media_player.VizioDevice.async_update"):
        yield
@pytest.fixture(name="vizio_guess_device_type")
def vizio_guess_device_type_fixture():
    """Mock vizio async_guess_device_type function to report a speaker."""
    with patch(
        "homeassistant.components.vizio.config_flow.async_guess_device_type",
        return_value="speaker",
    ):
        yield
@pytest.fixture(name="vizio_cant_connect")
def vizio_cant_connect_fixture():
    """Mock vizio device that can't connect (config validation fails)."""
    with patch(
        "homeassistant.components.vizio.config_flow.VizioAsync.validate_ha_config",
        return_value=False,
    ):
        yield
@pytest.fixture(name="vizio_update")
def vizio_update_fixture():
    """Mock valid updates to vizio device.

    Provides a powered-on speaker at half volume with the constant EQ,
    input list, model and version values from tests/components/vizio/const.
    """
    with patch(
        "homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
        return_value=True,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_all_settings",
        return_value={
            "volume": int(MAX_VOLUME[DEVICE_CLASS_SPEAKER] / 2),
            "eq": CURRENT_EQ,
            "mute": "Off",
        },
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_setting_options",
        return_value=EQ_LIST,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_current_input",
        return_value=CURRENT_INPUT,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_inputs_list",
        return_value=get_mock_inputs(INPUT_LIST),
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
        return_value=True,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_model_name",
        return_value=MODEL,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_version",
        return_value=VERSION,
    ):
        yield
@pytest.fixture(name="vizio_update_with_apps")
def vizio_update_with_apps_fixture(vizio_update: pytest.fixture):
    """Mock valid updates to vizio device that supports apps.

    Layers on top of vizio_update: the input list includes apps, the
    current input is "CAST", and an app list/config is available.
    """
    with patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_inputs_list",
        return_value=get_mock_inputs(INPUT_LIST_WITH_APPS),
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_apps_list",
        return_value=APP_LIST,
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_current_input",
        return_value="CAST",
    ), patch(
        "homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
        return_value=CURRENT_APP_CONFIG,
    ):
        yield
|
{
"content_hash": "8d406aabbce89abadf1e1050cc36f5da",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 93,
"avg_line_length": 31.535135135135135,
"alnum_prop": 0.673808707576277,
"repo_name": "robbiet480/home-assistant",
"id": "f7448a71c3168af12638e2dfed6518817b50b0f5",
"size": "5834",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/vizio/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
# Local debugging toggle. The original assigned ``debug = True`` and then
# immediately overwrote it with False; the dead first assignment is removed.
debug = False
# from CommonClasses import *  # hxl: comment out this line for submission
class Solution:
    """Search a rotated sorted array that may contain duplicates."""

    # @param A, a list of integers
    # @param target, an integer to be searched
    # @return a boolean
    def search(self, A, target):
        """Return True if *target* occurs in the rotated sorted list *A*."""
        # hxl: cases like [1,1,1,...,2,...,1,1] hide the pivot from bisection
        # when the first and last elements are equal, so fall back to a
        # linear scan there.
        if A[0] == A[-1]:
            for i in range(len(A)):
                if A[i] == target:
                    return True
            return False
        pivot = self.searchForPivotPoint(A, 0, len(A) - 1)
        # Search the post-pivot (right) segment first, then the left one.
        r = self.binarySearch(A, pivot, len(A) - 1, target)
        if r == -1 and 0 < pivot:
            r = self.binarySearch(A, 0, pivot - 1, target)
        return r != -1

    def searchForPivotPoint(self, A, start, end):
        """Return the index of the smallest element of A[start:end + 1]."""
        if start == end or A[start] <= A[end]:
            # Single element, or the slice is already in ascending order.
            return start
        if end - start < 2:
            # Two out-of-order elements: the second one is the pivot.
            # (The original re-tested A[start] <= A[end] here, but that
            # branch was unreachable -- it is False by the check above.)
            return end
        # Recurse into whichever half contains the rotation "drop".
        # ``//`` keeps the midpoint an int on Python 3; the original ``/``
        # produced a float there and broke list indexing.
        mid = start + (end - start) // 2
        if A[start] > A[mid]:
            return self.searchForPivotPoint(A, start, mid)
        return self.searchForPivotPoint(A, mid, end)

    def binarySearch(self, A, start, end, target):
        """Return an index of *target* in sorted A[start:end + 1], or -1."""
        mid = start + (end - start) // 2  # // for Python 2/3 compatibility
        if end - start < 2:  # at most two elements left
            if A[start] == target:
                return start
            if A[end] == target:
                return end
            return -1  # hxl: not found
        if A[mid] == target:
            return mid
        if target < A[mid]:
            return self.binarySearch(A, start, mid, target)
        return self.binarySearch(A, mid, end, target)
|
{
"content_hash": "0620cde16c6409d28467c326ed5ef0df",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 100,
"avg_line_length": 34.096774193548384,
"alnum_prop": 0.47445600756859035,
"repo_name": "54lihaoxin/leetcode_python",
"id": "467ac0ac2adb528888b7c097d0e6c55b39a4bf8f",
"size": "2376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SearchInRotatedSortedArrayII/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "715933"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the 'test_repos' field of the account.Account model to
    # 'test_repos_name'; depends on the previous migration in this app.

    dependencies = [
        ('account', '0016_auto_20160413_1649'),
    ]

    operations = [
        migrations.RenameField(
            model_name='account',
            old_name='test_repos',
            new_name='test_repos_name',
        ),
    ]
|
{
"content_hash": "595c13a1b805d9fbf5b55aa152694089",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 47,
"avg_line_length": 20.5,
"alnum_prop": 0.5799457994579946,
"repo_name": "iModels/ffci",
"id": "e0d37c9396daaa2a9eb5bab0587af1163c534621",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/migrations/0017_auto_20160413_1706.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1067"
},
{
"name": "HTML",
"bytes": "10922"
},
{
"name": "JavaScript",
"bytes": "9416"
},
{
"name": "Python",
"bytes": "1183721"
},
{
"name": "Shell",
"bytes": "519"
}
],
"symlink_target": ""
}
|
"""
Parser for ACE files output by PHRAP.
Written by Frank Kauff (fkauff@duke.edu) and
Cymon J. Cox (cymon@duke.edu)
Uses the Biopython Parser interface: ParserSupport.py
Usage:
There are two ways of reading an ace file:
1) The function 'read' reads the whole file at once;
2) The function 'parse' reads the file contig after contig.
1) Parse whole ace file at once:
from Bio.Sequencing import Ace
acefilerecord=Ace.read(open('my_ace_file.ace'))
This gives you:
acefilerecord.ncontigs (the number of contigs in the ace file)
acefilerecord.nreads (the number of reads in the ace file)
acefilerecord.contigs[] (one instance of the Contig class for each contig)
The Contig class holds the info of the CO tag, CT and WA tags, and all the reads used
for this contig in a list of instances of the Read class, e.g.:
contig3=acefilerecord.contigs[2]
read4=contig3.reads[3]
RD_of_read4=read4.rd
DS_of_read4=read4.ds
CT, WA, RT tags from the end of the file can appear anywhere and are automatically
sorted into the right place.
see _RecordConsumer for details.
2) Or you can iterate over the contigs of an ace file one by one in the usual way:
from Bio.Sequencing import Ace
contigs=Ace.parse(open('my_ace_file.ace'))
for contig in contigs:
print contig.name
...
Please note that for memory efficiency, when using the iterator approach, only one
contig is kept in memory at once. However, there can be a footer to the ACE file
containing WA, CT, RT or WR tags which contain additional meta-data on the contigs.
Because the parser doesn't see this data until the final record, it cannot be added to
the appropriate records. Instead these tags will be returned with the last contig record.
Thus an ace file does not entirely suit the concept of iterating. If WA, CT, RT, WR tags
are needed, the 'read' function rather than the 'parse' function might be more appropriate.
"""
class rd(object):
    """RD (reads), store a read with its name, sequence etc.
    The location and strand each read is mapped to is held in the AF lines.
    """

    def __init__(self):
        # All fields start empty; the parser fills them in from the RD
        # header line and the following sequence lines.
        self.name = ''
        self.sequence = ''
        self.padded_bases = None
        self.info_items = None
        self.read_tags = None
class qa(object):
    """QA (read quality), including which part if any was used as the consensus."""

    def __init__(self, line=None):
        self.qual_clipping_start = None
        self.qual_clipping_end = None
        self.align_clipping_start = None
        self.align_clipping_end = None
        if line:
            # Parse the four clipping coordinates with int() instead of the
            # original map(eval, ...): eval is unsafe on file-sourced input,
            # and on Python 3 map() returns a lazy object that cannot be
            # indexed. Values are the QA line's integer fields.
            fields = line.split()[1:]
            self.qual_clipping_start = int(fields[0])
            self.qual_clipping_end = int(fields[1])
            self.align_clipping_start = int(fields[2])
            self.align_clipping_end = int(fields[3])
class ds(object):
    """DS lines, include file name of a read's chromatogram file."""

    def __init__(self, line=None):
        self.chromat_file = ''
        self.phd_file = ''
        self.time = ''
        self.chem = ''
        self.dye = ''
        self.template = ''
        self.direction = ''
        if line:
            # The DS line packs "TAG: value" pairs in arbitrary order.
            # Locate each known tag, then slice each value as the text
            # between "TAG:" and the start of the next tag.
            tags = ['CHROMAT_FILE', 'PHD_FILE', 'TIME', 'CHEM', 'DYE', 'TEMPLATE', 'DIRECTION']
            # Map tag position -> tag name; key -1 collects tags absent from
            # the line and is discarded. (The original Py2-only
            # map(...)/keys().sort() is replaced with Py2/Py3-portable
            # equivalents producing identical results.)
            tagpos = dict((line.find(tag), tag) for tag in tags)
            tagpos.pop(-1, None)
            positions = sorted(tagpos)
            for p1, p2 in zip(positions, positions[1:] + [len(line) + 1]):
                tag = tagpos[p1]
                # Skip "TAG:" itself (+1 for the colon), stop before the next tag.
                setattr(self, tag.lower(), line[p1 + len(tag) + 1:p2].strip())
class af(object):
    """AF lines, define the location of the read within the contig.
    Note attribute coru is short for complemented (C) or uncomplemented (U),
    since the strand information is stored in an ACE file using either the
    C or U character.
    """

    def __init__(self, line=None):
        self.name = ''
        self.coru = None
        self.padded_start = None
        if not line:
            return
        fields = line.split()
        self.name = fields[1]
        self.coru = fields[2]
        self.padded_start = int(fields[3])
class bs(object):
    """BS (base segment), which read was chosen as the consensus at each position."""

    def __init__(self, line=None):
        self.name = ''
        self.padded_start = None
        self.padded_end = None
        if not line:
            return
        fields = line.split()
        self.padded_start = int(fields[1])
        self.padded_end = int(fields[2])
        self.name = fields[3]
class rt(object):
    """RT (transient read tags), generated by crossmatch and phrap."""

    def __init__(self, line=None):
        self.name = ''
        self.tag_type = ''
        self.program = ''
        self.padded_start = None
        self.padded_end = None
        self.date = ''
        self.comment = []
        if not line:
            return
        # Fields: name, type, program, start, end, date.
        fields = line.split()
        self.name = fields[0]
        self.tag_type = fields[1]
        self.program = fields[2]
        self.padded_start = int(fields[3])
        self.padded_end = int(fields[4])
        self.date = fields[5]
class ct(object):
    """CT (consensus tags)."""

    def __init__(self, line=None):
        self.name = ''
        self.tag_type = ''
        self.program = ''
        self.padded_start = None
        self.padded_end = None
        self.date = ''
        self.notrans = ''
        self.info = []
        self.comment = []
        if not line:
            return
        # Fields: name, type, program, start, end, date [, NoTrans].
        fields = line.split()
        self.name = fields[0]
        self.tag_type = fields[1]
        self.program = fields[2]
        self.padded_start = int(fields[3])
        self.padded_end = int(fields[4])
        self.date = fields[5]
        # An optional seventh field marks the tag as NoTrans.
        if len(fields) == 7:
            self.notrans = fields[6]
class wa(object):
    """WA (whole assembly tag), holds the assembly program name, version, etc."""

    def __init__(self, line=None):
        self.tag_type = ''
        self.program = ''
        self.date = ''
        self.info = []
        if not line:
            return
        fields = line.split()
        self.tag_type = fields[0]
        self.program = fields[1]
        self.date = fields[2]
class wr(object):
    """WR lines."""

    def __init__(self, line=None):
        self.name = ''
        self.aligned = ''
        self.program = ''
        self.date = []
        if not line:
            return
        fields = line.split()
        self.name = fields[0]
        self.aligned = fields[1]
        self.program = fields[2]
        self.date = fields[3]
class Reads(object):
    """Holds information about a read supporting an ACE contig."""

    def __init__(self, line=None):
        self.rd = None  # one per read
        self.qa = None  # one per read
        self.ds = None  # none or one per read
        self.rt = None  # none or many per read
        self.wr = None  # none or many per read
        if not line:
            return
        # The RD header carries the read name plus three integer counts.
        fields = line.split()
        self.rd = rd()
        self.rd.name = fields[1]
        self.rd.padded_bases = int(fields[2])
        self.rd.info_items = int(fields[3])
        self.rd.read_tags = int(fields[4])
class Contig(object):
    """Holds information about a contig from an ACE record."""

    def __init__(self, line=None):
        self.name = ''
        self.nbases = None
        self.nreads = None
        self.nsegments = None
        self.uorc = None
        self.sequence = ""
        self.quality = []
        self.af = []
        self.bs = []
        self.reads = []
        self.ct = None  # none or many
        self.wa = None  # none or many
        if not line:
            return
        # CO header: name, base count, read count, segment count, U-or-C flag.
        fields = line.split()
        self.name = fields[1]
        self.nbases = int(fields[2])
        self.nreads = int(fields[3])
        self.nsegments = int(fields[4])
        self.uorc = fields[5]
def parse(handle):
    """parse(handle)
    where handle is a file-like object.
    This function returns an iterator that allows you to iterate
    over the ACE file record by record:
    records = parse(handle)
    for record in records:
        # do something with the record
    where each record is a Contig object.
    """
    # The whole function is a hand-rolled state machine over a shared
    # iterator; `line` carries look-ahead between the stages below.
    # (Written for Python 2: uses handle.next().)
    handle = iter(handle)
    line = ""
    while True:
        # at beginning, skip the AS and look for first CO command
        try:
            while True:
                if line.startswith('CO'):
                    break
                line = handle.next()
        except StopIteration:
            # No further contig: end the generator normally.
            return
        record = Contig(line)
        # Consensus sequence: concatenate lines until the first blank line.
        for line in handle:
            line = line.strip()
            if not line:
                break
            record.sequence+=line
        # Skip blank lines up to the BQ (base quality) header.
        for line in handle:
            if line.strip():
                break
        if not line.startswith("BQ"):
            raise ValueError("Failed to find BQ line")
        # Quality values: whitespace-separated ints until a blank line.
        for line in handle:
            if not line.strip():
                break
            record.quality.extend(map(int,line.split()))
        # Skip blanks up to the AF block.
        for line in handle:
            if line.strip():
                break
        # AF lines: read placements within the contig.
        while True:
            if not line.startswith("AF "):
                break
            record.af.append(af(line))
            try:
                line = handle.next()
            except StopIteration:
                raise ValueError("Unexpected end of AF block")
        # Skip blanks between the AF and BS blocks.
        while True:
            if line.strip():
                break
            try:
                line = handle.next()
            except StopIteration:
                raise ValueError("Unexpected end of file")
        # BS lines: base segments (consensus source per position).
        while True:
            if not line.startswith("BS "):
                break
            record.bs.append(bs(line))
            try:
                line = handle.next()
            except StopIteration:
                raise ValueError("Failed to find end of BS block")
        # now read all the read data
        # it starts with a 'RD', and then a mandatory QA
        # then follows an optional DS
        # CT,RT,WA,WR may or may not be there in unlimited quantity. They might refer to the actual read or contig,
        # or, if encountered at the end of file, to any previous read or contig. the sort() method deals
        # with that later.
        while True:
            # each read must have a rd and qa
            try:
                while True:
                    # If I've met the condition, then stop reading the line.
                    if line.startswith("RD "):
                        break
                    line = handle.next()
            except StopIteration:
                raise ValueError("Failed to find RD line")
            record.reads.append(Reads(line))
            # Read sequence lines until the next blank line.
            for line in handle:
                line = line.strip()
                if not line:
                    break
                record.reads[-1].rd.sequence+=line
            for line in handle:
                if line.strip():
                    break
            if not line.startswith("QA "):
                raise ValueError("Failed to find QA line")
            record.reads[-1].qa = qa(line)
            # now one ds can follow
            for line in handle:
                if line.strip():
                    break
            else:
                # for/else: iterator exhausted without finding content —
                # the file ends right after this read.
                break
            if line.startswith("DS "):
                record.reads[-1].ds = ds(line)
                line = ""
            # the file could just end, or there's some more stuff. In ace files, anything can happen.
            # the following tags are interspersed between reads and can appear multiple times.
            while True:
                # something left
                try:
                    while True:
                        if line.strip():
                            break
                        line = handle.next()
                except StopIteration:
                    # file ends here
                    break
                if line.startswith("RT{"):
                    # now if we're at the end of the file, this rt could
                    # belong to a previous read, not the actual one.
                    # we store it here were it appears, the user can sort later.
                    if record.reads[-1].rt is None:
                        record.reads[-1].rt=[]
                    for line in handle:
                        line=line.strip()
                        #if line=="COMMENT{":
                        if line.startswith("COMMENT{"):
                            if line[8:].strip():
                                #MIRA 3.0.5 would miss the new line out :(
                                record.reads[-1].rt[-1].comment.append(line[8:])
                            # Comment lines run until the "C}" terminator.
                            for line in handle:
                                line = line.strip()
                                if line.endswith("C}"):
                                    break
                                record.reads[-1].rt[-1].comment.append(line)
                        elif line=='}':
                            break
                        else:
                            record.reads[-1].rt.append(rt(line))
                    line = ""
                elif line.startswith("WR{"):
                    if record.reads[-1].wr is None:
                        record.reads[-1].wr=[]
                    for line in handle:
                        line=line.strip()
                        if line=='}': break
                        record.reads[-1].wr.append(wr(line))
                    line = ""
                elif line.startswith("WA{"):
                    if record.wa is None:
                        record.wa=[]
                    try:
                        line = handle.next()
                    except StopIteration:
                        raise ValueError("Failed to read WA block")
                    record.wa.append(wa(line))
                    # Remaining lines up to '}' are free-form info text.
                    for line in handle:
                        line=line.strip()
                        if line=='}': break
                        record.wa[-1].info.append(line)
                    line = ""
                elif line.startswith("CT{"):
                    if record.ct is None:
                        record.ct=[]
                    try:
                        line = handle.next()
                    except StopIteration:
                        raise ValueError("Failed to read CT block")
                    record.ct.append(ct(line))
                    for line in handle:
                        line=line.strip()
                        if line=="COMMENT{":
                            for line in handle:
                                line = line.strip()
                                if line.endswith("C}"):
                                    break
                                record.ct[-1].comment.append(line)
                        elif line=='}':
                            break
                        else:
                            record.ct[-1].info.append(line)
                    line = ""
                else:
                    break
            if not line.startswith('RD'): # another read?
                break
        yield record
class ACEFileRecord(object):
    """Holds data of an ACE file.
    """

    def __init__(self):
        # ncontigs/nreads come from the AS header; contigs is filled by
        # parse(); wa collects whole-assembly tags (none or many).
        self.ncontigs=None
        self.nreads=None
        self.contigs=[]
        self.wa=None # none or many

    def sort(self):
        """Sorts wr, rt and ct tags into the appropriate contig / read instance, if possible. """
        # Tags parsed at the end of the file get attached to whichever
        # contig/read was current at the time. Pass 1 pulls out every tag
        # whose name does not match its host; pass 2 re-attaches each one
        # to the contig/read it actually names.
        ct=[]
        rt=[]
        wr=[]
        # search for tags that aren't in the right position
        for i in range(len(self.contigs)):
            c = self.contigs[i]
            if c.wa:
                # WA tags describe the whole assembly: hoist onto the record.
                if not self.wa:
                    self.wa=[]
                self.wa.extend(c.wa)
            if c.ct:
                newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]
                for x in newcts:
                    self.contigs[i].ct.remove(x)
                ct.extend(newcts)
            for j in range(len(c.reads)):
                r = c.reads[j]
                if r.rt:
                    newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]
                    for x in newrts:
                        self.contigs[i].reads[j].rt.remove(x)
                    rt.extend(newrts)
                if r.wr:
                    newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]
                    for x in newwrs:
                        self.contigs[i].reads[j].wr.remove(x)
                    wr.extend(newwrs)
        # now sort them into their proper place
        for i in range(len(self.contigs)):
            c = self.contigs[i]
            for ct_tag in ct:
                if ct_tag.name==c.name:
                    if self.contigs[i].ct is None:
                        self.contigs[i].ct=[]
                    self.contigs[i].ct.append(ct_tag)
            if rt or wr:
                for j in range(len(c.reads)):
                    r = c.reads[j]
                    for rt_tag in rt:
                        if rt_tag.name==r.rd.name:
                            if self.contigs[i].reads[j].rt is None:
                                self.contigs[i].reads[j].rt=[]
                            self.contigs[i].reads[j].rt.append(rt_tag)
                    for wr_tag in wr:
                        if wr_tag.name==r.rd.name:
                            if self.contigs[i].reads[j].wr is None:
                                self.contigs[i].reads[j].wr=[]
                            self.contigs[i].reads[j].wr.append(wr_tag)
def read(handle):
    """Parses the full ACE file in list of contigs.

    Loads every contig at once into an ACEFileRecord, then calls sort()
    so trailing WA/CT/RT/WR tags end up on the contig or read they
    actually name.

    :param handle: file-like object positioned at the start of the file
    :raises ValueError: on a truncated file or a missing 'AS' header
    """
    handle = iter(handle)
    record = ACEFileRecord()
    try:
        # next() builtin instead of handle.next(), so this works on
        # Python 2.6+ as well as Python 3.
        line = next(handle)
    except StopIteration:
        raise ValueError("Premature end of file")
    # check if the file starts correctly
    if not line.startswith('AS'):
        raise ValueError("File does not start with 'AS'.")
    words = line.split()
    record.ncontigs, record.nreads = map(int, words[1:3])
    # now read all the records
    record.contigs = list(parse(handle))
    # wa, ct, rt rags are usually at the end of the file, but not necessarily (correct?).
    # If the iterator is used, the tags are returned with the contig or the read after which they appear,
    # if all tags are at the end, they are read with the last contig. The concept of an
    # iterator leaves no other choice. But if the user uses the ACEParser, we can check
    # them and put them into the appropriate contig/read instance.
    # Conclusion: An ACE file is not a filetype for which iteration is 100% suitable...
    record.sort()
    return record
|
{
"content_hash": "d9686206036f3746c163cb18ed6ce855",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 115,
"avg_line_length": 34.787709497206706,
"alnum_prop": 0.5000802954873936,
"repo_name": "LyonsLab/coge",
"id": "0025ecd5e501b7a1a3ea2da282c5e2d2a7a09aa7",
"size": "18921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/last_wrapper/Bio/Sequencing/Ace.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1469592"
},
{
"name": "C++",
"bytes": "156708"
},
{
"name": "CSS",
"bytes": "65405"
},
{
"name": "HTML",
"bytes": "105418"
},
{
"name": "Haxe",
"bytes": "111359"
},
{
"name": "Java",
"bytes": "55110"
},
{
"name": "JavaScript",
"bytes": "762653"
},
{
"name": "Makefile",
"bytes": "7838"
},
{
"name": "Perl",
"bytes": "5054463"
},
{
"name": "Python",
"bytes": "4394136"
},
{
"name": "Raku",
"bytes": "15140"
},
{
"name": "RobotFramework",
"bytes": "15841"
},
{
"name": "Roff",
"bytes": "3514"
},
{
"name": "Rust",
"bytes": "1507"
},
{
"name": "Shell",
"bytes": "13600"
},
{
"name": "TSQL",
"bytes": "24440"
}
],
"symlink_target": ""
}
|
def fatal_error(error):
    """Abort the program by raising a RuntimeError with the given message.

    Inputs:
    error = error message text

    :param error: str
    :return:
    """
    raise RuntimeError(error)
|
{
"content_hash": "edb410546fe0362441d70e8016176a42",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 75,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.6438356164383562,
"repo_name": "stiphyMT/plantcv",
"id": "5b2869096f841514f4e673f56648bd0ecbd4a2ed",
"size": "238",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plantcv/plantcv/fatal_error.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "955647"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
}
|
from django.conf import settings
# Read the optional STATICTEMPLATE_OVERRIDE_MIDDLEWARE flag from the Django
# project settings; defaults to True when the setting is not defined.
OVERRIDE_MIDDLEWARE = getattr(
    settings, 'STATICTEMPLATE_OVERRIDE_MIDDLEWARE', True
)
|
{
"content_hash": "5cce7d228a415b153c89d166a13163b5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 56,
"avg_line_length": 24.8,
"alnum_prop": 0.782258064516129,
"repo_name": "ojii/django-statictemplate",
"id": "3f94083e8fae0af88fd8908acfa0a294459fe0cb",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statictemplate/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "976"
},
{
"name": "Python",
"bytes": "11226"
}
],
"symlink_target": ""
}
|
import os
from i3pystatus.mail import Backend
class MaildirMail(Backend):
    """
    Checks for local mail in Maildir
    """

    settings = ("directory",)
    required = ("directory",)
    directory = ""

    def init(self):
        # Expand a leading "~" in the configured mailbox path.
        self.directory = os.path.expanduser(self.directory)

    @property
    def unread(self):
        def is_unseen(msgname):
            """Return False if the (S)een flag is set.

            Partially adapted from Python's Maildir and MaildirMessage
            classes, which are not used here because they cannot read the
            message flags without reading the entire message.
            """
            info = msgname.split(':')[-1]
            # Flags are only present when the info section starts with '2,';
            # without that prefix no flags are set, so the message is unseen.
            if not info.startswith('2,'):
                return True
            return 'S' not in info[2:]

        # Everything in new/ is unread by definition.
        new_count = len(os.listdir(os.path.join(self.directory, "new")))
        # Messages in cur/ count as unread unless flagged Seen.
        cur_names = os.listdir(os.path.join(self.directory, "cur"))
        cur_count = sum(1 for name in cur_names if is_unseen(name))
        return new_count + cur_count


Backend = MaildirMail
|
{
"content_hash": "462cfb6cf93fc63ada778fd622f3f19c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 90,
"avg_line_length": 30.22222222222222,
"alnum_prop": 0.6110294117647059,
"repo_name": "Arvedui/i3pystatus",
"id": "0f21422fd39b5962ef9d50ef71435682583cad10",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i3pystatus/mail/maildir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "607826"
},
{
"name": "Shell",
"bytes": "757"
}
],
"symlink_target": ""
}
|
"""
Base Driver Module
"""
import asyncpg
from autobahn.wamp.types import SubscribeOptions, PublishOptions
from eventify.exceptions import SENTRY_CLIENT, SENTRY_DSN
from eventify.persist.constants import EVENT_DB_HOST, \
EVENT_DB_USER, \
EVENT_DB_PASS, \
EVENT_DB_NAME
from eventify.util import objdict
class BaseComponent(object):
    """
    Base class for driver components
    """

    async def onConnect(self):
        """
        Configure the component

        Copies the legacy flat config into the crossbar/autobahn-style
        'extra' attribute, caches the transport/pub/sub settings on self,
        and optionally creates a Postgres pool when event retention is on.
        """
        # Add extra attribute
        # This allows for following crossbar/autobahn spec
        # without changing legacy configuration
        if not hasattr(self.config, 'extra'):
            original_config = {'config': self.config}
            self.config = objdict(self.config)
            setattr(self.config, 'extra', original_config)
            self.config.extra['handlers'] = self.handlers
        # NOTE(review): when config already has 'extra', the code below
        # assumes extra['handlers'] was pre-populated by the caller — confirm.
        # setup transport host
        self.transport_host = self.config.extra['config']['transport_host']
        # subscription setup
        self.subscribe_options = SubscribeOptions(**self.config.extra['config']['sub_options'])
        self.replay_events = self.config.extra['config']['replay_events']
        # publishing setup
        self.publish_topic = self.config.extra['config']['publish_topic']['topic']
        self.publish_options = PublishOptions(**self.config.extra['config']['pub_options'])
        # setup callback
        self.handlers = self.config.extra['handlers']
        # optional subscribed topics from config.json
        self.subscribed_topics = self.config.extra['config']['subscribed_topics']
        # put name on session
        self.name = self.config.extra['config']['name']
        # setup db pool - optionally, only when retained publishing is enabled
        if self.config.extra['config']['pub_options']['retain'] is True:
            self.pool = await asyncpg.create_pool(
                user=EVENT_DB_USER,
                password=EVENT_DB_PASS,
                host=EVENT_DB_HOST,
                database=EVENT_DB_NAME
            )
        # Handle non crossbar drivers
        # (drivers without a join() method simply skip realm joining)
        try:
            self.join(self.config.realm)
        except AttributeError:
            pass

    @staticmethod
    def capture_exception():
        """
        Track exception in configured remote
        exception tracking system
        """
        # Only reports when a Sentry DSN is configured.
        if SENTRY_DSN is not None:
            SENTRY_CLIENT.captureException()
            SENTRY_CLIENT.remote.get_transport().close()
|
{
"content_hash": "4594218ec980c91cf5f1f83fed75bef1",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 95,
"avg_line_length": 32.91025641025641,
"alnum_prop": 0.5909622126996494,
"repo_name": "eventifyio/eventify",
"id": "b5be7217400d014188dfed2052eccdffda079c2a",
"size": "2567",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eventify/drivers/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "70"
},
{
"name": "Python",
"bytes": "43348"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import numpy as np
import pytest
import os
from astropy.units import Quantity
from astropy import units as u
from frb.dlas import approx_avgDM
from frb.dlas import monte_DM
from frb.dlas import monte_tau
def test_approx_avgDM():
    # Scalar input yields a Quantity in pc / cm**3 with a known value.
    avg = approx_avgDM(1.)
    assert isinstance(avg, Quantity)
    assert avg.unit == (u.pc / u.cm ** 3)
    assert np.isclose(avg.value, 0.00651401)
    # Array input returns one DM per redshift.
    avgs = approx_avgDM(np.array([0., 1., 2.]))
    assert len(avgs) == 3
    # Redshifts beyond the supported range raise.
    with pytest.raises(IOError):
        approx_avgDM(10.)
def test_monte_DM():
    """ Monte-carlo of DM values
    """
    redshifts = np.array([0., 1., 2.])
    draws = monte_DM(np.array(redshifts))
    # 100 Monte-Carlo samples per input redshift.
    assert draws.shape[0] == 100
    assert draws.shape[1] == redshifts.size
def test_monte_tau():
    """ Monte-carlo of temporal broadening
    """
    redshifts = np.array([0., 1., 2.])
    broadening = monte_tau(np.array(redshifts))
    # One column of samples per input redshift.
    assert broadening.shape[1] == len(redshifts)
|
{
"content_hash": "0bfd37e67e312b17c1502e2390b51012",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 23.34090909090909,
"alnum_prop": 0.6465433300876339,
"repo_name": "FRBs/DM",
"id": "5ce586463b76644df5bb6f0b37199b016ced47c1",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/new_spectra",
"path": "frb/tests/test_frbdlas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8616"
}
],
"symlink_target": ""
}
|
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
from direct.task import Task
from otp.namepanel import NameTumbler
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
from toontown.fishing import FishSellGUI
from toontown.pets import Pet, PetConstants
from toontown.pets import PetDNA
from toontown.pets import PetUtil
from toontown.pets import PetDetail
from toontown.pets import PetTraits
from toontown.pets import PetNameGenerator
from toontown.hood import ZoneUtil
import string
import random
# Dialog state identifiers for the pet shop GUI flow.
Dialog_MainMenu = 0
Dialog_AdoptPet = 1
Dialog_ChoosePet = 2
Dialog_ReturnPet = 3
Dialog_SellFish = 4
Dialog_NamePicker = 5
Dialog_GoHome = 6
# Shared colors used by the pet shop buttons and labels.
disabledImageColor = Vec4(0.6, 0.6, 0.6, 1)
text0Color = Vec4(0.65, 0, 0.87, 1)
text1Color = Vec4(0.65, 0, 0.87, 1)
text2Color = Vec4(1, 1, 0.5, 1)
text3Color = Vec4(0.4, 0.4, 0.4, 1)
class PetshopGUI(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGui')
    class GoHomeDlg(DirectFrame):
        notify = DirectNotifyGlobal.directNotify.newCategory('GoHomeDlg')

        def __init__(self, doneEvent):
            # Yes/No confirmation dialog; clicking a button sends doneEvent
            # with 1 (yes) or 0 (no).
            DirectFrame.__init__(self, pos=(0.0, 0.0, 0.0), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.0, 1.0, 0.6), text='', text_wordwrap=13.5, text_scale=0.06, text_pos=(0.0, 0.13))
            self['image'] = DGG.getDefaultDialogGeom()
            self['text'] = TTLocalizer.PetshopGoHomeText
            buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
            gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
            self.bYes = DirectButton(self, image=(buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')), relief=None, text=TTLocalizer.TutorialYes, text_scale=0.05, text_pos=(0.0, -0.1), pos=(-0.15, 0.0, -0.1), command=lambda : messenger.send(doneEvent, [1]))
            self.bNo = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.TutorialNo, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0.15, 0.0, -0.1), command=lambda : messenger.send(doneEvent, [0]))
            # Release the temporary model nodes loaded for button art.
            buttons.removeNode()
            gui.removeNode()
            return
    class NamePicker(DirectFrame):
        notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.NamePicker')

        def __init__(self, doneEvent, petSeed, gender):
            # Panel for picking an adopted pet's name. Sends doneEvent with
            # the chosen name's id, or -1 on cancel.
            # The pet's look is derived deterministically from petSeed and
            # the current safe zone.
            zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
            name, dna, traitSeed = PetUtil.getPetInfoFromSeed(petSeed, zoneId)
            self.gui = loader.loadModel('phase_4/models/gui/PetNamePanel')
            self.guiScale = 0.09
            DirectFrame.__init__(self, relief=None, geom=self.gui, geom_scale=self.guiScale, state='normal', frameSize=(-1, 1, -1, 1))
            self.initialiseoptions(PetshopGUI.NamePicker)
            # Live preview of the pet's head inside the panel.
            self.petView = self.attachNewNode('petView')
            self.petView.setPos(-0.21, 0, -0.04)
            self.petModel = Pet.Pet(forGui=1)
            self.petModel.setDNA(dna)
            self.petModel.fitAndCenterHead(0.435, forGui=1)
            self.petModel.reparentTo(self.petView)
            self.petModel.setH(225)
            self.petModel.enterNeutralHappy()
            # Candidate pool: gender-specific first names plus neutral ones,
            # alphabetically sorted. gender == 1 selects the boy list.
            self.ng = PetNameGenerator.PetNameGenerator()
            if gender == 1:
                self.allNames = self.ng.boyFirsts
            else:
                self.allNames = self.ng.girlFirsts
            self.allNames += self.ng.neutralFirsts
            self.allNames.sort()
            self.checkNames()
            # One alphabet-list entry per distinct leading letter group.
            self.letters = []
            for name in self.allNames:
                if name[0:TTLocalizer.PGUIcharLength] not in self.letters:
                    self.letters.append(name[0:TTLocalizer.PGUIcharLength])
            self.curLetter = self.letters[0]
            self.curNames = []
            self.curName = ''
            self.alphabetList = self.makeScrollList(self.gui, (-0.012, 0, -0.075), (1, 0.8, 0.8, 1), self.letters, self.makeLabel, [TextNode.ACenter, 'alphabet'], 6)
            self.nameList = None
            self.rebuildNameList()
            self.randomButton = DirectButton(parent=self, relief=None, image=(self.gui.find('**/RandomUpButton'), self.gui.find('**/RandomDownButton'), self.gui.find('**/RandomRolloverButton')), scale=self.guiScale, text=TTLocalizer.RandomButton, text_pos=(-0.8, -5.7), text_scale=0.8, text_fg=text2Color, pressEffect=False, command=self.randomName)
            self.nameResult = DirectLabel(parent=self, relief=None, scale=self.guiScale, text='', text_align=TextNode.ACenter, text_pos=(-1.85, 2.6), text_fg=text0Color, text_scale=0.6, text_wordwrap=8)
            self.submitButton = DirectButton(parent=self, relief=None, image=(self.gui.find('**/SubmitUpButton'), self.gui.find('**/SubmitDownButton'), self.gui.find('**/SubmitRolloverButton')), scale=self.guiScale, text=TTLocalizer.PetshopAdopt, text_pos=(3.3, -5.7), text_scale=TTLocalizer.PGUIsubmitButton, text_fg=text0Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [self.ng.returnUniqueID(self.curName)]))
            model = loader.loadModel('phase_4/models/gui/PetShopInterface')
            modelScale = 0.1
            cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelButtonRollover'))
            cancelIcon = model.find('**/CancelIcon')
            self.cancelButton = DirectButton(parent=self, relief=None, pos=(-0.04, 0, -0.47), image=cancelImageList, geom=cancelIcon, scale=modelScale, pressEffect=False, command=lambda : messenger.send(doneEvent, [-1]))
            # Start with a random selection so the panel is never blank.
            self.randomName()
            return
        def checkNames(self):
            # Dev-only sanity check: warn about generated names containing
            # anything other than letters and spaces.
            if __dev__:
                for name in self.allNames:
                    if not name.replace(' ', '').isalpha():
                        self.notify.warning('Bad name:%s' % name)
        def destroy(self):
            # Release the preview pet model before tearing down the frame.
            self.petModel.delete()
            DirectFrame.destroy(self)
        def rebuildNameList(self):
            # Rebuild the name scroll list to show only the names whose
            # leading letter group matches the current selection.
            self.curNames = []
            for name in self.allNames:
                if name[0:TTLocalizer.PGUIcharLength] == self.curLetter:
                    self.curNames += [name]
            if self.nameList:
                self.nameList.destroy()
            self.nameList = self.makeScrollList(self.gui, (0.277, 0, -0.075), (1, 0.8, 0.8, 1), self.curNames, self.makeLabel, [TextNode.ACenter, 'name'], 5)
        def updateNameText(self):
            # Reflect the currently selected name in the result label.
            self.nameResult['text'] = self.curName
        def nameClickedOn(self, listType, index):
            # Shared click handler for both scroll lists; listType is
            # 'alphabet' (letter groups) or 'name' (actual names).
            if listType == 'alphabet':
                self.curLetter = self.letters[index]
                self.rebuildNameList()
            elif listType == 'name':
                self.curName = self.curNames[index]
                self.updateNameText()
        def makeLabel(self, te, index, others):
            # Build one clickable text row for a scroll list. 'others' packs
            # the text alignment and which list ('alphabet' or 'name') owns it.
            alig = others[0]
            listName = others[1]
            if alig == TextNode.ARight:
                newpos = (0.44, 0, 0)
            elif alig == TextNode.ALeft:
                newpos = (0, 0, 0)
            else:
                newpos = (0.2, 0, 0)
            # NOTE(review): newpos is computed but never used — text_pos is
            # hard-coded below. Confirm whether it was meant to be passed.
            df = DirectButton(parent=self, state='normal', relief=None, text=te, text_scale=0.1, text_pos=(0.2, 0, 0), text_align=alig, textMayChange=0, command=lambda : self.nameClickedOn(listName, index))
            return df
        def makeScrollList(self, gui, ipos, mcolor, nitems, nitemMakeFunction, nitemMakeExtraArgs, nVisibleItems):
            # Factory for both panel scroll lists; the alphabet and name
            # lists differ only in arrow artwork and per-item height.
            # NOTE(review): mcolor is currently unused in this body.
            decScale = self.guiScale / 0.44
            incScale = (decScale, decScale, -decScale)
            it = nitems[:]
            listType = nitemMakeExtraArgs[1]
            if listType == 'alphabet':
                arrowList = (gui.find('**/ArrowSmUpButton'),
                             gui.find('**/ArrowSmUpRollover'),
                             gui.find('**/ArrowSmUpRollover'),
                             gui.find('**/ArrowSmUpButton'))
                fHeight = 0.09
            elif listType == 'name':
                arrowList = (gui.find('**/ArrowUpBigButton'),
                             gui.find('**/ArrowUpBigRollover'),
                             gui.find('**/ArrowUpBigRollover'),
                             gui.find('**/ArrowUpBigButton'))
                fHeight = 0.119
            ds = DirectScrolledList(parent=self, items=it, itemMakeFunction=nitemMakeFunction, itemMakeExtraArgs=nitemMakeExtraArgs, relief=None, command=None, pos=ipos, scale=0.44, incButton_image=arrowList, incButton_image_pos=(1.015, 0, 3.32), incButton_relief=None, incButton_scale=incScale, incButton_image3_color=Vec4(0.4, 0.4, 0.4, 1), decButton_image=arrowList, decButton_image_pos=(1.015, 0, 1.11), decButton_relief=None, decButton_scale=decScale, decButton_image3_color=Vec4(0.4, 0.4, 0.4, 1), numItemsVisible=nVisibleItems, forceHeight=fHeight)
            return ds
        def randomName(self):
            # Pick a uniformly random name, then sync both scroll lists and
            # the result label to the new selection.
            numNames = len(self.allNames)
            self.curName = self.allNames[random.randrange(numNames)]
            self.curLetter = self.curName[0:TTLocalizer.PGUIcharLength]
            self.rebuildNameList()
            self.updateNameText()
            self.alphabetList.scrollTo(self.letters.index(self.curLetter))
            self.nameList.scrollTo(self.curNames.index(self.curName))
class MainMenuDlg(DirectFrame):
    """Top-level pet shop menu: cancel, sell fish, adopt a pet, or return a pet.

    Fires *doneEvent* with 0 (cancel), 1 (sell fish), 2 (adopt) or 3 (return).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.MainMenuDlg')

    def __init__(self, doneEvent):
        model = loader.loadModel('phase_4/models/gui/AdoptReturnSell')
        modelPos = (0, 0, -0.3)
        modelScale = 0.055
        DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=(modelScale, modelScale, modelScale), pos=modelPos, frameSize=(-1, 1, -1, 1))
        self.initialiseoptions(PetshopGUI.MainMenuDlg)
        textScale = TTLocalizer.PGUItextScale
        # Button image tuples are (up, down, rollover, disabled).
        sellFishImageList = (model.find('**/SellButtonUp'),
                             model.find('**/SellButtonDown'),
                             model.find('**/SellButtonRollover'),
                             model.find('**/SellButtonDown'))
        fishLogoImageList = model.find('**/Fish')
        # NOTE(review): lowercase 'cancelButtonDown' is inconsistent with the
        # 'CancelButton*' casing used elsewhere -- confirm the node name
        # actually present in the AdoptReturnSell model.
        cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/cancelButtonDown'), model.find('**/CancelButtonRollover'))
        XImageList = model.find('**/CancelIcon')
        adoptImageList = (model.find('**/AdoptButtonUp'), model.find('**/AdoptButtonDown'), model.find('**/AdoptButtonRollover'))
        pawLogoAdoptImageList = model.find('**/PawPink')
        returnImageList = (model.find('**/ReturnButtonUp'),
                           model.find('**/ReturnButtonDown'),
                           model.find('**/ReturnButtonRollover'),
                           model.find('**/ReturnButtonDown'))
        pawLogoReturnImageList = model.find('**/PawYellow')
        self.cancelButton = DirectButton(parent=self, relief=None, scale=(modelScale, modelScale, modelScale), geom=XImageList, image=cancelImageList, text=('', TTLocalizer.PetshopCancel), text_pos=TTLocalizer.PGUIcancelButtonPos, text_scale=0.8, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
        self.sellFishButton = DirectButton(parent=self, relief=None, image=sellFishImageList, image3_color=disabledImageColor, geom=fishLogoImageList, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopSell, text_scale=textScale, text_pos=(0, 6), text0_fg=text2Color, text1_fg=text2Color, text2_fg=text0Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
        fishValue = base.localAvatar.fishTank.getTotalValue()
        # Nothing to sell: grey out the sell-fish option.
        if fishValue == 0:
            self.sellFishButton['state'] = DGG.DISABLED
        self.adoptPetButton = DirectButton(parent=self, relief=None, image=adoptImageList, geom=pawLogoAdoptImageList, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopAdoptAPet, text_scale=textScale, text_pos=(0, 12.5), text0_fg=text0Color, text1_fg=text1Color, text2_fg=text2Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [2]))
        self.returnPetButton = DirectButton(parent=self, relief=None, image=returnImageList, geom=pawLogoReturnImageList, image3_color=disabledImageColor, scale=(modelScale, modelScale, modelScale), text=TTLocalizer.PetshopReturnPet, text_scale=textScale, text_pos=(-0.6, 9.2), text0_fg=text2Color, text1_fg=text2Color, text2_fg=text0Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [3]))
        # Can't return a pet the player does not own.
        if not base.localAvatar.hasPet():
            self.returnPetButton['state'] = DGG.DISABLED
        # Geometry has been copied into the buttons; free the source model.
        model.removeNode()
        return
class AdoptPetDlg(DirectFrame):
    """Confirmation dialog for adopting a pet.

    Shows the pet's head, the chosen name, the cost and the player's money.
    Fires *doneEvent* with 0 (go back) or 1 (confirm adoption).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.AdoptPetDlg')

    def __init__(self, doneEvent, petSeed, petNameIndex):
        zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
        name, dna, traitSeed = PetUtil.getPetInfoFromSeed(petSeed, zoneId)
        # The seed-derived name is replaced by the name the player picked.
        name = PetNameGenerator.PetNameGenerator().getName(petNameIndex)
        cost = PetUtil.getPetCostFromSeed(petSeed, zoneId)
        model = loader.loadModel('phase_4/models/gui/AdoptPet')
        modelPos = (0, 0, -0.3)
        modelScale = 0.055
        DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopAdoptConfirm % (name, cost), text_wordwrap=12, text_scale=0.05, text_pos=(0, 0.55), text_fg=text0Color)
        self.initialiseoptions(PetshopGUI.AdoptPetDlg)
        # Small 3D viewport showing the pet's head.
        self.petView = self.attachNewNode('petView')
        self.petView.setPos(-0.13, 0, 0.8)
        self.petModel = Pet.Pet(forGui=1)
        self.petModel.setDNA(dna)
        self.petModel.fitAndCenterHead(0.395, forGui=1)
        self.petModel.reparentTo(self.petView)
        self.petModel.setH(130)
        self.petModel.enterNeutralHappy()
        self.moneyDisplay = DirectLabel(parent=self, relief=None, text=str(base.localAvatar.getTotalMoney()), text_scale=0.075, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.225, 0.33), text_font=ToontownGlobals.getSignFont())
        # Keep the money readout live while the dialog is open.
        self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
        self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__moneyChange)
        okImageList = (model.find('**/CheckButtonUp'), model.find('**/CheckButtonDown'), model.find('**/CheckButtonRollover'))
        cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelRollover'))
        cancelIcon = model.find('**/CancelIcon')
        checkIcon = model.find('**/CheckIcon')
        self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, text=('', TTLocalizer.PetshopGoBack), text_pos=(-5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
        self.okButton = DirectButton(parent=self, relief=None, image=okImageList, geom=checkIcon, scale=modelScale, text=('', TTLocalizer.PetshopAdopt), text_pos=(5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
        model.removeNode()
        return

    def destroy(self):
        # Unhook the money listeners before tearing down the frame.
        self.ignore(localAvatar.uniqueName('moneyChange'))
        self.ignore(localAvatar.uniqueName('bankMoneyChange'))
        self.petModel.delete()
        DirectFrame.destroy(self)

    def __moneyChange(self, money):
        # *money* is the event payload; the display shows the combined total.
        self.moneyDisplay['text'] = str(base.localAvatar.getTotalMoney())
class ReturnPetDlg(DirectFrame):
    """Confirmation dialog for returning the player's current pet.

    The pet's details are fetched asynchronously via PetDetail; the dialog
    body is only built once the loaded avatar arrives in showDialog().
    Fires *doneEvent* with 0 (go back) or 1 (confirm return).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.ReturnPetDlg')

    def __init__(self, doneEvent):

        def showDialog(avatar):
            # Callback from PetDetail once the pet avatar is available.
            model = loader.loadModel('phase_4/models/gui/ReturnPet')
            modelPos = (0, 0, -0.3)
            modelScale = (0.055, 0.055, 0.055)
            base.r = self  # NOTE(review): looks like a leftover debug hook -- confirm before removing
            DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopReturnConfirm % avatar.getName(), text_wordwrap=12, text_scale=TTLocalizer.PGUIreturnConfirm, text_pos=(0, 0.45), text_fg=text2Color)
            self.initialiseoptions(PetshopGUI.ReturnPetDlg)
            okImageList = (model.find('**/CheckButtonUp'), model.find('**/CheckButtonDown'), model.find('**/CheckRollover'))
            cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelRollover'))
            cancelIcon = model.find('**/CancelIcon')
            checkIcon = model.find('**/CheckIcon')
            self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, text=('', TTLocalizer.PetshopGoBack), text_pos=(-5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [0]))
            self.okButton = DirectButton(parent=self, relief=None, image=okImageList, geom=checkIcon, scale=modelScale, text=('', TTLocalizer.PetshopReturn), text_pos=(5.8, 4.4), text_scale=0.7, pressEffect=False, command=lambda : messenger.send(doneEvent, [1]))
            # Small 3D viewport showing the (sad) pet's head.
            self.petView = self.attachNewNode('petView')
            self.petView.setPos(-0.15, 0, 0.8)
            avatar.announceGenerate()
            self.petModel = Pet.Pet(forGui=1)
            self.petModel.setDNA(avatar.getDNA())
            self.petModel.fitAndCenterHead(0.395, forGui=1)
            self.petModel.reparentTo(self.petView)
            self.petModel.setH(130)
            self.petModel.enterNeutralSad()
            model.removeNode()
            self.initialized = True
            return

        self.initialized = False
        self.petPanel = PetDetail.PetDetail(base.localAvatar.getPetId(), showDialog)

    def destroy(self):
        # Only tear down what showDialog() actually created.
        if self.initialized:
            self.petPanel.avatar.disable()
            self.petPanel.avatar.delete()
            self.petPanel.avatar = None
            # Bug fix: this previously assigned to self.PetPanel (capital P),
            # creating a new attribute and never clearing the real panel.
            self.petPanel = None
            self.petModel.delete()
            DirectFrame.destroy(self)
        return
class ChoosePetDlg(DirectFrame):
    """Carousel dialog for browsing the pets currently offered for adoption.

    Fires *doneEvent* with the index of the chosen pet, or -1 on cancel.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetshopGUI.ChoosePetDlg')

    def __init__(self, doneEvent, petSeeds):
        model = loader.loadModel('phase_4/models/gui/PetShopInterface')
        modelPos = (0, 0, -0.9)
        modelScale = (0.185, 0.185, 0.185)
        DirectFrame.__init__(self, relief=None, state='normal', geom=model, geom_scale=modelScale, frameSize=(-1, 1, -1, 1), pos=modelPos, text=TTLocalizer.PetshopChooserTitle, text_wordwrap=26, text_scale=TTLocalizer.PGUIchooserTitle, text_fg=Vec4(0.36, 0.94, 0.93, 1), text_pos=(0, 1.58))
        self.initialiseoptions(PetshopGUI.ChoosePetDlg)
        adoptImageList = (model.find('**/AdoptButtonUp'),
                          model.find('**/AdoptButtonDown'),
                          model.find('**/AdoptButtonRollover'),
                          model.find('**/AdoptButtonRollover'))
        cancelImageList = (model.find('**/CancelButtonUp'), model.find('**/CancelButtonDown'), model.find('**/CancelButtonRollover'))
        cancelIcon = model.find('**/CancelIcon')
        pawLImageList = (model.find('**/Paw1Up'), model.find('**/Paw1Down'), model.find('**/Paw1Rollover'))
        pawLArrowImageList = model.find('**/Arrow1')
        pawRImageList = (model.find('**/Paw2Up'), model.find('**/Paw2Down'), model.find('**/Paw2Rollover'))
        pawRArrowImageList = model.find('**/Arrow2')
        self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, geom=cancelIcon, scale=modelScale, pressEffect=False, command=lambda : messenger.send(doneEvent, [-1]))
        # Paw buttons page backward/forward through the pet lineup.
        self.pawLButton = DirectButton(parent=self, relief=None, image=pawLImageList, geom=pawLArrowImageList, scale=modelScale, pressEffect=False, command=lambda : self.__handlePetChange(-1))
        self.pawRButton = DirectButton(parent=self, relief=None, image=pawRImageList, geom=pawRArrowImageList, scale=modelScale, pressEffect=False, command=lambda : self.__handlePetChange(1))
        self.okButton = DirectButton(parent=self, relief=None, image=adoptImageList, image3_color=disabledImageColor, scale=modelScale, text=TTLocalizer.PetshopAdopt, text_scale=TTLocalizer.PGUIokButton, text_pos=TTLocalizer.PGUIokButtonPos, text0_fg=text0Color, text1_fg=text1Color, text2_fg=text2Color, text3_fg=text3Color, pressEffect=False, command=lambda : messenger.send(doneEvent, [self.curPet]))
        self.moneyDisplay = DirectLabel(parent=self, relief=None, text=str(base.localAvatar.getTotalMoney()), text_scale=0.1, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.34, 0.12), text_font=ToontownGlobals.getSignFont())
        # Keep the money readout live; it also gates the adopt button.
        self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
        self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__moneyChange)
        self.petView = self.attachNewNode('petView')
        self.petView.setPos(-0.05, 0, 1.15)
        model.removeNode()
        self.petSeeds = petSeeds
        self.makePetList()
        self.showPet()
        return

    def makePetList(self):
        """Precompute DNA, name, description and cost for every offered pet."""
        self.numPets = len(self.petSeeds)
        self.curPet = 0
        self.petDNA = []
        self.petName = []
        self.petDesc = []
        self.petCost = []
        for i in xrange(self.numPets):
            # NOTE(review): reseeds the global RNG so seed-derived data below
            # is reproducible for each pet -- this affects random elsewhere.
            random.seed(self.petSeeds[i])
            zoneId = ZoneUtil.getCanonicalSafeZoneId(base.localAvatar.getZoneId())
            name, dna, traitSeed = PetUtil.getPetInfoFromSeed(self.petSeeds[i], zoneId)
            cost = PetUtil.getPetCostFromSeed(self.petSeeds[i], zoneId)
            traits = PetTraits.PetTraits(traitSeed, zoneId)
            traitList = traits.getExtremeTraitDescriptions()
            numGenders = len(PetDNA.PetGenders)
            # Alternate genders across the lineup.
            gender = i % numGenders
            PetDNA.setGender(dna, gender)
            self.petDNA.append(dna)
            self.petName.append(TTLocalizer.PetshopUnknownName)
            descList = []
            descList.append(TTLocalizer.PetshopDescGender % PetDNA.getGenderString(gender=gender))
            if traitList:
                descList.append(TTLocalizer.PetshopDescTrait % traitList[0])
            else:
                descList.append(TTLocalizer.PetshopDescTrait % TTLocalizer.PetshopDescStandard)
            # Pad so the slice below always yields three (possibly blank) rows.
            traitList.extend(['',
                              '',
                              '',
                              ''])
            for trait in traitList[1:4]:
                descList.append('\t%s' % trait)
            descList.append(TTLocalizer.PetshopDescCost % cost)
            self.petDesc.append('\n'.join(descList))
            self.petCost.append(cost)

    def destroy(self):
        self.ignore(localAvatar.uniqueName('moneyChange'))
        self.ignore(localAvatar.uniqueName('bankMoneyChange'))
        # nameLabel/descLabel are parented to this frame and are destroyed
        # with it by DirectFrame.destroy(); the pet model is not.
        self.petModel.delete()
        DirectFrame.destroy(self)

    def __handlePetChange(self, nDir):
        # Step through the lineup, wrapping around at either end.
        self.curPet = (self.curPet + nDir) % self.numPets
        self.nameLabel.destroy()
        self.petModel.delete()
        self.descLabel.destroy()
        self.showPet()

    def showPet(self):
        """Display the current pet's name, head model and description."""
        self.nameLabel = DirectLabel(parent=self, pos=(0, 0, 1.35), relief=None, text=self.petName[self.curPet], text_fg=Vec4(0.45, 0, 0.61, 1), text_pos=(0, 0), text_scale=0.08, text_shadow=(1, 1, 1, 1))
        self.petModel = Pet.Pet(forGui=1)
        self.petModel.setDNA(self.petDNA[self.curPet])
        self.petModel.fitAndCenterHead(0.57, forGui=1)
        self.petModel.reparentTo(self.petView)
        self.petModel.setH(130)
        self.petModel.enterNeutralHappy()
        self.descLabel = DirectLabel(parent=self, pos=(-0.4, 0, 0.72), relief=None, scale=0.05, text=self.petDesc[self.curPet], text_align=TextNode.ALeft, text_wordwrap=TTLocalizer.PGUIwordwrap, text_scale=TTLocalizer.PGUIdescLabel)
        # Adoption is only possible if the player can afford this pet.
        if self.petCost[self.curPet] > base.localAvatar.getTotalMoney():
            self.okButton['state'] = DGG.DISABLED
        else:
            self.okButton['state'] = DGG.NORMAL
        return

    def __moneyChange(self, money):
        # *money* is the event payload; the display shows the combined total.
        self.moneyDisplay['text'] = str(base.localAvatar.getTotalMoney())
def __init__(self, eventDict, petSeeds):
    """Set up the pet shop GUI state machine and open the main menu."""
    self.eventDict = eventDict
    # Done-event names for each sub-dialog, bound as attributes.
    for attrName, eventName in (('mainMenuDoneEvent', 'MainMenuGuiDone'),
                                ('adoptPetDoneEvent', 'AdoptPetGuiDone'),
                                ('returnPetDoneEvent', 'ReturnPetGuiDone'),
                                ('petChooserDoneEvent', 'PetChooserGuiDone'),
                                ('fishGuiDoneEvent', 'MyFishGuiDone'),
                                ('namePickerDoneEvent', 'NamePickerGuiDone'),
                                ('goHomeDlgDoneEvent', 'GoHomeDlgDone')):
        setattr(self, attrName, eventName)
    self.dialog = None
    self.dialogStack = []
    self.petSeeds = petSeeds
    # Clerk countdown in the corner; expiry forcibly closes the GUI.
    self.timer = ToontownTimer.ToontownTimer()
    self.timer.reparentTo(aspect2d)
    self.timer.posInTopRightCorner()
    self.timer.countdown(PetConstants.PETCLERK_TIMER, self.__timerExpired)
    self.doDialog(Dialog_MainMenu)
    return
def __timerExpired(self):
    # The clerk got tired of waiting; close the whole GUI (True = timed out).
    messenger.send(self.eventDict['guiDone'], [True])
def destroy(self):
    """Tear down the active dialog, the timer, and every event hook."""
    self.destroyDialog()
    self.timer.destroy()
    del self.timer
    # Drop all sub-dialog completion hooks in one pass.
    for event in (self.mainMenuDoneEvent,
                  self.adoptPetDoneEvent,
                  self.returnPetDoneEvent,
                  self.petChooserDoneEvent,
                  self.fishGuiDoneEvent,
                  self.namePickerDoneEvent,
                  self.goHomeDlgDoneEvent):
        self.ignore(event)
def destroyDialog(self):
    """Destroy the currently active dialog, if there is one."""
    if self.dialog is None:
        return
    self.dialog.destroy()
    self.dialog = None
def popDialog(self):
    # Drop the current dialog from the stack, then pop the previous one and
    # re-enter it; doDialog() pushes it back on, so the net effect is going
    # back exactly one level.
    self.dialogStack.pop()
    self.doDialog(self.dialogStack.pop())
def doDialog(self, nDialog):
    """Replace the active dialog with *nDialog* and push it on the stack.

    Each branch hooks the dialog's done-event exactly once before creating
    the dialog, so the handler fires for this instance only.
    """
    self.destroyDialog()
    self.dialogStack.append(nDialog)
    if nDialog == Dialog_MainMenu:
        self.acceptOnce(self.mainMenuDoneEvent, self.__handleMainMenuDlg)
        self.dialog = self.MainMenuDlg(self.mainMenuDoneEvent)
    elif nDialog == Dialog_AdoptPet:
        self.acceptOnce(self.adoptPetDoneEvent, self.__handleAdoptPetDlg)
        self.dialog = self.AdoptPetDlg(self.adoptPetDoneEvent, self.petSeeds[self.adoptPetNum], self.adoptPetNameIndex)
    elif nDialog == Dialog_ChoosePet:
        self.acceptOnce(self.petChooserDoneEvent, self.__handleChoosePetDlg)
        self.dialog = self.ChoosePetDlg(self.petChooserDoneEvent, self.petSeeds)
    elif nDialog == Dialog_ReturnPet:
        self.acceptOnce(self.returnPetDoneEvent, self.__handleReturnPetDlg)
        self.dialog = self.ReturnPetDlg(self.returnPetDoneEvent)
    elif nDialog == Dialog_SellFish:
        self.acceptOnce(self.fishGuiDoneEvent, self.__handleFishSellDlg)
        self.dialog = FishSellGUI.FishSellGUI(self.fishGuiDoneEvent)
    elif nDialog == Dialog_NamePicker:
        self.acceptOnce(self.namePickerDoneEvent, self.__handleNamePickerDlg)
        self.dialog = self.NamePicker(self.namePickerDoneEvent, self.petSeeds[self.adoptPetNum], gender=self.adoptPetNum % 2)
    elif nDialog == Dialog_GoHome:
        self.acceptOnce(self.goHomeDlgDoneEvent, self.__handleGoHomeDlg)
        self.dialog = self.GoHomeDlg(self.goHomeDlgDoneEvent)
def __handleMainMenuDlg(self, exitVal):
    """Route the main-menu choice: 0 quits, 1-3 open the matching dialog."""
    if exitVal == 0:
        messenger.send(self.eventDict['guiDone'])
        return
    nextDialog = {1: Dialog_SellFish,
                  2: Dialog_ChoosePet,
                  3: Dialog_ReturnPet}.get(exitVal)
    if nextDialog is not None:
        self.doDialog(nextDialog)
def __handleFishSellDlg(self, exitVal):
    """Handle the fish-selling dialog result."""
    if exitVal == 0:
        # Backed out: return to the previous dialog.
        self.popDialog()
        return
    if exitVal == 1:
        # Fish sold: close the dialog and notify the shop.
        self.destroyDialog()
        messenger.send(self.eventDict['fishSold'])
def __handleChoosePetDlg(self, exitVal):
    """A pet was chosen (index in exitVal) or the chooser was cancelled (-1)."""
    if exitVal == -1:
        self.popDialog()
        return
    # Remember the chosen pet and move on to picking a name for it.
    self.adoptPetNum = exitVal
    self.doDialog(Dialog_NamePicker)
def __handleNamePickerDlg(self, exitVal):
    """Store the picked name index and continue the adoption flow."""
    if exitVal == -1:
        self.popDialog()
        return
    self.adoptPetNameIndex = exitVal
    # A player may own only one pet: the old one must be returned first.
    nextDialog = Dialog_ReturnPet if base.localAvatar.hasPet() else Dialog_AdoptPet
    self.doDialog(nextDialog)
def __handleAdoptPetDlg(self, exitVal):
    """Finalize (1) or back out of (0) the adoption confirmation."""
    if exitVal == 0:
        self.popDialog()
        return
    if exitVal == 1:
        self.destroyDialog()
        # Tell the shop which pet and which name were chosen, then close.
        messenger.send(self.eventDict['petAdopted'], [self.adoptPetNum, self.adoptPetNameIndex])
        messenger.send(self.eventDict['guiDone'])
def __handleGoHomeDlg(self, exitVal):
    """Close the GUI; on confirm (exitVal == 1) also send the player home."""
    if exitVal == 0:
        messenger.send(self.eventDict['guiDone'])
    elif exitVal == 1:
        messenger.send(self.eventDict['guiDone'])
        place = base.cr.playGame.getPlace()
        # Idiom fix: compare against None with 'is', not '=='.
        if place is None:
            self.notify.warning('Tried to go home, but place is None.')
            return
        place.goHomeNow(base.localAvatar.lastHood)
    return
def __handleReturnPetDlg(self, exitVal):
    """Handle the return-pet confirmation result.

    When the return was reached from the adoption flow (the previous entry
    on the dialog stack is the name picker), continue on to the adoption
    confirmation; otherwise the return stands alone and the GUI closes.
    """
    if exitVal == 0:
        self.popDialog()
    elif exitVal == 1:
        # Idiom fix: negative indexing replaces len(...) - 2 arithmetic.
        if self.dialogStack[-2] == Dialog_NamePicker:
            self.doDialog(Dialog_AdoptPet)
        else:
            self.destroyDialog()
            messenger.send(self.eventDict['petReturned'])
            messenger.send(self.eventDict['guiDone'])
|
{
"content_hash": "d58fce570ca50dad224af726840bf2eb",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 555,
"avg_line_length": 57.75568181818182,
"alnum_prop": 0.635284472864404,
"repo_name": "Spiderlover/Toontown",
"id": "affb7ee9c825631aaeca090664ffaf8a1e93b7f3",
"size": "30495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/pets/PetshopGUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
"""
Support for Russound multizone controllers using RIO Protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.russound_rio/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.media_player import (
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice, PLATFORM_SCHEMA,
MEDIA_TYPE_MUSIC)
from homeassistant.const import (
CONF_HOST, CONF_PORT, STATE_OFF, STATE_ON,
CONF_NAME, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
# Library used to talk to the controller (installed by Home Assistant).
REQUIREMENTS = ['russound_rio==0.1.4']
_LOGGER = logging.getLogger(__name__)
# Feature set every Russound RIO zone exposes.
SUPPORT_RUSSOUND = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
# Platform configuration: host and name are required; port defaults to 9621.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_NAME): cv.string,
    vol.Optional(CONF_PORT, default=9621): cv.port,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Russound RIO platform.

    Connects to the controller, enumerates its sources and zones, and adds
    one media player entity per zone.
    """
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    from russound_rio import Russound
    russ = Russound(hass.loop, host, port)
    yield from russ.connect()
    # Discover sources
    sources = yield from russ.enumerate_sources()
    # Discover zones
    valid_zones = yield from russ.enumerate_zones()
    devices = []
    for zone_id, name in valid_zones:
        # Subscribe to push updates for the zone before exposing it.
        yield from russ.watch_zone(zone_id)
        dev = RussoundZoneDevice(russ, zone_id, name, sources)
        devices.append(dev)

    @callback
    def on_stop(event):
        """Shutdown cleanly when hass stops."""
        hass.loop.create_task(russ.close())

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop)
    async_add_devices(devices)
class RussoundZoneDevice(MediaPlayerDevice):
    """Representation of a Russound Zone.

    All state is read from the client library's cache and refreshed via the
    zone/source callbacks, so no polling is needed.
    """

    def __init__(self, russ, zone_id, name, sources):
        """Initialize the zone device."""
        super().__init__()
        self._name = name
        self._russ = russ
        self._zone_id = zone_id
        # List of (source_id, name) pairs discovered at setup time.
        self._sources = sources

    def _zone_var(self, name, default=None):
        # Read a cached variable of this zone.
        return self._russ.get_cached_zone_variable(self._zone_id,
                                                   name,
                                                   default)

    def _source_var(self, name, default=None):
        # Read a cached variable of the zone's currently selected source.
        current = int(self._zone_var('currentsource', 0))
        if current:
            return self._russ.get_cached_source_variable(
                current, name, default)
        return default

    def _source_na_var(self, name):
        """Will replace invalid values with None."""
        current = int(self._zone_var('currentsource', 0))
        if current:
            value = self._russ.get_cached_source_variable(
                current, name, None)
            # "------" is the controller's placeholder for "not available".
            if value in (None, "", "------"):
                return None
            return value
        else:
            return None

    def _zone_callback_handler(self, zone_id, *args):
        # Only react to updates for this entity's own zone.
        if zone_id == self._zone_id:
            self.schedule_update_ha_state()

    def _source_callback_handler(self, source_id, *args):
        # Only react to updates for the source this zone is playing.
        current = int(self._zone_var('currentsource', 0))
        if source_id == current:
            self.schedule_update_ha_state()

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Register callback handlers."""
        self._russ.add_zone_callback(self._zone_callback_handler)
        self._russ.add_source_callback(self._source_callback_handler)

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the zone."""
        return self._zone_var('name', self._name)

    @property
    def state(self):
        """Return the state of the device."""
        # NOTE(review): any status other than 'ON'/'OFF' yields None
        # (unknown) -- confirm that is intended.
        status = self._zone_var('status', "OFF")
        if status == 'ON':
            return STATE_ON
        elif status == 'OFF':
            return STATE_OFF

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_RUSSOUND

    @property
    def source(self):
        """Get the currently selected source."""
        return self._source_na_var('name')

    @property
    def source_list(self):
        """Return a list of available input sources."""
        return [x[1] for x in self._sources]

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._source_na_var('songname')

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return self._source_na_var('artistname')

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return self._source_na_var('albumname')

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._source_na_var('coverarturl')

    @property
    def volume_level(self):
        """Volume level of the media player (0..1).

        Value is returned based on a range (0..50).
        Therefore float divide by 50 to get to the required range.
        """
        return float(self._zone_var('volume', 0)) / 50.0

    def async_turn_off(self):
        """Turn off the zone."""
        return self._russ.send_zone_event(self._zone_id,
                                          "ZoneOff")

    def async_turn_on(self):
        """Turn on the zone."""
        return self._russ.send_zone_event(self._zone_id,
                                          "ZoneOn")

    def async_set_volume_level(self, volume):
        """Set the volume level."""
        # Convert HA's 0..1 range back to the device's 0..50 range.
        rvol = int(volume * 50.0)
        return self._russ.send_zone_event(self._zone_id,
                                          "KeyPress",
                                          "Volume",
                                          rvol)

    def async_select_source(self, source):
        """Select the source input for this zone."""
        # Match the requested source name case-insensitively.
        for source_id, name in self._sources:
            if name.lower() != source.lower():
                continue
            return self._russ.send_zone_event(
                self._zone_id, "SelectSource", source_id)
|
{
"content_hash": "62d121ae2d5df3cbb6977d839fc27cec",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 31.323943661971832,
"alnum_prop": 0.5912769784172662,
"repo_name": "stefan-jonasson/home-assistant",
"id": "31b04ceb3cdee8a7116aef9fe85dd4fdaae38593",
"size": "6672",
"binary": false,
"copies": "9",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/russound_rio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4056"
},
{
"name": "Python",
"bytes": "8360711"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12658"
}
],
"symlink_target": ""
}
|
"""Base class for iRobot devices."""
from __future__ import annotations
import asyncio
import logging
from homeassistant.components.vacuum import (
ATTR_STATUS,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_RETURNING,
StateVacuumEntity,
VacuumEntityFeature,
)
from homeassistant.const import STATE_IDLE, STATE_PAUSED
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.entity import DeviceInfo, Entity
import homeassistant.util.dt as dt_util
from . import roomba_reported_state
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
# Names of the extra state attributes exposed by the vacuum entities.
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_ERROR = "error"
ATTR_ERROR_CODE = "error_code"
ATTR_POSITION = "position"
ATTR_SOFTWARE_VERSION = "software_version"
# Commonly supported features
SUPPORT_IROBOT = (
    VacuumEntityFeature.BATTERY
    | VacuumEntityFeature.PAUSE
    | VacuumEntityFeature.RETURN_HOME
    | VacuumEntityFeature.SEND_COMMAND
    | VacuumEntityFeature.START
    | VacuumEntityFeature.STATE
    | VacuumEntityFeature.STATUS
    | VacuumEntityFeature.STOP
    | VacuumEntityFeature.LOCATE
)
# Map of the robot's reported mission phase to Home Assistant vacuum states.
STATE_MAP = {
    "": STATE_IDLE,
    "charge": STATE_DOCKED,
    "evac": STATE_RETURNING,  # Emptying at cleanbase
    "hmMidMsn": STATE_CLEANING,  # Recharging at the middle of a cycle
    "hmPostMsn": STATE_RETURNING,  # Cycle finished
    "hmUsrDock": STATE_RETURNING,
    "pause": STATE_PAUSED,
    "run": STATE_CLEANING,
    "stop": STATE_IDLE,
    "stuck": STATE_ERROR,
}
class IRobotEntity(Entity):
    """Base class for iRobot Entities."""

    # State is pushed from the robot via callbacks; no polling needed.
    _attr_should_poll = False

    def __init__(self, roomba, blid):
        """Initialize the iRobot handler."""
        self.vacuum = roomba
        self._blid = blid
        # Snapshot of the robot's last reported state; refreshed on_message().
        self.vacuum_state = roomba_reported_state(roomba)
        self._name = self.vacuum_state.get("name")
        self._version = self.vacuum_state.get("softwareVer")
        self._sku = self.vacuum_state.get("sku")

    @property
    def robot_unique_id(self):
        """Return the uniqueid of the vacuum cleaner."""
        return f"roomba_{self._blid}"

    @property
    def unique_id(self):
        """Return the uniqueid of the vacuum cleaner."""
        return self.robot_unique_id

    @property
    def device_info(self):
        """Return the device info of the vacuum cleaner."""
        connections = None
        # Prefer the wlan0 hardware address; fall back to the reported mac.
        if mac_address := self.vacuum_state.get("hwPartsRev", {}).get(
            "wlan0HwAddr", self.vacuum_state.get("mac")
        ):
            connections = {(dr.CONNECTION_NETWORK_MAC, mac_address)}
        return DeviceInfo(
            connections=connections,
            identifiers={(DOMAIN, self.robot_unique_id)},
            manufacturer="iRobot",
            model=self._sku,
            name=str(self._name),
            sw_version=self._version,
        )

    @property
    def _battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        return self.vacuum_state.get("batPct")

    @property
    def _robot_state(self):
        """Return the state of the vacuum cleaner."""
        clean_mission_status = self.vacuum_state.get("cleanMissionStatus", {})
        cycle = clean_mission_status.get("cycle")
        phase = clean_mission_status.get("phase")
        try:
            state = STATE_MAP[phase]
        except KeyError:
            # Unknown phase strings are surfaced as an error state.
            return STATE_ERROR
        # An active cycle with an idle/docked phase means the robot is paused.
        if cycle != "none" and state in (STATE_IDLE, STATE_DOCKED):
            state = STATE_PAUSED
        return state

    async def async_added_to_hass(self):
        """Register callback function."""
        self.vacuum.register_on_message_callback(self.on_message)

    def new_state_filter(self, new_state):
        """Filter out wifi state messages."""
        # Pure signal-strength updates carry a single "signal" key.
        return len(new_state) > 1 or "signal" not in new_state

    def on_message(self, json_data):
        """Update state on message change."""
        state = json_data.get("state", {}).get("reported", {})
        if self.new_state_filter(state):
            self.schedule_update_ha_state()
class IRobotVacuum(IRobotEntity, StateVacuumEntity):
    """Base class for iRobot robots."""

    def __init__(self, roomba, blid):
        """Initialize the iRobot handler."""
        super().__init__(roomba, blid)
        # Only robots that report cap["pose"] == 1 expose position data.
        self._cap_position = self.vacuum_state.get("cap", {}).get("pose") == 1

    @property
    def supported_features(self):
        """Flag vacuum cleaner robot features that are supported."""
        return SUPPORT_IROBOT

    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        return self._battery_level

    @property
    def state(self):
        """Return the state of the vacuum cleaner."""
        return self._robot_state

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return True  # Always available, otherwise setup will fail

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the device."""
        state = self.vacuum_state
        # Roomba software version
        software_version = state.get("softwareVer")
        # Set properties that are to appear in the GUI
        state_attrs = {ATTR_SOFTWARE_VERSION: software_version}
        # Set legacy status to avoid break changes
        state_attrs[ATTR_STATUS] = self.vacuum.current_state
        # Only add cleaning time and cleaned area attrs when the vacuum is
        # currently on
        if self.state == STATE_CLEANING:
            # Get clean mission status
            (
                state_attrs[ATTR_CLEANING_TIME],
                state_attrs[ATTR_CLEANED_AREA],
            ) = self.get_cleaning_status(state)
        # Error
        if self.vacuum.error_code != 0:
            state_attrs[ATTR_ERROR] = self.vacuum.error_message
            state_attrs[ATTR_ERROR_CODE] = self.vacuum.error_code
        # Not all Roombas expose position data
        # https://github.com/koalazak/dorita980/issues/48
        if self._cap_position:
            pos_state = state.get("pose", {})
            position = None
            pos_x = pos_state.get("point", {}).get("x")
            pos_y = pos_state.get("point", {}).get("y")
            theta = pos_state.get("theta")
            if all(item is not None for item in (pos_x, pos_y, theta)):
                position = f"({pos_x}, {pos_y}, {theta})"
            state_attrs[ATTR_POSITION] = position
        return state_attrs

    def get_cleaning_status(self, state) -> tuple[int, int]:
        """Return the cleaning time and cleaned area from the device."""
        if not (mission_state := state.get("cleanMissionStatus")):
            return (0, 0)
        # Prefer the reported mission minutes; otherwise derive elapsed
        # minutes from the mission start time. (Replaces an awkward
        # walrus-with-pass construct; behavior unchanged.)
        cleaning_time = mission_state.get("mssnM", 0)
        if not cleaning_time and (start_time := mission_state.get("mssnStrtTm")):
            now = dt_util.as_timestamp(dt_util.utcnow())
            if now > start_time:
                cleaning_time = (now - start_time) // 60
        if cleaned_area := mission_state.get("sqft", 0):  # Imperial
            # Convert to m2 if the unit_system is set to metric
            if self.hass.config.units.is_metric:
                cleaned_area = round(cleaned_area * 0.0929)
        return (cleaning_time, cleaned_area)

    def on_message(self, json_data):
        """Update state on message change."""
        state = json_data.get("state", {}).get("reported", {})
        if self.new_state_filter(state):
            _LOGGER.debug("Got new state from the vacuum: %s", json_data)
            self.schedule_update_ha_state()

    async def async_start(self):
        """Start or resume the cleaning task."""
        if self.state == STATE_PAUSED:
            await self.hass.async_add_executor_job(self.vacuum.send_command, "resume")
        else:
            await self.hass.async_add_executor_job(self.vacuum.send_command, "start")

    async def async_stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        await self.hass.async_add_executor_job(self.vacuum.send_command, "stop")

    async def async_pause(self):
        """Pause the cleaning cycle."""
        await self.hass.async_add_executor_job(self.vacuum.send_command, "pause")

    async def async_return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        if self.state == STATE_CLEANING:
            await self.async_pause()
            # Wait (up to ~10s) for the robot to acknowledge the pause;
            # docking is only accepted once paused.
            for _ in range(10):  # idiom fix: range(0, 10) -> range(10)
                if self.state == STATE_PAUSED:
                    break
                await asyncio.sleep(1)
        await self.hass.async_add_executor_job(self.vacuum.send_command, "dock")

    async def async_locate(self, **kwargs):
        """Located vacuum."""
        await self.hass.async_add_executor_job(self.vacuum.send_command, "find")

    async def async_send_command(self, command, params=None, **kwargs):
        """Send raw command."""
        _LOGGER.debug("async_send_command %s (%s), %s", command, params, kwargs)
        await self.hass.async_add_executor_job(
            self.vacuum.send_command, command, params
        )
|
{
"content_hash": "e4d123869267fca6dafdc5934f7db435",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 86,
"avg_line_length": 34.129629629629626,
"alnum_prop": 0.6119370591427021,
"repo_name": "nkgilley/home-assistant",
"id": "f443f72279f44576ae4b93524f08947f359f4e62",
"size": "9215",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/roomba/irobot_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import os
import sys
import csv
import json
# Output formats understood by getHandler(); anything else falls back to CSV.
OUTPUT_FORMATS = ('csv', 'json', 'yara', 'data', 'netflow', )
def getHandler(output_format):
output_format = output_format.lower()
if output_format not in OUTPUT_FORMATS:
print("[WARNING] Invalid output format specified.. using CSV")
output_format = 'csv'
handler_format = "OutputHandler_" + output_format
handler_class = getattr(sys.modules[__name__], handler_format)
return handler_class()
class OutputHandler(object):
    """Base class for output handlers; subclasses override the hooks they need."""
    def print_match(self, fpath, page, name, match):
        # Called once for each IOC match found in fpath.
        pass
    def print_header(self, fpath):
        # Called once before any matches for fpath are emitted.
        pass
    def print_footer(self, fpath):
        # Called once after all matches for fpath have been emitted.
        pass
    def print_error(self, fpath, exception):
        # Default error reporting to stdout; subclasses may override.
        print("[ERROR] %s" % (exception))
class OutputHandler_csv(OutputHandler):
    """Writes one tab-separated row per match (or error) to stdout."""
    def __init__(self):
        # Tab-delimited so IOC values containing commas survive intact.
        self.csv_writer = csv.writer(sys.stdout, delimiter='\t')
    def print_match(self, fpath, page, name, match):
        row = (fpath, page, name, match)
        self.csv_writer.writerow(row)
    def print_error(self, fpath, exception):
        # Errors share the same row shape, with page 0 and type 'error'.
        row = (fpath, '0', 'error', exception)
        self.csv_writer.writerow(row)
class OutputHandler_json(OutputHandler):
    """Emits one JSON object per match (or error) on stdout."""
    def print_match(self, fpath, page, name, match):
        record = {
            'path': fpath,
            'file': os.path.basename(fpath),
            'page': page,
            'type': name,
            'match': match,
        }
        print(json.dumps(record))
    def print_error(self, fpath, exception):
        record = {
            'path': fpath,
            'file': os.path.basename(fpath),
            'type': 'error',
            'exception': exception,
        }
        print(json.dumps(record))
class OutputHandler_yara(OutputHandler):
    """Emits one YARA rule per scanned file.

    Stateful: print_header() must run before print_match(), since it resets
    the per-rule counters; print_footer() closes the rule body.
    """
    def __init__(self):
        # 256-entry translation table: every byte that is not [A-Za-z0-9]
        # maps to '_', so arbitrary file names become legal YARA rule
        # identifiers.  (A 256-char string works as a translate table in
        # both Python 2 and Python 3, since it is indexable by ordinal.)
        self.rule_enc = ''.join(chr(c) if chr(c).isupper() or chr(c).islower() or chr(c).isdigit() else '_' for c in range(256))
    def print_match(self, fpath, page, name, match):
        # Number string ids per indicator type: $IP1, $IP2, $URL1, ...
        # self.cnt / self.sids are (re)initialized by print_header().
        if name in self.cnt:
            self.cnt[name] += 1
        else:
            self.cnt[name] = 1
        string_id = "$%s%d" % (name, self.cnt[name])
        self.sids.append(string_id)
        # Escape backslashes for the YARA string literal.
        string_value = match.replace('\\', '\\\\')
        print("\t\t%s = \"%s\"" % (string_id, string_value))
    def print_header(self, fpath):
        # Rule name is the sanitized base file name (extension stripped).
        rule_name = os.path.splitext(os.path.basename(fpath))[0].translate(self.rule_enc)
        print("rule %s" % (rule_name))
        print("{")
        print("\tstrings:")
        # Reset per-rule state used by print_match()/print_footer().
        self.cnt = {}
        self.sids = []
    def print_footer(self, fpath):
        # The rule fires if any of the collected strings is present.
        cond = ' or '.join(self.sids)
        print("\tcondition:")
        print("\t\t" + cond)
        print("}")
class OutputHandler_data(OutputHandler):
    """Accumulates matches in memory; retrieve them with get_iocs()."""
    def __init__(self):
        self.ioc_records = []
    def print_match(self, fpath, page, name, match):
        record = {
            'path': fpath,
            'file': os.path.basename(fpath),
            'page': page,
            'type': name,
            'match': match,
        }
        self.ioc_records.append(record)
    def get_iocs(self):
        return self.ioc_records
class OutputHandler_netflow(OutputHandler):
    """Emits a tcpdump/BPF-style host filter, one clause per IP match.

    Fixed here: this class used Python-2-only ``print`` statements while
    the rest of the module uses the parenthesized, Python-3-compatible
    form; output is byte-identical.
    """
    def __init__(self):
        # Seed the filter with a never-routable host so every subsequent
        # clause can be appended uniformly as " or host X ".
        print("host 255.255.255.255")
    def print_match(self, fpath, page, name, match):
        # Only IP indicators make sense in a host filter; ignore the rest.
        if name == "IP":
            print(" or host %s " % match)
|
{
"content_hash": "ddedec115c1ca75cb28e21bf3f93b2ba",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 128,
"avg_line_length": 27.770491803278688,
"alnum_prop": 0.5481109799291618,
"repo_name": "ttufts/ioc_parser",
"id": "c4297033bf65d9df47c939b9fac9a30cbda04967",
"size": "3388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17638"
}
],
"symlink_target": ""
}
|
import sqliteDefaults
import os
import extraction_text_manip
os.system("reset")
conn = sqliteDefaults.get_conn("article_extract_db.db")
conn.execute('''CREATE TABLE IF NOT EXISTS `articles_clean` (
`company_or_sector` TEXT,
`article_url` TEXT,
PRIMARY KEY(company_or_sector, article_url)
);
''')
conn.commit()
company_name = 'Infosys'
articles = sqliteDefaults.verified_select_sqlite(conn,
"SELECT DISTINCT article_url, company_name, article_headline, article_text, article_date \
FROM articles \
WHERE company_name='%s' \
and article_url not in (select article_url from articles_clean)\
ORDER BY article_url ASC\
"%(company_name)
)
conn2 = sqliteDefaults.get_conn("extracted_search_urls.db")
# Menu number -> company/sector name.  1..N are companies pulled from the
# source database; the fixed multiples of 100 are sector buckets.
company_dict = {}
temp_table = sqliteDefaults.verified_select_sqlite(conn2,"SELECT DISTINCT ArticleTopic from articleUrls order by ArticleTopic asc")
for menu_num, row in enumerate(temp_table, start=1):
	company_dict[menu_num] = row[0]
company_dict.update({
	100: "Financial Services sector",
	200: "IT sector",
	300: "Energy sector",
	400: "Consumer goods sector",
	500: "Automobiles sector",
	600: "Pharma sector",
	700: "Construction sector",
	800: "Cement products sector",
	900: "Metals sector",
	1000: "Telecom sector",
	1100: "Services sector",
	1200: "Media and Entertainment sector",
	1300: "Industrial Manufacturing sector",
})
## Print list of companies and sectors:
# Companies occupy menu numbers < 100; sectors are the multiples of 100.
print "\n\nList of companies: \n"
for j in company_dict:
	if j < 100:
		print "%s : %s"%(j, company_dict[j])
print "\n\nList of sectors: \n"
temp_list = list(sorted([j for j in company_dict]))
for j in temp_list:
	if j >= 100:
		print "%s : %s"%(j, company_dict[j])
print "\n\n\nNumber of articles left to go = %s\n"%len(articles)
x = raw_input("\n\n\nPress 'Enter' to start...")
# Index of the article currently being classified.
i=0
while i < len(articles):
os.system("reset")
print "#--------------------------------------INSTRUCTIONS--------------------------------------#"
print "\n\tENTER THE COMPANY NUMBER FOR THE BODY OF THE ARTICLE."
print "\tIF THERE ARE MORE THAN ONE, ENTER THE NUMBERS SEPARETED BY SPACES, E.G.:"
print "\t\t3 4 8"
print "IF THE ARTICLE IS NOT ABOUT ANY OF THE COMPANIES LISTED, ENTER '0' (ZERO)."
print "IF THE ARTICLE IS ____ONLY____ ABOUT THE TOPIC FOR WHICH IT IS LISTED, PRESS ENTER."
print "YOU CAN GO BACK TO THE PREVIOUS ARTICLE WITH 'P', 'p', 'b' OR 'B'. \n\tNote: for every time you go back, you must re-mark the articles.\n"
print "#----------------------------------END OF INSTRUCTIONS-----------------------------------#"
print "\n\n\nArticle #%s of this session:\n\n"%(i+1)
print "Topic:\t\t\t%s\n"%(articles[i][1])
print "URL:\t\t\t%s\n\n"%(articles[i][0])
print "Date:\t\t\t%s\n"%(articles[i][4])
print "Headline:\t\t%s\n\n\n\n"%(articles[i][2])
print "Body:\n\t\t%s\n"%(articles[i][3])
## Print list of comapnies and sectors:
print "\n\nList of companies: \n"
for j in company_dict:
if j < 100:
print "%s : %s"%(j, company_dict[j])
print "\n\nList of sectors: \n"
temp_list = list(sorted([j for j in company_dict]))
for j in temp_list:
if j >= 100:
print "%s : %s"%(j, company_dict[j])
while True:
select = raw_input("\n>")
if not select:
ensure_not_double_query = "SELECT * from articles_clean where article_url='%s'"%(articles[i][0])
if len(sqliteDefaults.verified_select_sqlite(conn,ensure_not_double_query)) == 0:
sqliteDefaults.insert_table_sqlite(conn,
'articles_clean',
('company_or_sector', 'article_url'),
[ (articles[i][1], articles[i][0]) ]
)
i+=1 ## Go to next entry
else:
print "ERROR #1: pair already exists in table"
y = raw_input()
break;
elif select.lower() == 'p' or select.lower() == 'b':
i-=2 ## Go to previous entry
delete_query = "DELETE from articles_clean where article_url='%s'"%(articles[i+1][0])
print delete_query
conn.execute(delete_query)
conn.commit()
## Delete listing for previous entry
break;
else:
## Step 1: convert input string to a list of numbers
nums_str= extraction_text_manip.remove_empty_from_list(select.split(' '))
try:
input_companies_nums=list(set([int(x) for x in nums_str]))
except Exception:
print "\n\tERROR #2: please enter an integer."
continue
# print input_companies_nums
if input_companies_nums == [0]:
try:
sqliteDefaults.insert_table_sqlite(conn,
'articles_clean',
('company_or_sector', 'article_url'),
[ ('None', articles[i][0]) ]
)
break;
except Exception:
print "\tERROR: Duplicate entry for pair: (%s , %s)"%("None", articles[i][0])
y = raw_input()
## Step 2: make a list of the companies / sectors, insert them into the database.
try:
input_companies_and_sectors = [company_dict[j] for j in input_companies_nums]
except Exception:
print "\n\tERROR #3: please enter an a number from the list above."
continue
# print input_companies_and_sectors
for company_or_sector in input_companies_and_sectors:
try:
sqliteDefaults.insert_table_sqlite(conn,
'articles_clean',
('company_or_sector', 'article_url'),
[ (company_or_sector, articles[i][0]) ]
)
except Exception:
print "\tDuplicate entry for pair: (%s , %s)"%(company, articles[i][0])
y = raw_input()
break;
i+=1
|
{
"content_hash": "821fea827415afe51f0df712ed6f8766",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 146,
"avg_line_length": 28.63917525773196,
"alnum_prop": 0.6364290856731462,
"repo_name": "ARDivekar/SearchDistribute",
"id": "eeba0389cf1fb38257f7b99d1735fa3f81e10ecf",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "other/Legacy/article_clean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3791384"
},
{
"name": "Python",
"bytes": "608509"
}
],
"symlink_target": ""
}
|
"""Contains WmsGetMapRequest."""
import logging
import StringIO
import wms.ogc.common.geom as geom
import wms.ogc.common.image_specs as image_specs
import wms.ogc.common.tiles as tiles
import wms.ogc.common.utils as utils
OPERATIONS = set(["GetMap", "GetCapabilities"])
# These are all of the 1.3.0 official codes. 1.1.1's are the same
# except with 2 fewer, as noted below. (Any other errors are fine,
# they just don't get a code.)
_INVALID_FORMAT = "InvalidFormat"
_INVALID_CRS = "InvalidCRS"
_LAYER_NOT_DEFINED = "LayerNotDefined"
_STYLE_NOT_DEFINED = "StyleNotDefined"
_LAYER_NOT_QUERYABLE = "LayerNotQueryable"
# not 1.1.1
_INVALID_POINT = "InvalidPoint"
_CURRENT_UPDATE_SEQUENCE = "CurrentUpdateSequence"
_INVALID_UPDATE_SEQUENCE = "InvalidUpdateSequence"
_MISSING_DIMENSION_VALUE = "MissingDimensionValue"
_INVALID_DIMENSION_VALUE = "InvalidDimensionValue"
# not 1.1.1
# exported, for wms_130
OPERATION_NOT_SUPPORTED = "OperationNotSupported"
# Get logger
logger = logging.getLogger("wms_maps")
class WmsGetMapRequest(object):
  """Base class of WMS GetMapRequest objects particular to their version."""
  # NOTE: Python-2-only module (StringIO, dict.iteritems, str.decode("hex")).
  # Subclasses must implement _ServiceExceptionImpl(code, message).
  # The _Check* methods populate requested_layer_obj, user_log_rect,
  # user_width, user_height and projection_type as side effects;
  # GenerateOutputCommon() relies on them having run first.
  def __init__(self, layer_obj, parameters, required_param_names):
    self.layer_obj = layer_obj
    self.parameters = parameters
    self.required_param_names = required_param_names
  def GenerateOutputCommon(self):
    """Produces the WMS bitmap image. Used by both versions.
    Returns:
      The image composed of tiles.
    """
    logger.debug("Generating the bitmap image")
    image_format = utils.GetValue(self.parameters, "format")
    image_spec = image_specs.GetImageSpec(image_format)
    # TRANSPARENT parameter from GIS client's.
    # It can take "TRUE"/"FALSE" values.
    # Default value is "FALSE" as per spec.
    # GIS clients either send TRANSPARENT(=TRUE) attribute or don't send it
    # at all, if it's not required.
    # If this parameter is absent, GetValue() method returns None.
    # We default it to "FALSE" if this parameter is not available in the
    # GIS client requests.
    is_transparent = utils.GetValue(self.parameters, "transparent")
    if not is_transparent:
      is_transparent = "FALSE"
    # BGCOLOR parameter is a string that specifies the colour to be
    # used as the background (non-data) pixels of the map.
    # The format is 0xRRGGBB.The default value is 0xFFFFFF
    bgcolor = utils.GetValue(self.parameters, "bgcolor")
    if bgcolor:
      # Convert HEX string to python tuple.
      # Ignore the 0x at the beginning of the hex string.
      # Otherwise str.decode("hex") will throw
      # "TypeError: Non-hexadecimal digit found" error.
      if bgcolor[:2] == "0x":
        bgcolor = bgcolor[2:]
      # Python 2 only: "RRGGBB".decode("hex") -> 3 raw bytes -> (r, g, b).
      bgcolor = tuple(ord(c) for c in bgcolor.decode("hex"))
    else:
      bgcolor = tiles.ALL_WHITE_PIXELS
    # Add the user requested image format, transparency
    # and bgcolor to the layer python object.
    self.requested_layer_obj.image_format = image_format
    self.requested_layer_obj.is_transparent = is_transparent
    self.requested_layer_obj.bgcolor = bgcolor
    im_user = tiles.ProduceImage(
        self.requested_layer_obj,
        # There's a weird border artifact that MapServer shows when we
        # ask for more than 360 laterally, and which natively we show
        # fine. It's probably not us, but there's room for doubt. If
        # it was our calculations, it's likely the error would be most
        # extreme for a large final image, and few tiles.
        self.user_log_rect,
        self.user_width, self.user_height)
    buf = StringIO.StringIO()
    im_user.save(buf, image_spec.pil_format, **im_user.info)
    logger.debug("Image content type is :%s", image_spec.content_type)
    headers = [("Content-Type", image_spec.content_type)]
    logger.debug("Done generating the bitmap image")
    return headers, buf.getvalue()
  def _ServiceException(self, code, message):
    # Call down to the derived class for its specific form of
    # ServiceException.
    self._ServiceExceptionImpl(code, message)
  def _EnsureRequiredParameters(self):
    """Mechanically produces a ServiceException if a required parm. is missing.
    Checks if the required parameter is available and is non-empty.
    """
    for reqd in self.required_param_names:
      # GIS clients send an empty value for "styles" parameter by default.
      # Empty values for styles parameter should be accepted.
      if reqd == "styles":
        continue
      if utils.GetValue(self.parameters, reqd) is None:
        error_message = "Missing required parameter: \'%s\'" % reqd
        logger.debug(error_message)
        self._ServiceException(None, error_message)
  def _CheckParameters(self):
    """Checks if required parameters are available in the request."""
    self._EnsureRequiredParameters()
    # presence is established - now we're looking in more detail.
    lite_checkers = {
        # version already checked by this point
        # request already checked by this point
        "layers": self._CheckLayers,
        "styles": self._CheckStyles,
        "crs": self._CheckCrs,
        "srs": self._CheckSrs,
        "bbox": self._CheckBbox,
        "width": self._CheckWidth,
        "height": self._CheckHeight,
        "format": self._CheckFormat
        }
    for name, checker in lite_checkers.iteritems():
      parameter_value = utils.GetValue(self.parameters, name)
      checker(parameter_value)
  def _ProcessCommon(self):
    """Processes the GetMapRequest parameters and prepares the GEE tile URL.
    Returns:
      Nothing, but might raise a ServiceException of the right version.
    """
    logger.debug("Processing common information for the GetMap request")
    self._CheckParameters()
    # We should by rights check that CRS/SRS matches the layer's, but
    # we don't because (at least in jeffdonner's experience with
    # 'uppermidwest.map') we have to let MapServer feed us ones we
    # don't support in order to work with it. Luckily MapServer
    # produces the right result despite this confusion.
    # TODO: nice-to-have: add format flexibility (raster ->
    # png, vectorrastermap -> jpg) since PIL can transform them. At
    # least, ImageMaps could be png.
    logger.debug("Done processing common information for the GetMap request")
  ####################################################################
  # The _CheckXXX methods raise a ServiceException if they detect
  # something wrong; otherwise they do nothing, and should have no
  # side effects (except on caching the server's layers).
  ####################################################################
  def _CheckLayers(self, layers_text):
    """Checks if the requested layer is available."""
    logger.debug("Processing the target layer")
    layer_names = layers_text.split(",")
    if 1 < len(layer_names):
      # We only handle a single layer at a time (jeffdonner is not sure
      # what handling multiple ones would mean - compositing them per
      # STYLES perhaps? Sending them in sequence, somehow?)
      logger.warning(
          "Received request for multiple layers. "
          "We only handle one at a time, the first.")
    # Just check the first, that's all we'll use.
    layer_name = layer_names[0]
    server_layers_by_name = self.layer_obj.GetLayers(
        utils.GetValue(self.parameters, "server-url"),
        utils.GetValue(self.parameters, "TargetPath"))
    self.requested_layer_obj = server_layers_by_name.get(layer_name, None)
    if not self.requested_layer_obj:
      logger.error("Layer %s doesn't exist", layer_name)
      self._ServiceExceptionImpl(_LAYER_NOT_DEFINED,
                                 "No layer matching \'%s\' found" % layer_name)
    # By this point we know it's there.
    logger.debug("Target layer: " + layer_name)
  def _CheckStyles(self, styles_text):
    # STYLES is accepted but ignored; any value (including empty) passes.
    pass
  def _CheckCrs(self, crs_text):
    # WMS 1.3.0 names the projection parameter CRS.
    if self.parameters["version"].startswith("1.3."):
      self.projection_type = crs_text
  def _CheckSrs(self, srs_text):
    # WMS 1.1.1 names the projection parameter SRS.
    if self.parameters["version"].startswith("1.1.1"):
      self.projection_type = srs_text
  def _CheckBbox(self, bbox_text):
    """Checks if the bounding box information is proper."""
    coords = bbox_text.split(",")
    if len(coords) != 4:
      self._ServiceExceptionImpl(None, "Expected 4 BBOX coordinates")
    # BBOX inputs are provided in (xy0.y, xy0.x, xy1.y, xy1.x) format
    # for a flat projection when the WMS version is 1.3.0, which
    # otherwise is in (xy0.x, xy0.y, xy1.x, xy1.y) format.
    if (self.requested_layer_obj.projection.name == "flat" and
        self.parameters["version"].startswith("1.3.")):
      self.user_log_rect = geom.Rect(float(coords[1]), float(coords[0]),
                                     float(coords[3]), float(coords[2]))
    else:
      self.user_log_rect = geom.Rect(float(coords[0]), float(coords[1]),
                                     float(coords[2]), float(coords[3]))
    # The requirement on relative sizes of the params in BBOX, even
    # the equality part, is per the spec.
    if self.user_log_rect.y1 <= self.user_log_rect.y0:
      self._ServiceExceptionImpl(None, "BBOX.ymax <= BBOX.ymin")
    if self.user_log_rect.x1 <= self.user_log_rect.x0:
      self._ServiceExceptionImpl(None, "BBOX.xmax <= BBOX.xmin")
  def _CheckIsPositiveInt(self, maybe_numeric_text, param_name):
    """Checks if the parameter is positive integer."""
    must_raise = False
    try:
      value = int(maybe_numeric_text)
      # Allowing 0 might be a good test of our math. But not negative.
      must_raise = value < 0
    except ValueError:
      must_raise = True
    if must_raise:
      self._ServiceExceptionImpl(None, param_name + " must be positive integer")
  def _CheckWidth(self, width_text):
    self._CheckIsPositiveInt(width_text, "WIDTH")
    self.user_width = int(width_text)
  def _CheckHeight(self, height_text):
    self._CheckIsPositiveInt(height_text, "HEIGHT")
    self.user_height = int(height_text)
  def _CheckFormat(self, image_format):
    if not image_specs.IsKnownFormat(image_format):
      self._ServiceExceptionImpl(
          _INVALID_FORMAT,
          "Unsupported format \'%s\'" % image_format)
def main():
  # NOTE(review): this smoke test looks broken: WmsGetMapRequest takes
  # (layer_obj, parameters, required_param_names), but a parameters dict is
  # passed as layer_obj and a list as parameters here; additionally
  # GenerateOutputCommon() depends on attributes (requested_layer_obj,
  # user_log_rect, user_width, user_height) that only the _Check* methods
  # set — confirm before relying on it.
  map_req_obj = WmsGetMapRequest({"format": "image/jpeg"}, ["format"], [])
  is_valid_format = map_req_obj.GenerateOutputCommon()
  print is_valid_format
if __name__ == "__main__":
  main()
|
{
"content_hash": "c0c0ad9124ee2b54fe97693de68462cb",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 80,
"avg_line_length": 37.10320284697509,
"alnum_prop": 0.6654517552273164,
"repo_name": "iparanza/earthenterprise",
"id": "d7ad1ef3e82db7bb4d7a2f9e985dc071fd2e4607",
"size": "11023",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "earth_enterprise/src/server/wsgi/wms/ogc/implementation/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "4637"
},
{
"name": "Batchfile",
"bytes": "18319"
},
{
"name": "C",
"bytes": "150115"
},
{
"name": "C++",
"bytes": "11492121"
},
{
"name": "CSS",
"bytes": "379769"
},
{
"name": "HTML",
"bytes": "4044528"
},
{
"name": "Java",
"bytes": "9028"
},
{
"name": "JavaScript",
"bytes": "106222565"
},
{
"name": "Makefile",
"bytes": "3425"
},
{
"name": "PLpgSQL",
"bytes": "13346"
},
{
"name": "Perl",
"bytes": "432631"
},
{
"name": "Perl6",
"bytes": "3506"
},
{
"name": "Prolog",
"bytes": "1423"
},
{
"name": "Protocol Buffer",
"bytes": "457500"
},
{
"name": "Python",
"bytes": "3720319"
},
{
"name": "QMake",
"bytes": "5293"
},
{
"name": "Roff",
"bytes": "31717"
},
{
"name": "Shell",
"bytes": "191537"
},
{
"name": "TeX",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
# Generated test driver: exercise the generic regression test for a Ridge
# model on the "freidman2" dataset against a PostgreSQL backend.
reg_gen.test_model("Ridge" , "freidman2" , "postgresql")
|
{
"content_hash": "668de6b2cc74c8ab03dbcda4725588ba",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 66,
"avg_line_length": 31.5,
"alnum_prop": 0.7619047619047619,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "73147cfde4ee1784fa6d8e8b1faceefb2fc1c876",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/freidman2/ws_freidman2_Ridge_postgresql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
"""
WSGI config for bugTracking project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings (unless already configured),
# then build the module-level WSGI callable servers look for.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bugTracking.settings")
application = get_wsgi_application()
|
{
"content_hash": "dbb8372dd8b2d365dd9d2d1ea1757e18",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.9375,
"alnum_prop": 0.7744360902255639,
"repo_name": "YikaiLee/bugTracker",
"id": "83782d29c3e9b0f273ad0b84e0e1c81d273f8b14",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bugTracking/bugTracking/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11495"
},
{
"name": "Python",
"bytes": "10274"
}
],
"symlink_target": ""
}
|
"""
sentry.management.commands.create_sample_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.core.management.base import BaseCommand, CommandError, make_option
class Command(BaseCommand):
    # NOTE(review): `make_option` is imported above from
    # django.core.management.base; it normally comes from optparse —
    # verify that import actually resolves on the Django version in use.
    help = 'Creates a sample event in Sentry (if applicable)'
    option_list = BaseCommand.option_list + (
        make_option('--project', dest='project', help="project ID or team-slug/project-slug"),
        make_option('--platform', dest='platform'),
    )
    def handle(self, **options):
        """Create one sample event in the selected project and print its URL."""
        from django.conf import settings
        from sentry.models import Project
        from sentry.utils.samples import create_sample_event
        # --project accepts a numeric ID or "team-slug/project-slug";
        # without it, the install's own SENTRY_PROJECT is used.
        if not options['project']:
            project = Project.objects.get(id=settings.SENTRY_PROJECT)
        else:
            if options['project'].isdigit():
                project = Project.objects.get(id=options['project'])
            elif '/' in options['project']:
                t_slug, p_slug = options['project'].split('/', 1)
                project = Project.objects.get(slug=p_slug, team__slug=t_slug)
            else:
                raise CommandError('Project must be specified as team-slug/project-slug or a project id')
        platform = options['platform']
        event = create_sample_event(project, platform)
        # create_sample_event returns a falsy value when the platform has
        # no sample data available.
        if not event:
            raise CommandError('Unable to create an event for platform %r' % (str(platform),))
        self.stdout.write('Event created: %s' % (event.group.get_absolute_url(),))
|
{
"content_hash": "5c0754c77f70bc6069368fe9b0fcf97f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 105,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.6206060606060606,
"repo_name": "BayanGroup/sentry",
"id": "b617a1ce31f5fb3c0cac608d2b74f82f5c080218",
"size": "1650",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/sentry/management/commands/create_sample_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156607"
},
{
"name": "HTML",
"bytes": "188852"
},
{
"name": "JavaScript",
"bytes": "443758"
},
{
"name": "Makefile",
"bytes": "4647"
},
{
"name": "Python",
"bytes": "7069971"
}
],
"symlink_target": ""
}
|
'''
Created on 11 Nov 2014
@author: maxz
'''
from observable import Observable
class Updateable(Observable):
    """Mixin that lets a model switch automatic updates on and off.

    While updates are off the model may be left in a non-working state;
    switching them back on triggers a recomputation.
    """
    _updates = True
    def __init__(self, *args, **kwargs):
        super(Updateable, self).__init__(*args, **kwargs)
    def update_model(self, updates=None):
        """Get or set whether automatic updates are performed.

        :param bool|None updates:
            True/False: switch updates on/off (propagated to the highest
            parent, then the model is recomputed).
            None: return the current update state (inherited from the
            highest parent when one exists).
        """
        if updates is None:
            # Query: the highest parent's switch wins when present.
            parent = getattr(self, '_highest_parent_', None)
            if parent is not None:
                self._updates = parent._updates
            return self._updates
        assert isinstance(updates, bool), "updates are either on (True) or off (False)"
        # Set: mirror the new state onto the highest parent and ourselves,
        # then recompute from the current parameter values.
        parent = getattr(self, '_highest_parent_', None)
        if parent is not None:
            parent._updates = updates
        self._updates = updates
        self.trigger_update()
    def toggle_update(self):
        """Flip the update switch."""
        self.update_model(not self.update_model())
    def trigger_update(self, trigger_parent=True):
        """Recompute the model from its current state.

        Does nothing while updates are switched off or during __init__.

        :param bool trigger_parent: whether to notify the parent afterwards.
        """
        if not self.update_model() or getattr(self, "_in_init_", False):
            return
        self._trigger_params_changed(trigger_parent)
|
{
"content_hash": "435aa9ef9f6c19ee2476dbde59aeafb5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 89,
"avg_line_length": 32.43636363636364,
"alnum_prop": 0.5997757847533632,
"repo_name": "strongh/GPy",
"id": "593f3c0509027c769a3f96fc386e682000c0d5d4",
"size": "1784",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "GPy/core/parameterization/updateable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "707"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "1387764"
}
],
"symlink_target": ""
}
|
"""This example gets all disapproved ads for a given campaign.
To add an ad, run add_ads.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
PAGE_SIZE = 100
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201806')
# Construct selector and get all ads for a given ad group.
offset = 0
selector = {
'fields': ['Id', 'PolicySummary'],
'predicates': [
{
'field': 'AdGroupId',
'operator': 'EQUALS',
'values': [ad_group_id]
},
{
'field': 'CombinedApprovalStatus',
'operator': 'EQUALS',
'values': 'DISAPPROVED'
}
],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
disapproved_count = 0
# Display results.
while more_pages:
page = ad_group_ad_service.get(selector)
if 'entries' in page:
for ad in page['entries']:
disapproved_count += 1
policy_summary = ad['policySummary']
print ('Ad with id "%s" was disapproved with the following policy '
'topic entries:' % ad['ad']['id'])
# Display the policy topic entries related to the ad disapproval.
for policy_topic_entry in policy_summary['policyTopicEntries']:
print ' topic ID: %s, topic name: %s, Help Center URL: %s' % (
policy_topic_entry['policyTopicId'],
policy_topic_entry['policyTopicName'],
policy_topic_entry['policyTopicHelpCenterUrl'])
# Display the attributes and values that triggered the policy topic.
policy_topic_evidences = policy_topic_entry['policyTopicEvidences']
if policy_topic_evidences:
for evidence in policy_topic_entry['policyTopicEvidences']:
print (' evidence type: %s'
% evidence['policyTopicEvidenceType'])
evidence_text_list = evidence['evidenceTextList']
if evidence_text_list:
for index, evidence_text in enumerate(evidence_text_list):
print ' evidence text[%d]: %s' % (index, evidence_text)
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
print '%d disapproved ads were found.' % disapproved_count
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
{
"content_hash": "4fc53de52c64d54da6f1062a365d02cf",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 80,
"avg_line_length": 33.229885057471265,
"alnum_prop": 0.612590799031477,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "a9fbc25832cfbd4776c01937ae8c85c2e3fbc62c",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201806/campaign_management/get_all_disapproved_ads.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
}
|
from .sub_resource import SubResource
class BackendAddressPool(SubResource):
    """Pool of backend IP addresses.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar backend_ip_configurations: Gets collection of references to IP
     addresses defined in network interfaces.
    :vartype backend_ip_configurations: list of
     :class:`NetworkInterfaceIPConfiguration
     <azure.mgmt.network.v2017_06_01.models.NetworkInterfaceIPConfiguration>`
    :ivar load_balancing_rules: Gets load balancing rules that use this
     backend address pool.
    :vartype load_balancing_rules: list of :class:`SubResource
     <azure.mgmt.network.v2017_06_01.models.SubResource>`
    :ivar outbound_nat_rule: Gets outbound rules that use this backend address
     pool.
    :vartype outbound_nat_rule: :class:`SubResource
     <azure.mgmt.network.v2017_06_01.models.SubResource>`
    :param provisioning_state: Get provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # Server-populated (read-only) properties; rejected if sent on requests.
    _validation = {
        'backend_ip_configurations': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
        'outbound_nat_rule': {'readonly': True},
    }
    # Maps Python attribute names to their wire-format JSON paths.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'outbound_nat_rule': {'key': 'properties.outboundNatRule', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, id=None, provisioning_state=None, name=None, etag=None):
        super(BackendAddressPool, self).__init__(id=id)
        # Read-only fields start as None and are filled in by the service.
        self.backend_ip_configurations = None
        self.load_balancing_rules = None
        self.outbound_nat_rule = None
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
{
"content_hash": "9cb735f5451a0caebe1254212b4e9e27",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 128,
"avg_line_length": 43.23728813559322,
"alnum_prop": 0.6675813406507252,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "011aa60e2ff5b4ffd0f055eaadc857d0f302563a",
"size": "3025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/backend_address_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
def get_data(entry, source_database, config):
    """Return introspection metadata for one OPTIMADE entry type.

    :param entry: entry type name, either ``'structures'`` or ``'calculations'``.
    :param source_database: unused here; kept for interface compatibility.
    :param config: unused here; kept for interface compatibility.
    :returns: dict with ``description``, ``properties``, ``formats`` and
        ``output_fields_by_format`` keys.
    :raises ValueError: if ``entry`` is not a known entry type.
    """
    if entry == 'structures':
        return {
            'description': "A structure.",
            "properties": {
                "id": "An entry's ID",
                "modification_date": "A date representing when the entry was last modified.",
                "elements": "names of the elements found in the structure.",
                "nelements": "number of elements.",
                "chemical_formula": "The chemical formula for a structure.",
                # Typo fixed: "occurence" -> "occurrence".
                "formula_prototype": ("The formula prototype obtained by sorting "
                                      "the elements by the occurrence number in the reduced chemical formula and "
                                      "replace them with subsequent alphabet letters A, B, C and so on.")
            },
            "formats": ["json"],
            "output_fields_by_format": {
                "json": [
                    "id",
                    "modification_date",
                    "elements",
                    "nelements",
                    "chemical_formula",
                    "formula_prototype",
                ]
            }
        }
    elif entry == 'calculations':
        return {
            'description': "A computation.",
            "properties": {
                "id": "An entry's ID.",
                "modification_date": "A date representing when the entry was last modified.",
            },
            "formats": ["json"],
            "output_fields_by_format": {
                "json": [
                    "id",
                    "modification_date",
                ]
            }
        }
    else:
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) and include the offending value for easier debugging.
        raise ValueError("Unknown entry type requested: %r" % (entry,))
|
{
"content_hash": "be1c0adab47b46c4e949a2ad92799ee9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 113,
"avg_line_length": 40.02272727272727,
"alnum_prop": 0.43668370244179444,
"repo_name": "rartino/optimadeapi",
"id": "98cd48d6a6e5b3bcbe69b54da683e84132e0a45c",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/optimadeapi/entry_introspection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13602"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class Quotas(horizon.Panel):
    # Admin dashboard "System Info" panel, registered under the 'info' slug.
    # NOTE(review): the class name "Quotas" does not match the panel it now
    # renders -- looks historical; confirm before renaming, since the
    # registration below uses it.
    name = _("System Info")
    slug = 'info'
dashboard.Admin.register(Quotas)
|
{
"content_hash": "665c95f7ff40e209e9ae798f665f98e6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 19.384615384615383,
"alnum_prop": 0.746031746031746,
"repo_name": "deepakselvaraj/federated-horizon",
"id": "413903143cafe9e61b421eb80fbe2408646d5ace",
"size": "1061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/info/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
scrapy.linkextractors
This package contains a collection of Link Extractors.
For more info see docs/topics/link-extractors.rst
"""
import re
from six.moves.urllib.parse import urlparse
from parsel.csstranslator import HTMLTranslator
from w3lib.url import canonicalize_url
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
url_is_from_any_domain, url_has_any_extension,
)
# common file extensions that are not followed if they occur in links
# (FilteringLinkExtractor prefixes each entry with '.' before matching)
IGNORED_EXTENSIONS = [
    # images
    'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
    'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
    # audio
    'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
    # video
    '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
    'm4a', 'm4v',
    # office suites
    'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
    'odp',
    # other
    'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]
# Type of a compiled regex, used to tell patterns from pattern strings.
_re_type = type(re.compile("", 0))
def _matches(url, regexs):
    """Return True if *url* matches any compiled regex in *regexs*."""
    return any(r.search(url) for r in regexs)
def _is_valid_url(url):
    """Only http(s) and file URLs are considered followable."""
    return url.split('://', 1)[0] in {'http', 'https', 'file'}
class FilteringLinkExtractor(object):
    """Wraps a concrete link extractor and filters the links it yields by
    allow/deny regexes, domain lists and ignored file extensions."""
    _csstranslator = HTMLTranslator()
    def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
                 restrict_xpaths, canonicalize, deny_extensions, restrict_css):
        def compile_all(patterns):
            # Accept both pre-compiled regexes and pattern strings.
            return [p if isinstance(p, _re_type) else re.compile(p)
                    for p in arg_to_iter(patterns)]
        self.link_extractor = link_extractor
        self.allow_res = compile_all(allow)
        self.deny_res = compile_all(deny)
        self.allow_domains = set(arg_to_iter(allow_domains))
        self.deny_domains = set(arg_to_iter(deny_domains))
        # restrict_css selectors are translated to equivalent XPath
        # expressions and appended to restrict_xpaths.
        xpaths = tuple(arg_to_iter(restrict_xpaths))
        xpaths += tuple(self._csstranslator.css_to_xpath(sel)
                        for sel in arg_to_iter(restrict_css))
        self.restrict_xpaths = xpaths
        self.canonicalize = canonicalize
        if deny_extensions is None:
            deny_extensions = IGNORED_EXTENSIONS
        self.deny_extensions = {'.' + ext for ext in arg_to_iter(deny_extensions)}
    def _link_allowed(self, link):
        """True if *link* passes scheme, regex, domain and extension checks."""
        url = link.url
        if not _is_valid_url(url):
            return False
        if self.allow_res and not _matches(url, self.allow_res):
            return False
        if self.deny_res and _matches(url, self.deny_res):
            return False
        parsed = urlparse(url)
        if self.allow_domains and not url_is_from_any_domain(parsed, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(parsed, self.deny_domains):
            return False
        if self.deny_extensions and url_has_any_extension(parsed, self.deny_extensions):
            return False
        return True
    def matches(self, url):
        """True if *url* passes the domain and allow/deny regex rules."""
        if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
            return False
        if self.allow_res and not _matches(url, self.allow_res):
            return False
        return not (self.deny_res and _matches(url, self.deny_res))
    def _process_links(self, links):
        # Drop disallowed links, optionally canonicalize, then let the
        # wrapped extractor post-process the survivors.
        allowed = [link for link in links if self._link_allowed(link)]
        if self.canonicalize:
            for link in allowed:
                link.url = canonicalize_url(link.url)
        return self.link_extractor._process_links(allowed)
    def _extract_links(self, *args, **kwargs):
        return self.link_extractor._extract_links(*args, **kwargs)
# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
|
{
"content_hash": "03a96efc2a712455a73935ad44e5c392",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 95,
"avg_line_length": 34.84070796460177,
"alnum_prop": 0.6121412242824485,
"repo_name": "ssteo/scrapy",
"id": "2d7115cc50475bf962af3ab38340ebd65dcc182b",
"size": "3937",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scrapy/linkextractors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "2008"
},
{
"name": "HTML",
"bytes": "1961"
},
{
"name": "Python",
"bytes": "1394152"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
}
|
from string import Template
from flask_mail import Mail, Message
from ifsApprover import db, config
from ifsApprover.Log import get_logger
from ifsApprover.web import app
#
# mail templates
# (subject, body)
#
# Each template is a (subject, body) pair of string.Template sources with
# $placeholders; user-facing text is German by design.
# Sent to all admins when a new image is uploaded.
# NOTE(review): "Bitte prüfe es schalte es ggf. frei." looks like it is
# missing an "und" -- confirm before changing user-facing text.
NEW_IMAGE_MAIL = (u"Neues IfS ($amount)!", u"""\
Hallo,
$owner hat ein neues Image from Space hochgeladen.
$url
Bitte prüfe es schalte es ggf. frei.
Es gibt insgesamt $amount zu bearbeitende Bilder.
Viele Grüße,
IFS
""")
# Sent to the image owner on approval.
IMAGE_APPROVED = (u"Bild freigeschaltet :)", u"""\
Hallo,
dein Bild "$image" ist jetzt freigeschaltet.
Viele Grüße,
IFS
""")
# Sent to the image owner on rejection, including the reason.
IMAGE_REJECTED = (u"Bild zurückgewiesen :(", u"""\
Hallo,
dein Bild "$image" wurde NICHT freigeschaltet. Grund:
$reason
Viele Grüße,
IFS
""")
# Sent to the remaining admins after an approve/reject action.
IMAGE_ACTION_ADMIN = (u"Bild $action ($amount)", u"""\
Hallo,
$admin hat das Bild "$image" von "$owner" bearbeitet. Ergebnis: $action
Es sind noch $amount Bilder offen.
Viele Grüße,
IFS
""")
logger = get_logger("mail")
def send_new_image_mail(ifs_image_owner):
    """Notify every registered user that *ifs_image_owner* uploaded a new image.

    :param ifs_image_owner: login/address of the uploader.
    """
    # Comprehension instead of list(map(lambda ...)).
    login_list = [entry["login"] for entry in db.get_users_list()]
    image_pending = db.get_pending_images_count()
    subject = Template(NEW_IMAGE_MAIL[0]).substitute(amount=image_pending)
    body = Template(NEW_IMAGE_MAIL[1]).substitute(owner=ifs_image_owner, url=config["WEB_UI_URL"], amount=image_pending)
    # Lazy %-args: formatting deferred until the record is actually emitted.
    logger.info("Sending new-image-mail (image from %s)", ifs_image_owner)
    _send(recipients=login_list, subject=subject, body=body)
def send_approve_mail(ifs_image_owner, image_filename, user_login):
    """Tell the owner their image was approved, then notify the other admins.

    :param ifs_image_owner: recipient (image owner's login/address).
    :param image_filename: filename of the approved image.
    :param user_login: admin who performed the approval.
    """
    body = Template(IMAGE_APPROVED[1]).substitute(image=image_filename)
    # Lazy %-args instead of eager %-formatting of the log message.
    logger.info("Sending approve-mail (image %s from %s)", image_filename, ifs_image_owner)
    _send(recipients=[ifs_image_owner], subject=IMAGE_APPROVED[0], body=body)
    _send_other_admin_notification("APPROVED", ifs_image_owner, image_filename, user_login)
def send_reject_mail(ifs_image_owner, image_filename, reject_reason, user_login):
    """Tell the owner their image was rejected, then notify the other admins.

    :param ifs_image_owner: recipient (image owner's login/address).
    :param image_filename: filename of the rejected image.
    :param reject_reason: reason text substituted into the mail body.
    :param user_login: admin who performed the rejection.
    """
    body = Template(IMAGE_REJECTED[1]).substitute(image=image_filename, reason=reject_reason)
    # Lazy %-args instead of eager %-formatting of the log message.
    logger.info("Sending reject-mail (image %s from %s)", image_filename, ifs_image_owner)
    _send(recipients=[ifs_image_owner], subject=IMAGE_REJECTED[0], body=body)
    _send_other_admin_notification("REJECT", ifs_image_owner, image_filename, user_login)
def _send_other_admin_notification(action, ifs_image_owner, image_filename, user_login):
    """Mail every admin except the acting one (and system users) about *action*.

    :param action: e.g. "APPROVED" or "REJECT"; substituted into the template.
    :param ifs_image_owner: owner of the image that was acted upon.
    :param image_filename: filename of the image that was acted upon.
    :param user_login: admin who performed the action (excluded from recipients).
    """
    acting_login = user_login.lower()  # hoisted out of the loop
    other_admins = [user["login"] for user in db.get_users_list()
                    if user["login"] != acting_login and not db.is_system_user(user)]
    if not other_admins:
        logger.info("There are no other admins to receive notification.")
        return
    image_pending = db.get_pending_images_count()
    subject = Template(IMAGE_ACTION_ADMIN[0]).substitute(action=action, amount=image_pending)
    body = Template(IMAGE_ACTION_ADMIN[1]).substitute(image=image_filename, owner=ifs_image_owner, admin=user_login, action=action,
                                                      amount=image_pending)
    # Lazy %-args instead of eager %-formatting of the log message.
    logger.info("Sending notification to other admins (%s)", other_admins)
    _send(recipients=other_admins, subject=subject, body=body)
def _send(recipients, subject, body):
    """Deliver one mail via Flask-Mail.

    :param recipients: list of recipient addresses.
    :param subject: mail subject line.
    :param body: plain-text body (see the UTF-8 templates above).
    """
    mail = Mail(app)
    msg = Message(subject, sender=config["MAIL_SENDER"])
    msg.charset = "utf8"
    msg.recipients = recipients
    msg.body = body
    with app.app_context():
        # Identity check: only a literal True suppresses sending (dry run).
        if config["MAIL_SUPPRESS_SEND"] is True:
            logger.debug(f"Mail (not sent):\n{msg}")
        else:
            mail.send(msg)
|
{
"content_hash": "4f81a7f84ae622b56e6158d4cc45828b",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 131,
"avg_line_length": 29.883333333333333,
"alnum_prop": 0.6837702175125489,
"repo_name": "ktt-ol/ifs-approver",
"id": "b2ce34fc99462e615bf4fee8b7ce9f0e348d9535",
"size": "3619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/ifsApprover/Mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1713"
},
{
"name": "HTML",
"bytes": "6753"
},
{
"name": "JavaScript",
"bytes": "21239"
},
{
"name": "Python",
"bytes": "32136"
}
],
"symlink_target": ""
}
|
import array
import struct
import zlib
from enum import Enum
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
# Fail fast on Kaitai Struct runtimes older than the API this parser targets.
if parse_version(ks_version) < parse_version('0.7'):
    raise Exception(
        "Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
from .cfg_2 import Cfg2
from .header import Header
from .data import Data
from .cfg_3 import Cfg3
from .command import Command
def _kaitai_repr(self):
    """Generic repr for KaitaiStruct instances: scalar public attributes
    are shown as name=value, everything else as a bare attribute name."""
    parts = []
    for name in vars(self):
        if name.startswith('_'):
            continue
        value = getattr(self, name)
        if type(value) in (int, float, str, bytes, bool):
            parts.append("=".join((name, repr(value))))
        else:
            parts.append(name)
    return "<" + self.__class__.__name__ + " |" + ", ".join(parts) + ">"
def _enum_repr(self):
    """repr for Enum members; strips the conventional 'Enum' suffix
    (four characters) from the class name."""
    parts = ["=".join((attr, repr(getattr(self, attr))))
             for attr in ("name", "value")]
    return "<" + self.__class__.__name__[:-4] + " |" + ", ".join(parts) + ">"
def _kaitai_show(self, parent_path=' '):
    """Recursively pretty-print a parsed struct as 'path == value' lines.

    Scalars print directly; lists recurse per element; objects recurse over
    their sorted public attributes, preferring a monkey-patched .show().
    """
    if type(self) in (int, float, str, bytes, bool):
        print(" == ".join((parent_path, self.__repr__())))
    elif type(self) == list:
        for i, item in enumerate(self):
            try:
                item.show('{}[{}]'.format(parent_path,i))
            except AttributeError:
                # Narrowed from a bare `except:`: the fallback is only for
                # values without the monkey-patched .show attribute.
                _kaitai_show(item,'{}[{}]'.format(parent_path,i))
    else:
        for item in sorted(vars(self)):
            if not item.startswith('_'):
                _r = getattr(self, item)
                try:
                    _r.show(parent_path+'.'+item)
                except AttributeError:
                    _kaitai_show(_r,parent_path+'.'+item)
def _enum_show(self, parent_path=' '):
    """Print an Enum member's name and value as 'path.attr == value' lines."""
    for item in ("name", "value"):
        _r = getattr(self, item)
        print(parent_path+'.'+item+' == '+_r.__repr__())
# Monkey-patch readable __repr__ and .show() onto every generated
# KaitaiStruct and Enum so parsed messages can be inspected interactively.
KaitaiStruct.__repr__ = _kaitai_repr
Enum.__repr__ = _enum_repr
KaitaiStruct.show = _kaitai_show
Enum.show = _enum_show
#msg.show()
class PhasorMessage(KaitaiStruct):
    """One IEEE C37.118 synchrophasor frame: common header (sync word,
    framesize, idcode, SOC, fracsec), a frame-type-dependent payload, and a
    trailing checksum word."""
    def __repr__(self):
        # Like _kaitai_repr, but prepends the derived "time=" field when a
        # fractional timestamp is available.
        _repr_list = [
            "time=" + str(self.time)] if self.fracsec.fraction_of_second else []
        for item in vars(self):
            if not item.startswith('_'):
                _r = getattr(self, item)
                if type(_r) in (int, float, str, bytes):
                    _repr_list.append("=".join((item, _r.__repr__())))
                else:
                    _repr_list.append(item)
        return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
    def show(self, parent_path=' '):
        # Surface the derived absolute time first, then the generic walker.
        if self.fracsec.fraction_of_second:
            print(parent_path+'.time == '+str(self.time))
        _kaitai_show(self, parent_path)
    def __init__(self, _io, _parent=None, _root=None, _mini_cfgs=None):
        # _mini_cfgs caches per-idcode configuration frames; data frames can
        # only be decoded once a matching CFG frame has been seen.
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._pkt_pos = self._io.pos()
        self.sync = self._root.SyncWord(self._io, self, self._root)
        self.framesize = self._io.read_u2be()
        self.idcode = self._io.read_u2be()
        self._mini_cfg = _mini_cfgs.mini_cfg[self.idcode]
        self.soc = self._io.read_u4be()
        self.fracsec = self._root.Fracsec(self._io, self, self._root,
            self._mini_cfg.time_base.time_base if self._mini_cfg else None)
        # Dispatch on the frame type carried in the sync word.
        _on = self.sync.frame_type.value
        if _on == 0:
            # Data frame: decodable only with a cached configuration;
            # otherwise keep the raw payload bytes.
            if self._mini_cfg:
                self.data = Data(self._io, _mini_cfg=self._mini_cfg)
            else:
                self.data = self._io.read_bytes((self.framesize - 16))
        elif _on == 3:
            # CFG-2 frame: parse it and cache it for later data frames.
            self._raw_data = self._io.read_bytes((self.framesize - 16))
            io = KaitaiStream(BytesIO(self._raw_data))
            self.data = Cfg2(io)
            _mini_cfgs.add_cfg(self.idcode, self.data)
        elif _on == 4:
            self._raw_data = self._io.read_bytes((self.framesize - 16))
            io = KaitaiStream(BytesIO(self._raw_data))
            self.data = Command(io)
        elif _on == 5:
            # NOTE(review): unlike the CFG-2 branch this calls add_cfg with a
            # single raw-packet argument, and before self.data is parsed --
            # looks suspicious; confirm against _mini_cfgs.add_cfg's signature.
            _mini_cfgs.add_cfg(self.raw_pkt)
            self._raw_data = self._io.read_bytes((self.framesize - 16))
            io = KaitaiStream(BytesIO(self._raw_data))
            self.data = Cfg3(io)
        elif _on == 2:
            self._raw_data = self._io.read_bytes((self.framesize - 16))
            io = KaitaiStream(BytesIO(self._raw_data))
            self.data = Cfg2(io)
        elif _on == 1:
            self._raw_data = self._io.read_bytes((self.framesize - 16))
            io = KaitaiStream(BytesIO(self._raw_data))
            self.data = Header(io)
        self.chk = self._io.read_u2be()
    class SyncWord(KaitaiStruct):
        """Leading sync word: magic byte 0xAA, then frame type and version."""
        class FrameTypeEnum(Enum):
            data = 0
            header = 1
            cfg1 = 2
            cfg2 = 3
            cmd = 4
            cfg3 = 5
        class VersionNumberEnum(Enum):
            c_37_118_2005 = 1
            c_37_118_2_2011 = 2
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # 0xAA encoded as the signed byte -86.
            self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -86))
            self.reserved = self._io.read_bits_int(1) != 0
            self.frame_type = self._root.SyncWord.FrameTypeEnum(
                self._io.read_bits_int(3))
            self.version_number = self._root.SyncWord.VersionNumberEnum(
                self._io.read_bits_int(4))
    class Fracsec(KaitaiStruct):
        """Time-quality flags plus the raw 24-bit fraction-of-second counter."""
        def __repr__(self):
            # Prepend the derived fraction_of_second when a time base is known.
            _repr_list = ["fraction_of_second=" +
                str(self.fraction_of_second)] if self.fraction_of_second else []
            for item in vars(self):
                if not item.startswith('_'):
                    _r = getattr(self, item)
                    if type(_r) in (int, float, str):
                        _repr_list.append("=".join((item, _r.__repr__())))
                    else:
                        _repr_list.append(item)
            return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
        def show(self, parent_path):
            if self.fraction_of_second:
                print(parent_path+'.fraction_of_second == ' + str(self.fraction_of_second))
            _kaitai_show(self, parent_path)
        class LeapSecondDirectionEnum(Enum):
            add = 0
            delete = 1
        class MsgTqEnum(Enum):
            # Clock accuracy relative to UTC, per the message time-quality field.
            normal_operation_clock_locked_to_utc_traceable_source = 0
            time_within_10_to_9_s_of_utc = 1
            time_within_10_to_8_s_of_utc = 2
            time_within_10_to_7_s_of_utc = 3
            time_within_10_to_6_s_of_utc = 4
            time_within_10_to_5_s_of_utc = 5
            time_within_10_to_4_s_of_utc = 6
            time_within_10_to_3_s_of_utc = 7
            time_within_10_to_2_s_of_utc = 8
            time_within_10_to_1_s_of_utc = 9
            time_within_1_s_of_utc = 10
            time_within_10_s_of_utc = 11
            fault_clock_failure_time_not_reliable = 15
        def __init__(self, _io, _parent=None, _root=None, _time_base=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._time_base = _time_base
            self.reserved = self._io.read_bits_int(1) != 0
            self.leap_second_direction = self._root.Fracsec.LeapSecondDirectionEnum(
                self._io.read_bits_int(1))
            self.leap_second_occurred = self._io.read_bits_int(1) != 0
            self.leap_second_pending = self._io.read_bits_int(1) != 0
            self.time_quality = self._root.Fracsec.MsgTqEnum(
                self._io.read_bits_int(4))
            self.raw_fraction_of_second = self._io.read_bits_int(24)
        @property
        def fraction_of_second(self):
            # Lazily computed: raw counter scaled by the configured time base;
            # None when no configuration (hence no time base) is known.
            if hasattr(self, '_m_fraction_of_second'):
                return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
            if self._time_base:
                self._m_fraction_of_second = self.raw_fraction_of_second / self._time_base
            return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
    @property
    def time(self):
        # Absolute timestamp: SOC plus the scaled fraction of second.
        # NOTE(review): raises TypeError if fraction_of_second is None;
        # callers (__repr__/show) guard on fracsec.fraction_of_second first.
        if hasattr(self, '_m_time'):
            return self._m_time if hasattr(self, '_m_time') else None
        self._m_time = self.soc + self.fracsec.fraction_of_second
        return self._m_time if hasattr(self, '_m_time') else None
    @property
    def chk_body(self):
        # Everything the checksum covers: the frame minus the 2-byte chk.
        # Seeks the underlying stream and restores the position afterwards.
        if hasattr(self, '_m_chk_body'):
            return self._m_chk_body if hasattr(self, '_m_chk_body') else None
        _pos = self._io.pos()
        self._io.seek(0)
        self._m_chk_body = self._io.read_bytes((self.framesize - 2))
        self._io.seek(_pos)
        return self._m_chk_body if hasattr(self, '_m_chk_body') else None
    @property
    def raw_pkt(self):
        # The whole frame as raw bytes, re-read from the recorded start
        # position; stream position is restored afterwards.
        if hasattr(self, '_m_pkt'):
            return self._m_pkt if hasattr(self, '_m_pkt') else None
        _pos = self._io.pos()
        self._io.seek(self._pkt_pos)
        self._m_pkt = self._io.read_bytes(self.framesize)
        self._io.seek(_pos)
        return self._m_pkt if hasattr(self, '_m_pkt') else None
|
{
"content_hash": "e85d61c2fde0dfee76842d7c467d9372",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 106,
"avg_line_length": 38.59836065573771,
"alnum_prop": 0.5262263750265449,
"repo_name": "sonusz/PhasorToolBox",
"id": "cef3ac1b673ed50a5efd8fdc567a73fcf8fa4fe9",
"size": "9531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phasortoolbox/parser/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135499"
}
],
"symlink_target": ""
}
|
from portality.lib import dates, plugin
from datetime import datetime
import json
# Example JSON values used when rendering the sample document for each
# data-object type; "timestamp"/"datestamp" markers are substituted with
# real values by type_map() below.
DO_TYPE_TO_JSON_TYPE = {
    "str": "string",
    "utcdatetime": "timestamp",
    "integer": 0,
    "bool": True,
    "float": 0.0,
    "isolang": "string",
    "url": "string",
    "isolang_2letter": "string",
    "bigenddate" : "datestamp"
}
# Python datatype names shown in the docs table (see datatype()).
DO_TYPE_TO_DATATYPE = {
    "str": "str",
    "utcdatetime": "str",
    "integer": "int",
    "bool": "bool",
    "float": "float",
    "isolang": "str",
    "url": "str",
    "isolang_2letter": "str",
    "bigenddate" : "str"
}
# Human-readable format hints shown in the docs table (see form()).
DO_TYPE_TO_FORMAT = {
    "str": "",
    "utcdatetime": "UTC ISO formatted date: YYYY-MM-DDTHH:MM:SSZ",
    "integer": "",
    "bool": "",
    "float": "",
    "isolang": "3 letter ISO language code",
    "url": "URL",
    "isolang_2letter": "2 letter ISO language code",
    "bigenddate" : "Date, year first: YYYY-MM-DD"
}
def format(klazz, example, fields):
    """Render a markdown document for *klazz*: a title, an example JSON
    structure, and a field-by-field reference table built from *fields*
    (dotted path -> (description, datatype, format, allowed values))."""
    rows = ["| Field | Description | Datatype | Format | Allowed Values |",
            "| ----- | ----------- | -------- | ------ | -------------- |"]
    for field_name in sorted(fields):
        description, dtype, fmt, allowed = fields[field_name]
        rows.append("| {field} | {desc} | {datatype} | {format} | {values} |".format(
            field=field_name, desc=description, datatype=dtype, format=fmt, values=allowed))
    sections = [
        "# " + klazz.__name__,
        "The JSON structure of the model is as follows:",
        "```json\n" + json.dumps(example, indent=4, sort_keys=True) + "\n```",
        "Each of the fields is defined as laid out in the table below. All fields are optional unless otherwise specified:",
        "\n".join(rows) + "\n",
    ]
    return "\n\n".join(sections)
def document(klazz, field_descriptions):
    """Build documentation data for *klazz* by walking its seamless struct.

    :param klazz: model class; instantiating it must expose
        ``__seamless_struct__.raw`` (the struct definition dict).
    :param field_descriptions: dict of dotted field path -> description text.
    :returns: (example, fields) where *example* is a sample JSON-able dict
        and *fields* maps each dotted path to a
        (description, datatype, format, allowed-values) tuple for format().
    """
    inst = klazz()
    base_struct = inst.__seamless_struct__.raw
    fields = {}
    def do_document(path, struct, fields):
        # Recursively renders one struct level: mutates *fields* in place
        # and returns the example sub-document for this level.
        example = {}
        # first do all the fields at this level
        for simple_field, instructions in struct.get('fields', {}).items():
            example[simple_field] = type_map(instructions.get("coerce"))
            fields[path + simple_field] = (field_descriptions.get(path + simple_field, ""), datatype(instructions.get("coerce")), form(instructions.get("coerce")), values_or_range(instructions.get("allowed_values"), instructions.get("allowed_range")))
        # now do all the objects at this level
        for obj in struct.get('objects', []):
            newpath = obj + "." if not path else path + obj + "."
            instructions = struct.get('structs', {}).get(obj, {})
            example[obj] = do_document(newpath, instructions, fields)
        # finally do all the lists at this level
        for l, instructions in struct.get('lists', {}).items():
            if instructions['contains'] == 'field':
                example[l] = [type_map(instructions.get("coerce"))]
                fields[path + l] = (field_descriptions.get(path + l, ""), datatype(instructions.get("coerce")), form(instructions.get("coerce")), values_or_range(instructions.get("allowed_values"), instructions.get("allowed_range")))
            elif instructions['contains'] == 'object':
                newpath = l + "." if not path else path + l + "."
                inst = struct.get('structs', {}).get(l, {})
                example[l] = [do_document(newpath, inst, fields)]
        return example
    example = do_document("", base_struct, fields)
    return example, fields
def type_map(t):
    """Return an example JSON value for data-object type *t*.

    Unknown types default to the example string "string"; timestamp and
    datestamp markers are replaced with the current time/date so the sample
    document shows realistic values.
    """
    # Renamed from `type`, which shadowed the builtin.
    json_type = DO_TYPE_TO_JSON_TYPE.get(t, "string")
    if json_type == "timestamp":
        return dates.now()
    elif json_type == "datestamp":
        return dates.format(datetime.utcnow(), "%Y-%m-%d")
    return json_type
def datatype(t):
    """Map a data-object type name to the Python datatype shown in the docs table."""
    return DO_TYPE_TO_DATATYPE.get(t, "str")
def form(t):
    """Map a data-object type name to the format hint shown in the docs table."""
    return DO_TYPE_TO_FORMAT.get(t, "")
def values_or_range(vals, range):
    """Describe a field's allowed values for the docs table.

    :param vals: explicit list of allowed values, or None.
    :param range: (lower, upper) bound pair, either of which may be None.
    :returns: comma-joined values, "<lower> to <upper>", a one-sided bound
        description, or "" when unconstrained.
    """
    if vals is not None:
        return ", ".join(vals)
    if range is not None:
        lower, upper = range
        if lower is not None and upper is not None:
            return lower + " to " + upper
        elif lower is not None and upper is None:
            # Only a lower bound: values must be at least `lower`.
            # (Bug fix: the one-sided descriptions were swapped.)
            return "greater than " + lower
        elif lower is None and upper is not None:
            return "less than " + upper
    return ""
if __name__ == "__main__":
    # CLI: render the markdown documentation for one model class to a file.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-k", "--klazz", help="class to document")
    parser.add_argument("-o", "--out", help="output file")
    parser.add_argument("-f", "--fields", action="append", help="field descriptions table(s)")
    args = parser.parse_args()
    # Field-description files contain "dotted.field.path: description" lines.
    descriptions = {}
    if args.fields:
        for field_file in args.fields:
            with open(field_file) as f:
                fds = f.read()
            lines = fds.split("\n")
            for line in lines:
                sep = line.find(":")
                descriptions[line[:sep]] = line[sep + 1:].strip()
    k = plugin.load_class_raw(args.klazz)
    example, fields = document(k, descriptions)
    doc = format(k, example, fields)
    with open(args.out, "w") as f:
        f.write(doc)
|
{
"content_hash": "fcc064111e6011aefa55b5aeb5517689",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 251,
"avg_line_length": 34.52348993288591,
"alnum_prop": 0.5746500777604977,
"repo_name": "DOAJ/doaj",
"id": "dafb8b8d957ad51b113da95d8e4b38320ae10c26",
"size": "5144",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/lib/seamlessdoc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Generated-style number validator for the `size` property of
    `funnel.hoverlabel.font`.

    Defaults (overridable via kwargs): per-point arrays allowed
    (``array_ok=True``), ``edit_type="none"``, minimum value 1,
    ``role="style"``.
    """
    def __init__(
        self, plotly_name="size", parent_name="funnel.hoverlabel.font", **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            min=kwargs.pop("min", 1),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
{
"content_hash": "9cc49b6ded0f88effd6dc80426886f91",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 34.4375,
"alnum_prop": 0.573502722323049,
"repo_name": "plotly/python-api",
"id": "57782598c7b1f6819a9766979d9c596198011dff",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/hoverlabel/font/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from pptx import Presentation
from pptx.util import Inches
import io
from os.path import join as opj
import os
__all__ = ['BasicPresentation']
""" (text, fig, table) """
# Key: which of (text, figure, table) a slide has; value: index of the
# slide layout to use for that combination.
layouts = {(True, True, True):0,
           (True, True, False):1,
           (True, False, True):2,
           (True, False, False):3,
           (False, True, False):4,
           (False, False, True):5}
"""TODO:
- Add title slide with date, name and title"""
class BasicPresentation():
    def __init__(self, filename):
        """Create a basic slide deck.
        Note: Figures should be
        6.5" x 6" (W x H) w/o a table
        6.5" x 3.5" w/ table
        12.5" x 6" alone
        or will need resizing"""
        self.filename = filename
        try:
            # Prefer the bundled template; __file__ is undefined in some
            # interactive contexts, hence the NameError fallback.
            self.prs = Presentation(opj(os.path.dirname(__file__), 'basic_pptx.pptx'))
        except NameError:
            self.prs = Presentation()
    def add_slide(self, title=None, text=None, figure=None, table=None):
        """Add one slide; the layout is chosen from which of text/figure/table
        are provided (see the module-level `layouts` mapping)."""
        # Idiom fix: `x is not None` instead of `not x is None`.
        lo = (text is not None, figure is not None, table is not None)
        slide = self.prs.slides.add_slide(self.prs.slide_layouts[layouts[lo]])
        if title is not None:
            slide.shapes.title.text = title
        if text is not None:
            # First bullet uses the placeholder's existing paragraph; the
            # rest are appended as new top-level paragraphs.
            slide.shapes.placeholders[1].text_frame.text = text[0]
            for t in text[1:]:
                p = slide.shapes.placeholders[1].text_frame.add_paragraph()
                p.text = t
                p.level = 0
        if figure is not None:
            # Render the matplotlib figure to an in-memory PNG.
            streamio = io.BytesIO()
            figure.savefig(streamio, format='png', dpi=200)
            streamio.seek(0)
            slide.shapes.placeholders[10].insert_picture(streamio)
        if table is not None:
            # One extra row for the header.
            tab = slide.shapes.placeholders[11].insert_table(table.shape[0] + 1, table.shape[1]).table
            for j in range(table.shape[1]):
                # MultiIndex column labels arrive as tuples; join them.
                if type(table.columns[j]) is tuple:
                    col = '|'.join([str(c) for c in table.columns[j]])
                else:
                    col = str(table.columns[j])
                tab.cell(0, j).text = col
            for i in range(table.shape[0]):
                for j in range(table.shape[1]):
                    tab.cell(i + 1, j).text = str(table.values[i, j])
    def save(self):
        """Write the deck to self.filename."""
        self.prs.save(self.filename)
def test_basic():
    # Manual smoke test: builds a deck exercising every layout combination
    # (text+table+figure, text only, text+table, text+figure, figure only,
    # table only) and writes it to <_git>/test.pptx.
    import pandas as pd
    import matplotlib.pyplot as plt
    import numpy as np
    from fg_shared import _git
    df = pd.DataFrame({'dude':np.arange(5), 'yo':np.linspace(4, 5, 5)})
    bp = BasicPresentation(opj(_git, 'test.pptx'))
    figh = plt.figure(figsize=(13, 7))
    plt.plot(np.random.randn(50), np.random.rand(50), 'o', alpha=0.5)
    bp.add_slide(title='Dude, title!',
                 text=['Bullet 1', 'Bullet 2', 'Bullet 3'],
                 table=df,
                 figure=figh)
    bp.add_slide(title='Dude, title!',
                 text=['Bullet 1', 'Bullet 2', 'Bullet 3'])
    bp.add_slide(title='Dude, title!',
                 text=['Bullet 1', 'Bullet 2', 'Bullet 3'],
                 table=df)
    figh = plt.figure(figsize=(6.5, 6))
    plt.plot(np.random.randn(50), np.random.rand(50), 'o', alpha=0.5)
    bp.add_slide(title='Dude, title!',
                 text=['Bullet 1', 'Bullet 2', 'Bullet 3'],
                 figure=figh)
    figh = plt.figure(figsize=(12.5, 6))
    plt.plot(np.random.randn(50), np.random.rand(50), 'o', alpha=0.5)
    bp.add_slide(title='Dude, title!',
                 figure=figh)
    bp.add_slide(title='Dude, title!',
                 table=df)
    bp.save()
|
{
"content_hash": "bf420edfe7de76e751f9bf924068da09",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 102,
"avg_line_length": 35.08620689655172,
"alnum_prop": 0.5120393120393121,
"repo_name": "agartland/utils",
"id": "bbe8596274ce3c85201c251f5b6ddc9358572d7d",
"size": "4070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic_pptx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "892465"
},
{
"name": "R",
"bytes": "934"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from django_de.apps.authors.models import Author
# Author list view via the legacy function-based generic view
# django.views.generic.list_detail.object_list (pre-1.3 style URLconf).
urlpatterns = patterns('django.views.generic.list_detail',
    (r'^$', 'object_list',
        dict(
            queryset = Author.objects.order_by('name', 'slug'),
            template_object_name = 'author',
            allow_empty=True,
        ),
    )
)
|
{
"content_hash": "483323a5d33f79b4bb469faacdcee603",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 29,
"alnum_prop": 0.6005747126436781,
"repo_name": "django-de/django-de-v2",
"id": "ec6fc365fba5cf24eaa65c0fc66ac4f868483a76",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_de/apps/authors/urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "172"
},
{
"name": "JavaScript",
"bytes": "21778"
},
{
"name": "Python",
"bytes": "58803"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
# IPC - Instructions Per Cycles using Perf Events and
# uprobes
# 24-Apr-2020 Saleem Ahmad Created this.
from bcc import BPF, utils
from optparse import OptionParser
# load BPF program
code="""
#include <uapi/linux/ptrace.h>
struct perf_delta {
u64 clk_delta;
u64 inst_delta;
u64 time_delta;
};
/*
Perf Arrays to read counter values for open
perf events.
*/
BPF_PERF_ARRAY(clk, MAX_CPUS);
BPF_PERF_ARRAY(inst, MAX_CPUS);
// Perf Output
BPF_PERF_OUTPUT(output);
// Per Cpu Data to store start values
BPF_PERCPU_ARRAY(data, u64);
#define CLOCK_ID 0
#define INSTRUCTION_ID 1
#define TIME_ID 2
void trace_start(struct pt_regs *ctx) {
u32 clk_k = CLOCK_ID;
u32 inst_k = INSTRUCTION_ID;
u32 time = TIME_ID;
int cpu = bpf_get_smp_processor_id();
/*
perf_read may return negative values for errors.
If cpu id is greater than BPF_PERF_ARRAY size,
counters values will be very large negative number.
NOTE: Use bpf_perf_event_value is recommended over
bpf_perf_event_read or map.perf_read() due to
issues in ABI. map.perf_read_value() need to be
implemented in future.
*/
u64 clk_start = clk.perf_read(cpu);
u64 inst_start = inst.perf_read(cpu);
u64 time_start = bpf_ktime_get_ns();
u64* kptr = NULL;
kptr = data.lookup(&clk_k);
if (kptr) {
data.update(&clk_k, &clk_start);
} else {
data.insert(&clk_k, &clk_start);
}
kptr = data.lookup(&inst_k);
if (kptr) {
data.update(&inst_k, &inst_start);
} else {
data.insert(&inst_k, &inst_start);
}
kptr = data.lookup(&time);
if (kptr) {
data.update(&time, &time_start);
} else {
data.insert(&time, &time_start);
}
}
void trace_end(struct pt_regs* ctx) {
u32 clk_k = CLOCK_ID;
u32 inst_k = INSTRUCTION_ID;
u32 time = TIME_ID;
int cpu = bpf_get_smp_processor_id();
/*
perf_read may return negative values for errors.
If cpu id is greater than BPF_PERF_ARRAY size,
counters values will be very large negative number.
NOTE: Use bpf_perf_event_value is recommended over
bpf_perf_event_read or map.perf_read() due to
issues in ABI. map.perf_read_value() need to be
implemented in future.
*/
u64 clk_end = clk.perf_read(cpu);
u64 inst_end = inst.perf_read(cpu);
u64 time_end = bpf_ktime_get_ns();
struct perf_delta perf_data = {} ;
u64* kptr = NULL;
kptr = data.lookup(&clk_k);
// Find elements in map, if not found return
if (kptr) {
perf_data.clk_delta = clk_end - *kptr;
} else {
return;
}
kptr = data.lookup(&inst_k);
if (kptr) {
perf_data.inst_delta = inst_end - *kptr;
} else {
return;
}
kptr = data.lookup(&time);
if (kptr) {
perf_data.time_delta = time_end - *kptr;
} else {
return;
}
output.perf_submit(ctx, &perf_data, sizeof(struct perf_delta));
}
"""
# Command-line setup: which library and symbol to attach the probes to.
usage='Usage: ipc.py [options]\nexample ./ipc.py -l c -s strlen'
parser = OptionParser(usage)
parser.add_option('-l', '--lib', dest='lib_name', help='lib name containing symbol to trace, e.g. c for libc', type=str)
parser.add_option('-s', '--sym', dest='sym', help='symbol to trace', type=str)
(options, args) = parser.parse_args()
if (not options.lib_name or not options.sym):
    parser.print_help()
    exit()
# Size the BPF_PERF_ARRAYs to the number of online CPUs.
num_cpus = len(utils.get_online_cpus())
b = BPF(text=code, cflags=['-DMAX_CPUS=%s' % str(num_cpus)])
# Attach Probes at start and end of the trace function
# NOTE: When attaching to a function for tracing, during runtime relocation
# stage by linker, function will be called once to return a different function
# address, which will be called by the process. e.g. in case of strlen
# after relocation stage, __strlen_sse2 is called instread of strlen.
# NOTE: There will be a context switch from userspace to kernel space,
# on caputring counters on USDT probes, so actual IPC might be slightly different.
# This example is to give a reference on how to use perf events with tracing.
b.attach_uprobe(name=options.lib_name, sym=options.sym, fn_name="trace_start")
b.attach_uretprobe(name=options.lib_name, sym=options.sym, fn_name="trace_end")
def print_data(cpu, data, size):
    # Perf-buffer callback: unpack one perf_delta record and print cycles,
    # instructions, IPC, elapsed time (ns scaled to us) and the CPU id.
    e = b["output"].event(data)
    print("%-8d %-12d %-8.2f %-8s %d" % (e.clk_delta, e.inst_delta,
        1.0* e.inst_delta/e.clk_delta, str(round(e.time_delta * 1e-3, 2)) + ' us', cpu))
print("Counters Data")
print("%-8s %-12s %-8s %-8s %s" % ('CLOCK', 'INSTRUCTION', 'IPC', 'TIME', 'CPU'))
b["output"].open_perf_buffer(print_data)
# Perf Event for Unhalted Cycles, The hex value is
# combination of event, umask and cmask. Read Intel
# Doc to find the event and cmask. Or use
# perf list --details to get event, umask and cmask
# NOTE: Events can be multiplexed by kernel in case the
# number of counters is greater than supported by CPU
# performance monitoring unit, which can result in inaccurate
# results. Counter values need to be normalized for a more
# accurate value.
PERF_TYPE_RAW = 4
# Unhalted Clock Cycles
b["clk"].open_perf_event(PERF_TYPE_RAW, 0x0000003C)
# Instruction Retired
b["inst"].open_perf_event(PERF_TYPE_RAW, 0x000000C0)
# Poll until interrupted; Ctrl-C exits cleanly.
while True:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
|
{
"content_hash": "b24d56b05d07e9153b2bd9af01be4b17",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 120,
"avg_line_length": 29.605555555555554,
"alnum_prop": 0.6535935447551136,
"repo_name": "tuxology/bcc",
"id": "51d15f6c0119f937f56e1d81a7a09a1aeb6e8c89",
"size": "5329",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/perf/ipc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "119767"
},
{
"name": "C++",
"bytes": "744166"
},
{
"name": "CMake",
"bytes": "27864"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "230597"
},
{
"name": "Makefile",
"bytes": "1480"
},
{
"name": "Objective-C",
"bytes": "20501"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "325821"
},
{
"name": "Shell",
"bytes": "9047"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
}
|
"""
[01/14/13] Challenge #117 [Easy] Hexdump to ASCII
https://www.reddit.com/r/dailyprogrammer/comments/16jiuq/011413_challenge_117_easy_hexdump_to_ascii/
# [](#EasyIcon) *(Easy)*: Hexdump to ASCII
Hexadecimal is a base-16 representation of a number. A single byte of information, as an unsigned integer, can have a
value of 0 to 255 in decimal. This byte can be represented in hexadecimal, from a range of 0x0 to 0xFF in hexadecimal.
Your job is to open a given file (using the given file name) and print every byte's hexadecimal value.
*Author: PoppySeedPlehzr*
# Formal Inputs & Outputs
## Input Description
As a program command-line argument to the program, accept a valid file name.
## Output Description
Print the given file's contents, where each byte of the file must be printed in hexadecimal form. Your program must
print 16 bytes per line, where there is a space between each hexadecimal byte. Each line must start with the line
number, starting from line 0, and must also count in hexadecimal.
# Sample Inputs & Outputs
## Sample Input
"MyFile.txt" (This file is an arbitrary file as an example)
## Sample Output
00000000 37 7A BC AF 27 1C 00 03 38 67 83 24 70 00 00 00
00000001 00 00 00 00 49 00 00 00 00 00 00 00 64 FC 7F 06
00000002 00 28 12 BC 60 28 97 D5 68 12 59 8C 17 8F FE D8
00000003 0E 5D 2C 27 BC D1 87 F6 D2 BE 9B 92 90 E8 FD BA
00000004 A2 B8 A9 F4 BE A6 B8 53 10 E3 BD 60 05 2B 5C 95
00000005 C4 50 B4 FC 10 DE 58 80 0C F5 E1 C0 AC 36 30 74
00000006 82 8B 42 7A 06 A5 D0 0F C2 4F 7B 27 6C 5D 96 24
00000007 25 4F 3A 5D F4 B2 C0 DB 79 3C 86 48 AB 2D 57 11
00000008 53 27 50 FF 89 02 20 F6 31 C2 41 72 84 F7 C9 00
00000009 01 04 06 00 01 09 70 00 07 0B 01 00 01 23 03 01
0000000A 01 05 5D 00 00 01 00 0C 80 F5 00 08 0A 01 A8 3F
0000000B B1 B7 00 00 05 01 11 0B 00 64 00 61 00 74 00 61
0000000C 00 00 00 14 0A 01 00 68 6E B8 CF BC A0 CD 01 15
0000000D 06 01 00 20 00 00 00 00 00
# Challenge Input
Give your program its own binary file, and have it print itself out!
## Challenge Input Solution
This is dependent on how you write your code and what platform you are on.
# Note
* As an added bonus, attempt to print out any ASCII strings, if such data is found in your given file.
"""
def main():
    """Hexdump the file named on the command line.

    Implements the challenge described in the module docstring: read the
    file given as the first command-line argument and print its bytes
    16 per line, each line prefixed with an 8-digit hexadecimal line
    counter starting at 0, bytes as two-digit uppercase hex separated
    by single spaces (matching the sample output).
    """
    import sys

    if len(sys.argv) < 2:
        print("usage: %s <file>" % sys.argv[0])
        return
    with open(sys.argv[1], "rb") as fp:
        data = fp.read()
    for offset in range(0, len(data), 16):
        chunk = data[offset:offset + 16]
        # bytearray() so iteration yields ints on both Python 2 and 3.
        hex_bytes = " ".join("%02X" % b for b in bytearray(chunk))
        print("%08X %s" % (offset // 16, hex_bytes))


if __name__ == "__main__":
    main()
|
{
"content_hash": "a20c49ce2dd528bc4795579d45009e55",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 118,
"avg_line_length": 45.72549019607843,
"alnum_prop": 0.7165523156089194,
"repo_name": "DayGitH/Python-Challenges",
"id": "8628832167b30d3bcaeadd49e22faceea7b74669",
"size": "2332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20130114A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from itertools import chain
from operator import attrgetter
from .._compat import ffilter
from ._summarizer import AbstractSummarizer
class EdmundsonLocationMethod(AbstractSummarizer):
    """Edmundson's location heuristic.

    Sentences are scored by the heading-derived significant words they
    contain (weighted by ``w_h``) plus positional bonuses for appearing
    in the first/last paragraph (``w_p1``/``w_p2``) or as the first/last
    sentence of a paragraph (``w_s1``/``w_s2``).
    """

    def __init__(self, stemmer, null_words):
        super(EdmundsonLocationMethod, self).__init__(stemmer)
        self._null_words = null_words

    def __call__(self, document, sentences_count, w_h, w_p1, w_p2, w_s1, w_s2):
        """Return the ``sentences_count`` best-rated sentences of ``document``."""
        heading_words = self._compute_significant_words(document)
        scores = self._rate_sentences(
            document, heading_words, w_h, w_p1, w_p2, w_s1, w_s2)
        return self._get_best_sentences(document.sentences, sentences_count, scores)

    def _compute_significant_words(self, document):
        # Stem every word appearing in a heading, then keep those that
        # pass the null-word membership test.
        words = chain.from_iterable(map(attrgetter("words"), document.headings))
        stemmed = map(self.stem_word, words)
        return frozenset(ffilter(self._is_null_word, stemmed))

    def _is_null_word(self, word):
        return word in self._null_words

    def _rate_sentences(self, document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2):
        """Map every sentence of ``document`` to its location-weighted rating."""
        ratings = {}
        paragraphs = document.paragraphs
        last_paragraph = len(paragraphs) - 1
        for p_idx, paragraph in enumerate(paragraphs):
            sentences = paragraph.sentences
            last_sentence = len(sentences) - 1
            for s_idx, sentence in enumerate(sentences):
                score = w_h * self._rate_sentence(sentence, significant_words)
                if p_idx == 0:
                    score += w_p1
                elif p_idx == last_paragraph:
                    score += w_p2
                if s_idx == 0:
                    score += w_s1
                elif s_idx == last_sentence:
                    score += w_s2
                ratings[sentence] = score
        return ratings

    def _rate_sentence(self, sentence, significant_words):
        # Each word contributes 1 if its stem is significant, else 0.
        stemmed = map(self.stem_word, sentence.words)
        return sum(word in significant_words for word in stemmed)

    def rate_sentences(self, document, w_h=1, w_p1=1, w_p2=1, w_s1=1, w_s2=1):
        """Public helper: rate all sentences with default unit weights."""
        significant_words = self._compute_significant_words(document)
        return self._rate_sentences(document, significant_words, w_h, w_p1, w_p2, w_s1, w_s2)
|
{
"content_hash": "38aa771af905c1fa4453a8785272dcb6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 93,
"avg_line_length": 38.9375,
"alnum_prop": 0.6292134831460674,
"repo_name": "miso-belica/sumy",
"id": "04ec5db84ee15322b899cf1d02922a0e6177a506",
"size": "2517",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sumy/summarizers/edmundson_location.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "396"
},
{
"name": "Python",
"bytes": "205470"
}
],
"symlink_target": ""
}
|
# Inspect which base glyphs the two components of "adieresis" reference.
# NOTE(review): `f` is presumably a RoboFab font opened earlier in this
# example file — confirm against the surrounding documentation.
# (Python 2 print statements; this is an old RoboFab docs example.)
print f['adieresis'].components[0].baseGlyph
print f['adieresis'].components[1].baseGlyph
# move the component in the base glyph
f['adieresis'].components[1].offset = (100,100)
# scale the component in the base glyph
f['adieresis'].components[0].scale = (.5, .25)
|
{
"content_hash": "acb57ee64d53787e79c6a37519d9ecea",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 47,
"avg_line_length": 33.25,
"alnum_prop": 0.7255639097744361,
"repo_name": "daltonmaag/robofab",
"id": "b8e9fe291ae6fe415dff20c8133e09e310e49ee2",
"size": "324",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Docs/Examples/objects/RComponent_01.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8714"
},
{
"name": "HTML",
"bytes": "4597"
},
{
"name": "Makefile",
"bytes": "6776"
},
{
"name": "Python",
"bytes": "918844"
}
],
"symlink_target": ""
}
|
from .th import *
|
{
"content_hash": "498a3e819723517a8bd5f61047f8150a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 17,
"avg_line_length": 18,
"alnum_prop": 0.6666666666666666,
"repo_name": "hangzhaomit/semantic-segmentation-pytorch",
"id": "abe3cbe49477fe37d4fc16249de8a10f4fb4a013",
"size": "18",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112512"
},
{
"name": "Shell",
"bytes": "221"
}
],
"symlink_target": ""
}
|
import requests, xmltodict
def getChannels():
    """Fetch every channel from the Sveriges Radio open API.

    Returns the parsed ``<channels>`` element as a dictionary.
    """
    response = requests.get("http://api.sr.se/api/v2/channels/")
    parsed = xmltodict.parse(response.content)
    return parsed['sr']['channels']
def getChannel(channelID):
    """Fetch a single channel by id so its current info can be shown.

    Returns the parsed ``<channel>`` element, or an ``{'error': ...}``
    dict when the channel does not exist.
    """
    response = requests.get("http://api.sr.se/api/v2/channels/" + str(channelID))
    # SR answers 404 when the channel id does not exist.
    if response.status_code == 404:
        return {'error': "Channel does not exist"}
    parsed = xmltodict.parse(response.content)
    return parsed['sr']['channel']
def _playlistEntry(channelID, key):
    ''' Helper shared by previousPlaying/getPlaying/nextPlaying: fetch the
    "right now" playlist of a channel and return the entry stored under
    key ('previoussong', 'song' or 'nextsong'). Returns an error dict
    when the API has no such information. '''
    urlLink = "http://api.sr.se/api/v2/playlists/rightnow?channelid=" + str(channelID)
    r = requests.get(urlLink)
    playlistDict = xmltodict.parse(r.content)
    formattedDict = playlistDict['sr']['playlist']
    if key in formattedDict:
        return formattedDict[key]
    else:
        return {'error': 'No information available'}

def previousPlaying(channelID):
    ''' This function gets the channels' playlist which includes information about the previous song and artist.
    If there is no information available an error-message will appear '''
    return _playlistEntry(channelID, 'previoussong')

def getPlaying(channelID):
    ''' This function gets the channels' playlist which includes information about the current song and artist.
    If there is no information available an error-message will appear '''
    return _playlistEntry(channelID, 'song')

def nextPlaying(channelID):
    ''' This function gets the channels' playlist which includes information about the next song and artist.
    If there is no information available an error-message will appear '''
    return _playlistEntry(channelID, 'nextsong')
|
{
"content_hash": "ce721e5c8f089b784949120909dc9d4b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 112,
"avg_line_length": 39.765625,
"alnum_prop": 0.6978388998035363,
"repo_name": "mariaholmberg283/WebbtjansterGruppDiggarn",
"id": "c1f372cfddab4a5dda7b7df62c1513f72026a891",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sr_communication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2064"
},
{
"name": "HTML",
"bytes": "16412"
},
{
"name": "Python",
"bytes": "11197"
}
],
"symlink_target": ""
}
|
from .base import View
class EchoView(View):
    """Echoes request content back to the caller; intended for debugging."""

    def get(self):
        """
        returns the query string as JSON

        .. note::

            .. code-block:: text

                /api/echo?a=b&a=c&c=d

            gives:

            .. code-block:: json

                {"a": ["b", "c"], "c": "d"}
        """
        params = self.request.params
        return params.mixed()

    def post(self):
        """
        returns JSON body POSTed

        :statuscode 400: on invalid json
        """
        body = self.request.json_body
        return body

    def put(self):
        """
        returns JSON body PUT'd

        :statuscode 400: on invalid json
        """
        body = self.request.json_body
        return body
|
{
"content_hash": "48f16743a84aff48cd4caecb0bb7ed11",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 43,
"avg_line_length": 18.487179487179485,
"alnum_prop": 0.47295423023578365,
"repo_name": "nthuion/nthuion-backend",
"id": "aac36258a0b569685a9e6510ffd4fe665eb8429b",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nthuion/views/echo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "124916"
}
],
"symlink_target": ""
}
|
"""MB - the Meta-Build wrapper around GYP and GN
MB is a wrapper script for GYP and GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import pprint
import shlex
import shutil
import sys
import subprocess
import tempfile
def main(args):
  """Entry point: parse args and invoke the selected subcommand handler."""
  wrapper = MetaBuildWrapper()
  wrapper.ParseArgs(args)
  return wrapper.args.func()
class MetaBuildWrapper(object):
def __init__(self):
  """Set up defaults: locate the src tree and the default config file."""
  p = os.path
  d = os.path.dirname
  # mb.py lives in src/tools/mb/, so three dirname() calls up is src/.
  self.chromium_src_dir = p.normpath(d(d(d(p.abspath(__file__)))))
  self.default_config = p.join(self.chromium_src_dir, 'tools', 'mb',
                               'mb_config.pyl')
  self.platform = sys.platform
  self.args = argparse.Namespace()
  # The following are populated from the config file by ReadConfigFile().
  self.configs = {}
  self.masters = {}
  self.mixins = {}
  self.private_configs = []
  self.common_dev_configs = []
  self.unsupported_configs = []
def ParseArgs(self, argv):
  """Build the argparse parser for every subcommand and parse argv.

  Stores the parsed namespace in self.args; each subparser sets
  args.func to the handler that main() will invoke.
  """
  def AddCommonOptions(subp):
    # Options shared by the analyze/gen/lookup subcommands.
    subp.add_argument('-b', '--builder',
                      help='builder name to look up config from')
    subp.add_argument('-m', '--master',
                      help='master name to look up config from')
    subp.add_argument('-c', '--config',
                      help='configuration to analyze')
    subp.add_argument('-f', '--config-file', metavar='PATH',
                      default=self.default_config,
                      help='path to config file '
                           '(default is //tools/mb/mb_config.pyl)')
    subp.add_argument('-g', '--goma-dir', default=self.ExpandUser('~/goma'),
                      help='path to goma directory (default is %(default)s).')
    subp.add_argument('-n', '--dryrun', action='store_true',
                      help='Do a dry run (i.e., do nothing, just print '
                           'the commands that will run)')
    subp.add_argument('-q', '--quiet', action='store_true',
                      help='Do not print anything on success, '
                           'just return an exit code.')
    subp.add_argument('-v', '--verbose', action='count',
                      help='verbose logging (may specify multiple times).')

  parser = argparse.ArgumentParser(prog='mb')
  subps = parser.add_subparsers()

  subp = subps.add_parser('analyze',
                          help='analyze whether changes to a set of files '
                               'will cause a set of binaries to be rebuilt.')
  AddCommonOptions(subp)
  subp.add_argument('--swarming-targets-file',
                    help='save runtime dependencies for targets listed '
                         'in file.')
  subp.add_argument('path', nargs=1,
                    help='path build was generated into.')
  subp.add_argument('input_path', nargs=1,
                    help='path to a file containing the input arguments '
                         'as a JSON object.')
  subp.add_argument('output_path', nargs=1,
                    help='path to a file containing the output arguments '
                         'as a JSON object.')
  subp.set_defaults(func=self.CmdAnalyze)

  subp = subps.add_parser('gen',
                          help='generate a new set of build files')
  AddCommonOptions(subp)
  subp.add_argument('--swarming-targets-file',
                    help='save runtime dependencies for targets listed '
                         'in file.')
  subp.add_argument('path', nargs=1,
                    help='path to generate build into')
  subp.set_defaults(func=self.CmdGen)

  subp = subps.add_parser('lookup',
                          help='look up the command for a given config or '
                               'builder')
  AddCommonOptions(subp)
  subp.set_defaults(func=self.CmdLookup)

  # 'validate' deliberately does not take the common options; it only
  # needs the config-file path and the quiet flag.
  subp = subps.add_parser('validate',
                          help='validate the config file')
  subp.add_argument('-f', '--config-file', metavar='PATH',
                    default=self.default_config,
                    help='path to config file '
                         '(default is //tools/mb/mb_config.pyl)')
  subp.add_argument('-q', '--quiet', action='store_true',
                    help='Do not print anything on success, '
                         'just return an exit code.')
  subp.set_defaults(func=self.CmdValidate)

  subp = subps.add_parser('help',
                          help='Get help on a subcommand.')
  subp.add_argument(nargs='?', action='store', dest='subcommand',
                    help='The command to get help for.')
  subp.set_defaults(func=self.CmdHelp)

  self.args = parser.parse_args(argv)
def CmdAnalyze(self):
  """Handler for 'mb analyze': dispatch on the meta-build type."""
  vals = self.GetConfig()
  mb_type = vals['type']
  if mb_type == 'gn':
    return self.RunGNAnalyze(vals)
  if mb_type == 'gyp':
    return self.RunGYPAnalyze(vals)
  raise MBErr('Unknown meta-build type "%s"' % mb_type)

def CmdGen(self):
  """Handler for 'mb gen': generate build files via GN or GYP."""
  vals = self.GetConfig()
  mb_type = vals['type']
  if mb_type == 'gn':
    return self.RunGNGen(vals)
  if mb_type == 'gyp':
    return self.RunGYPGen(vals)
  raise MBErr('Unknown meta-build type "%s"' % mb_type)

def CmdLookup(self):
  """Handler for 'mb lookup': print the command a config would run."""
  vals = self.GetConfig()
  mb_type = vals['type']
  if mb_type == 'gn':
    cmd = self.GNCmd('gen', '<path>', vals['gn_args'])
  elif mb_type == 'gyp':
    cmd = self.GYPCmd('<path>', vals['gyp_defines'], vals['gyp_config'])
  else:
    raise MBErr('Unknown meta-build type "%s"' % mb_type)

  self.PrintCmd(cmd)
  return 0

def CmdHelp(self):
  """Handler for 'mb help': re-parse with --help for the subcommand."""
  if self.args.subcommand:
    self.ParseArgs([self.args.subcommand, '--help'])
  else:
    self.ParseArgs(['--help'])
def CmdValidate(self):
  """Handler for 'mb validate': sanity-check the config file.

  Verifies that every config is listed in exactly one category, that
  every referenced config and mixin exists, and that no config or
  mixin is unused. Raises MBErr listing all problems found.
  """
  errs = []

  # Read the file to make sure it parses.
  self.ReadConfigFile()

  # Figure out the whole list of configs and ensure that no config is
  # listed in more than one category.
  all_configs = {}
  for config in self.common_dev_configs:
    all_configs[config] = 'common_dev_configs'

  for config in self.private_configs:
    if config in all_configs:
      # BUG FIX: this used to index all_configs['config'] (the literal
      # string), raising KeyError instead of reporting the problem.
      errs.append('config "%s" listed in "private_configs" also '
                  'listed in "%s"' % (config, all_configs[config]))
    else:
      all_configs[config] = 'private_configs'

  for config in self.unsupported_configs:
    if config in all_configs:
      # BUG FIX: same literal-key bug as above.
      errs.append('config "%s" listed in "unsupported_configs" also '
                  'listed in "%s"' % (config, all_configs[config]))
    else:
      all_configs[config] = 'unsupported_configs'

  for master in self.masters:
    for builder in self.masters[master]:
      config = self.masters[master][builder]
      # A config may be shared by several masters; only flag it when the
      # earlier listing was one of the non-master categories.
      if config in all_configs and all_configs[config] not in self.masters:
        errs.append('Config "%s" used by a bot is also listed in "%s".' %
                    (config, all_configs[config]))
      else:
        all_configs[config] = master

  # Check that every referenced config actually exists.
  for config, loc in all_configs.items():
    if config not in self.configs:
      errs.append('Unknown config "%s" referenced from "%s".' %
                  (config, loc))

  # Check that every actual config is actually referenced.
  for config in self.configs:
    if config not in all_configs:
      errs.append('Unused config "%s".' % config)

  # Figure out the whole list of mixins, and check that every mixin
  # listed by a config or another mixin actually exists.
  referenced_mixins = set()
  for config, mixins in self.configs.items():
    for mixin in mixins:
      if mixin not in self.mixins:
        errs.append('Unknown mixin "%s" referenced by config "%s".' %
                    (mixin, config))
      referenced_mixins.add(mixin)

  for mixin in self.mixins:
    for sub_mixin in self.mixins[mixin].get('mixins', []):
      if sub_mixin not in self.mixins:
        errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
                    (sub_mixin, mixin))
      referenced_mixins.add(sub_mixin)

  # Check that every mixin defined is actually referenced somewhere.
  for mixin in self.mixins:
    if mixin not in referenced_mixins:
      errs.append('Unreferenced mixin "%s".' % mixin)

  if errs:
    # BUG FIX: the file name was never substituted into the message; the
    # original concatenated the error list onto the format string.
    raise MBErr('mb config file %s has problems:\n  %s' %
                (self.args.config_file, '\n  '.join(errs)))

  if not self.args.quiet:
    self.Print('mb config file %s looks ok.' % self.args.config_file)
  return 0
def GetConfig(self):
  """Read the config file and return the flattened config for self.args."""
  self.ReadConfigFile()
  config = self.ConfigFromArgs()
  if config not in self.configs:
    raise MBErr('Config "%s" not found in %s' %
                (config, self.args.config_file))

  return self.FlattenConfig(config)

def ReadConfigFile(self):
  """Parse the .pyl config file into the instance attributes."""
  if not self.Exists(self.args.config_file):
    raise MBErr('config file not found at %s' % self.args.config_file)

  try:
    contents = ast.literal_eval(self.ReadFile(self.args.config_file))
  except SyntaxError as e:
    raise MBErr('Failed to parse config file "%s": %s' %
                (self.args.config_file, e))

  # The attribute names match the top-level keys of the config file.
  for key in ('common_dev_configs', 'configs', 'masters', 'mixins',
              'private_configs', 'unsupported_configs'):
    setattr(self, key, contents[key])

def ConfigFromArgs(self):
  """Resolve the config name from either -c or the -m/-b pair."""
  if self.args.config:
    if self.args.master or self.args.builder:
      raise MBErr('Can not specific both -c/--config and -m/--master or '
                  '-b/--builder')
    return self.args.config

  if not self.args.master or not self.args.builder:
    raise MBErr('Must specify either -c/--config or '
                '(-m/--master and -b/--builder)')

  if self.args.master not in self.masters:
    raise MBErr('Master name "%s" not found in "%s"' %
                (self.args.master, self.args.config_file))

  if self.args.builder not in self.masters[self.args.master]:
    raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
                (self.args.builder, self.args.master, self.args.config_file))

  return self.masters[self.args.master][self.args.builder]
def FlattenConfig(self, config):
  """Return the merged value dict for `config` by expanding its mixins."""
  vals = {
      'type': None,
      'gn_args': [],
      'gyp_config': [],
      'gyp_defines': [],
  }
  self.FlattenMixins(self.configs[config], vals, [])
  return vals

def FlattenMixins(self, mixins, vals, visited):
  """Merge each mixin (depth-first, in listed order) into `vals` in place."""
  for name in mixins:
    if name not in self.mixins:
      raise MBErr('Unknown mixin "%s"' % name)

    # TODO: check for cycles in mixins.
    visited.append(name)

    mixin_vals = self.mixins[name]
    if 'type' in mixin_vals:
      vals['type'] = mixin_vals['type']
    if 'gn_args' in mixin_vals:
      # gn_args accumulate, space-separated.
      if vals['gn_args']:
        vals['gn_args'] += ' ' + mixin_vals['gn_args']
      else:
        vals['gn_args'] = mixin_vals['gn_args']
    if 'gyp_config' in mixin_vals:
      vals['gyp_config'] = mixin_vals['gyp_config']
    if 'gyp_defines' in mixin_vals:
      # gyp_defines accumulate, space-separated.
      if vals['gyp_defines']:
        vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
      else:
        vals['gyp_defines'] = mixin_vals['gyp_defines']
    if 'mixins' in mixin_vals:
      # Nested mixins are applied after this mixin's own values.
      self.FlattenMixins(mixin_vals['mixins'], vals, visited)
  return vals
def RunGNGen(self, vals):
  """Run 'gn gen' for the config, optionally emitting swarming files.

  If --swarming-targets-file was given, asks GN to also write out the
  runtime dependencies of those targets and converts them into
  .isolate and .isolated.gen.json files.
  """
  path = self.args.path[0]

  cmd = self.GNCmd('gen', path, vals['gn_args'])

  swarming_targets = []
  if self.args.swarming_targets_file:
    # We need GN to generate the list of runtime dependencies for
    # the compile targets listed (one per line) in the file so
    # we can run them via swarming. We use ninja_to_gn.pyl to convert
    # the compile targets to the matching GN labels.
    contents = self.ReadFile(self.args.swarming_targets_file)
    swarming_targets = contents.splitlines()
    gn_isolate_map = ast.literal_eval(self.ReadFile(os.path.join(
        self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
    gn_labels = []
    for target in swarming_targets:
      if not target in gn_isolate_map:
        raise MBErr('test target "%s" not found in %s' %
                    (target, '//testing/buildbot/gn_isolate_map.pyl'))
      gn_labels.append(gn_isolate_map[target]['label'])

    gn_runtime_deps_path = self.ToAbsPath(path, 'runtime_deps')

    # Since GN hasn't run yet, the build directory may not even exist.
    self.MaybeMakeDirectory(self.ToAbsPath(path))

    self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
    cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)

  ret, _, _ = self.Run(cmd)

  # Convert each target's runtime_deps output into isolate files.
  for target in swarming_targets:
    if sys.platform == 'win32':
      deps_path = self.ToAbsPath(path, target + '.exe.runtime_deps')
    else:
      deps_path = self.ToAbsPath(path, target + '.runtime_deps')
    if not self.Exists(deps_path):
      raise MBErr('did not generate %s' % deps_path)

    command, extra_files = self.GetIsolateCommand(target, vals,
                                                  gn_isolate_map)

    runtime_deps = self.ReadFile(deps_path).splitlines()

    isolate_path = self.ToAbsPath(path, target + '.isolate')
    self.WriteFile(isolate_path,
                   pprint.pformat({
                     'variables': {
                       'command': command,
                       'files': sorted(runtime_deps + extra_files),
                     }
                   }) + '\n')

    self.WriteJSON(
      {
        'args': [
          '--isolated',
          self.ToSrcRelPath('%s%s%s.isolated' % (path, os.sep, target)),
          '--isolate',
          self.ToSrcRelPath('%s%s%s.isolate' % (path, os.sep, target)),
        ],
        'dir': self.chromium_src_dir,
        'version': 1,
      },
      isolate_path + 'd.gen.json',
    )

  return ret
def GNCmd(self, subcommand, path, gn_args=''):
  """Build the 'gn' command line using the platform-specific binary."""
  if self.platform == 'linux2':
    subdir, exe = 'linux64', 'gn'
  elif self.platform == 'darwin':
    subdir, exe = 'mac', 'gn'
  else:
    subdir, exe = 'win', 'gn.exe'
  gn_path = os.path.join(self.chromium_src_dir, 'buildtools', subdir, exe)

  cmd = [gn_path, subcommand, path]
  # Substitute the goma directory into the GN args before passing them.
  gn_args = gn_args.replace("$(goma_dir)", self.args.goma_dir)
  if gn_args:
    cmd.append('--args=%s' % gn_args)
  return cmd
def RunGYPGen(self, vals):
  """Run gyp_chromium to generate build files for the config."""
  path = self.args.path[0]

  # The build dir's last component must agree with the config's
  # gyp_config (e.g. //out/Release vs 'Release').
  output_dir, gyp_config = self.ParseGYPConfigPath(path)
  if gyp_config != vals['gyp_config']:
    raise MBErr('The last component of the path (%s) must match the '
                'GYP configuration specified in the config (%s), and '
                'it does not.' % (gyp_config, vals['gyp_config']))
  cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
  ret, _, _ = self.Run(cmd)
  return ret

def RunGYPAnalyze(self, vals):
  """Run GYP in analyzer mode to map changed files to affected targets."""
  output_dir, gyp_config = self.ParseGYPConfigPath(self.args.path[0])
  if gyp_config != vals['gyp_config']:
    raise MBErr('The last component of the path (%s) must match the '
                'GYP configuration specified in the config (%s), and '
                'it does not.' % (gyp_config, vals['gyp_config']))
  if self.args.verbose:
    # NOTE(review): GetAnalyzeInput is defined elsewhere in this file.
    inp = self.GetAnalyzeInput()
    self.Print()
    self.Print('analyze input:')
    self.PrintJSON(inp)
    self.Print()

  cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
  # config_path/analyzer_output_path switch GYP into analyzer mode.
  cmd.extend(['-G', 'config_path=%s' % self.args.input_path[0],
              '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
  ret, _, _ = self.Run(cmd)
  if not ret and self.args.verbose:
    outp = json.loads(self.ReadFile(self.args.output_path[0]))
    self.Print()
    self.Print('analyze output:')
    self.PrintJSON(outp)
    self.Print()

  return ret
def RunGNIsolate(self, vals):
  """Write .isolate and .isolated.gen.json files for the input targets."""
  build_path = self.args.path[0]
  inp = self.ReadInputJSON(['targets'])
  if self.args.verbose:
    self.Print()
    self.Print('isolate input:')
    self.PrintJSON(inp)
    self.Print()
  output_path = self.args.output_path[0]

  for target in inp['targets']:
    runtime_deps_path = self.ToAbsPath(build_path, target + '.runtime_deps')

    if not self.Exists(runtime_deps_path):
      self.WriteFailureAndRaise('"%s" does not exist' % runtime_deps_path,
                                output_path)

    # NOTE(review): GetIsolateCommand indexes its gn_isolate_map argument
    # unconditionally, so passing None here looks like it would raise a
    # TypeError before any branch is taken — confirm intended usage.
    command, extra_files = self.GetIsolateCommand(target, vals, None)

    runtime_deps = self.ReadFile(runtime_deps_path).splitlines()

    isolate_path = self.ToAbsPath(build_path, target + '.isolate')
    self.WriteFile(isolate_path,
                   pprint.pformat({
                     'variables': {
                       'command': command,
                       'files': sorted(runtime_deps + extra_files),
                     }
                   }) + '\n')

    self.WriteJSON(
      {
        'args': [
          '--isolated',
          self.ToSrcRelPath('%s/%s.isolated' % (build_path, target)),
          '--isolate',
          self.ToSrcRelPath('%s/%s.isolate' % (build_path, target)),
        ],
        'dir': self.chromium_src_dir,
        'version': 1,
      },
      isolate_path + 'd.gen.json',
    )

  return 0
def GetIsolateCommand(self, target, vals, gn_isolate_map):
  """Return (cmdline, extra_files) for running `target` under swarming.

  gn_isolate_map maps ninja target names to entries with a 'type' key
  (and optional 'args') from //testing/buildbot/gn_isolate_map.pyl.
  Raises (via WriteFailureAndRaise) for unknown test types.
  """
  # This needs to mirror the settings in //build/config/ui.gni:
  # use_x11 = is_linux && !use_ozone.
  # TODO(dpranke): Figure out how to keep this in sync better.
  use_x11 = (sys.platform == 'linux2' and
             not 'target_os="android"' in vals['gn_args'] and
             not 'use_ozone=true' in vals['gn_args'])

  asan = 'is_asan=true' in vals['gn_args']
  msan = 'is_msan=true' in vals['gn_args']
  tsan = 'is_tsan=true' in vals['gn_args']

  executable_suffix = '.exe' if sys.platform == 'win32' else ''

  test_type = gn_isolate_map[target]['type']
  cmdline = []
  extra_files = []

  if use_x11 and test_type == 'windowed_test_launcher':
    extra_files = [
      'xdisplaycheck',
      '../../testing/test_env.py',
      '../../testing/xvfb.py',
    ]
    cmdline = [
      '../../testing/xvfb.py',
      '.',
      './' + str(target),
      '--brave-new-test-launcher',
      '--test-launcher-bot-mode',
      '--asan=%d' % asan,
      '--msan=%d' % msan,
      '--tsan=%d' % tsan,
    ]
  elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
    extra_files = [
      '../../testing/test_env.py'
    ]
    cmdline = [
      '../../testing/test_env.py',
      './' + str(target) + executable_suffix,
      '--brave-new-test-launcher',
      '--test-launcher-bot-mode',
      '--asan=%d' % asan,
      '--msan=%d' % msan,
      '--tsan=%d' % tsan,
    ]
  elif test_type == 'raw':
    # BUG FIX: this was `test_type in ('raw')` -- a substring test against
    # the string 'raw' (the parentheses did not make a tuple), which also
    # matched 'r', 'a', 'w', 'ra', and 'aw'.
    extra_files = []
    # BUG FIX: .get('args') returned None when 'args' was absent, which
    # made the list concatenation raise TypeError; default to [].
    cmdline = [
      './' + str(target) + executable_suffix,
    ] + gn_isolate_map[target].get('args', [])

  else:
    self.WriteFailureAndRaise('No command line for %s found (test type %s).'
                              % (target, test_type), output_path=None)

  return cmdline, extra_files
def ToAbsPath(self, build_path, *comps):
  """Join a source-absolute build path (plus extra components) onto src/."""
  rel = self.ToSrcRelPath(build_path)
  return os.path.join(self.chromium_src_dir, rel, *comps)

def ToSrcRelPath(self, path):
  """Returns a relative path from the top of the repo."""
  # TODO: Support normal paths in addition to source-absolute paths.
  assert(path.startswith('//'))
  return path[2:].replace('/', os.sep)

def ParseGYPConfigPath(self, path):
  """Split '//out/Release' into ('out', 'Release'), validating the config."""
  relative = self.ToSrcRelPath(path)
  output_dir, _, config = relative.rpartition('/')
  self.CheckGYPConfigIsSupported(config, path)
  return output_dir, config
def CheckGYPConfigIsSupported(self, config, path):
  """Raise MBErr unless `config` is a GYP configuration mb supports.

  Debug/Release are always allowed; Debug_x64/Release_x64 are also
  allowed on Windows platforms.
  """
  if config not in ('Debug', 'Release'):
    if (sys.platform in ('win32', 'cygwin') and
        config not in ('Debug_x64', 'Release_x64')):
      # BUG FIX: the format arguments were not parenthesized
      # ('... % config, path'), so this raised TypeError (not enough
      # arguments for format string) instead of the intended MBErr.
      raise MBErr('Unknown or unsupported config type "%s" in "%s"' %
                  (config, path))
def GYPCmd(self, output_dir, gyp_defines, config):
  """Build the gyp_chromium command line for the given defines/config."""
  # Substitute the goma directory into the defines before splitting them.
  gyp_defines = gyp_defines.replace("$(goma_dir)", self.args.goma_dir)
  cmd = [
      sys.executable,
      os.path.join('build', 'gyp_chromium'),
      '-G', 'output_dir=' + output_dir,
      '-G', 'config=' + config,
  ]
  for define in shlex.split(gyp_defines):
    cmd.extend(['-D', define])
  return cmd
def RunGNAnalyze(self, vals):
  """Determine which input targets are affected by the changed files.

  Reads {'files', 'targets'} from the input JSON and writes a result
  dict ({'status', 'targets', 'build_targets'}) to the output path,
  using two 'gn refs' passes: one matching build outputs, one matching
  GN labels.
  """
  # analyze runs before 'gn gen' now, so we need to run gn gen
  # in order to ensure that we have a build directory.
  ret = self.RunGNGen(vals)
  if ret:
    return ret

  inp = self.ReadInputJSON(['files', 'targets'])
  if self.args.verbose:
    self.Print()
    self.Print('analyze input:')
    self.PrintJSON(inp)
    self.Print()

  output_path = self.args.output_path[0]

  # Bail out early if a GN file was modified, since 'gn refs' won't know
  # what to do about it.
  if any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']):
    self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
    return 0

  # Bail out early if 'all' was asked for, since 'gn refs' won't recognize it.
  if 'all' in inp['targets']:
    self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
    return 0

  # This shouldn't normally happen, but could due to unusual race conditions,
  # like a try job that gets scheduled before a patch lands but runs after
  # the patch has landed.
  if not inp['files']:
    self.Print('Warning: No files modified in patch, bailing out early.')
    self.WriteJSON({'targets': [],
                    'build_targets': [],
                    'status': 'No dependency'}, output_path)
    return 0

  ret = 0
  # 'gn refs' reads the changed-file list from a response file.
  response_file = self.TempFile()
  response_file.write('\n'.join(inp['files']) + '\n')
  response_file.close()

  matching_targets = []
  try:
    # Pass 1: map changed files to build outputs; keep the ones the
    # caller asked about.
    cmd = self.GNCmd('refs', self.args.path[0]) + [
        '@%s' % response_file.name, '--all', '--as=output']
    ret, out, _ = self.Run(cmd)
    if ret and not 'The input matches no targets' in out:
      self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                output_path)
    build_dir = self.ToSrcRelPath(self.args.path[0]) + os.sep
    for output in out.splitlines():
      build_output = output.replace(build_dir, '')
      if build_output in inp['targets']:
        matching_targets.append(build_output)

    # Pass 2: the same query, but matching GN labels instead of outputs.
    cmd = self.GNCmd('refs', self.args.path[0]) + [
        '@%s' % response_file.name, '--all']
    ret, out, _ = self.Run(cmd)
    if ret and not 'The input matches no targets' in out:
      self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                output_path)
    for label in out.splitlines():
      build_target = label[2:]
      # We want to accept 'chrome/android:chrome_shell_apk' and
      # just 'chrome_shell_apk'. This may result in too many targets
      # getting built, but we can adjust that later if need be.
      for input_target in inp['targets']:
        if (input_target == build_target or
            build_target.endswith(':' + input_target)):
          matching_targets.append(input_target)
  finally:
    self.RemoveFile(response_file.name)

  if matching_targets:
    # TODO: it could be that a target X might depend on a target Y
    # and both would be listed in the input, but we would only need
    # to specify target X as a build_target (whereas both X and Y are
    # targets). I'm not sure if that optimization is generally worth it.
    self.WriteJSON({'targets': sorted(matching_targets),
                    'build_targets': sorted(matching_targets),
                    'status': 'Found dependency'}, output_path)
  else:
    self.WriteJSON({'targets': [],
                    'build_targets': [],
                    'status': 'No dependency'}, output_path)

  if not ret and self.args.verbose:
    outp = json.loads(self.ReadFile(output_path))
    self.Print()
    self.Print('analyze output:')
    self.PrintJSON(outp)
    self.Print()

  return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
  def WriteFailureAndRaise(self, msg, output_path):
    """Record *msg* as an error JSON (when an output path is set) and abort.

    Note: a falsy output_path (None or '') deliberately skips the write.
    """
    if output_path:
      self.WriteJSON({'error': msg}, output_path)
    raise MBErr(msg)
def WriteJSON(self, obj, path):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n')
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def PrintCmd(self, cmd):
if cmd[0] == sys.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[pipes.quote(c) for c in cmd])
  def PrintJSON(self, obj):
    """Pretty-print *obj* as deterministic (sorted, 2-space) JSON."""
    self.Print(json.dumps(obj, indent=2, sort_keys=True))
  def Print(self, *args, **kwargs):
    """Thin wrapper over the built-in print()."""
    # This function largely exists so it can be overridden for testing.
    print(*args, **kwargs)
  def Run(self, cmd):
    """Run *cmd* honoring --dryrun/--verbose; return (ret, out, err).

    In dry-run mode the command is only printed and (0, '', '') is
    returned without executing anything.
    """
    # This function largely exists so it can be overridden for testing.
    if self.args.dryrun or self.args.verbose:
      self.PrintCmd(cmd)
    if self.args.dryrun:
      return 0, '', ''
    ret, out, err = self.Call(cmd)
    if self.args.verbose:
      # Echo captured output without adding extra newlines.
      if out:
        self.Print(out, end='')
      if err:
        self.Print(err, end='', file=sys.stderr)
    return ret, out, err
def Call(self, cmd):
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
  def ExpandUser(self, path):
    """Expand a leading '~' in *path* to the user's home directory."""
    # This function largely exists so it can be overridden for testing.
    return os.path.expanduser(path)
  def Exists(self, path):
    """Return True when *path* exists on disk."""
    # This function largely exists so it can be overridden for testing.
    return os.path.exists(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def ReadFile(self, path):
# This function largely exists so it can be overriden for testing.
with open(path) as fp:
return fp.read()
  def RemoveFile(self, path):
    """Delete the file at *path*."""
    # This function largely exists so it can be overridden for testing.
    os.remove(path)
  def TempFile(self, mode='w'):
    """Return a named temp file; delete=False so it survives close()."""
    # This function largely exists so it can be overridden for testing.
    return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents):
# This function largely exists so it can be overriden for testing.
if self.args.dryrun or self.args.verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
  """Raised for all mb-level failures; the entry point catches it and
  exits with status 1."""
  pass
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except MBErr as e:
    print(e)
    sys.exit(1)
  except KeyboardInterrupt:
    # BUG FIX: print() takes `file=`, not `stream=`; the old keyword raised
    # a TypeError instead of printing this message on Ctrl-C.
    print("interrupted, exiting", file=sys.stderr)
    sys.exit(130)
|
{
"content_hash": "14dff62262a20ee9275d095297acbc3a",
"timestamp": "",
"source": "github",
"line_count": 798,
"max_line_length": 80,
"avg_line_length": 35.11528822055138,
"alnum_prop": 0.581400328313468,
"repo_name": "lihui7115/ChromiumGStreamerBackend",
"id": "80f2979189e2279c4c3824e32ede0874f841ba33",
"size": "28207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/mb/mb.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9508834"
},
{
"name": "C++",
"bytes": "242598549"
},
{
"name": "CSS",
"bytes": "943747"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27281878"
},
{
"name": "Java",
"bytes": "14561064"
},
{
"name": "JavaScript",
"bytes": "20540839"
},
{
"name": "Makefile",
"bytes": "70864"
},
{
"name": "Objective-C",
"bytes": "1745880"
},
{
"name": "Objective-C++",
"bytes": "10008668"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "482954"
},
{
"name": "Python",
"bytes": "8626890"
},
{
"name": "Shell",
"bytes": "481888"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import os
import warnings
import matplotlib.pyplot as plt
#plt.interactive(True)
import matplotlib as mpl
import matplotlib.ticker as ticker
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
# convenient dataset and projection functions
import wradlib as wrl
# georef, gis
from osgeo import osr
def _check_file(filename):
# todo: move this to another place
geo_src = 'https://bitbucket.org/kaimuehlbauer/wradlib_miub/downloads/geo.tar.gz'
if not os.path.exists(filename):
warnings.warn(
"File does not exist: {0}\nGet data from {1} and extract archive to wradlib/example/data folder".format(
filename, geo_src))
exit(0)
def nex_overlay():
    """Render a 2x2 matplotlib figure of geodata overlays for Bangladesh.

    ax1: SRTM terrain DEM with country borders, rivers and city markers
         (WGS84 lon/lat); ax2: country patches reprojected to Asia South
    Albers Equal Area Conic; ax3: spatially filtered country patches in
    WGS84; ax4: rivers only.  Expects the prepared geo archive (see
    ``_check_file``) extracted to ``data/geo`` next to this script.
    """
    # set filepath
    filepath = os.path.join(os.path.dirname(__file__), 'data/geo')
    # setup figure
    fig1 = plt.figure(figsize=(10, 8))
    # create 4 subplots
    ax1 = plt.subplot2grid((2, 2), (0, 0))
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    ax3 = plt.subplot2grid((2, 2), (1, 0))
    ax4 = plt.subplot2grid((2, 2), (1, 1))
    # we use this special prepared geotiff
    # created from srtm data via the following shell command:
    # gdalwarp -te 88. 20. 93. 27. srtm_54_07.tif srtm_55_07.tif srtm_54_08.tif srtm_55_08.tif bangladesh.tif
    filename = os.path.join(filepath, 'bangladesh.tif')
    _check_file(filename)
    # pixel_spacing is in output units (lonlat)
    rastercoords, rastervalues = wrl.io.read_raster_data(filename, spacing=0.005)
    # specify kwargs for plotting, using terrain colormap and LogNorm
    # (heights clipped to [1, 3000] m for the log scale)
    dem = ax1.pcolormesh(rastercoords[..., 0], rastercoords[..., 1], rastervalues, cmap=mpl.cm.terrain, norm=LogNorm(),
                         vmin=1, vmax=3000)
    # make some space on the right for colorbar axis
    div1 = make_axes_locatable(ax1)
    cax1 = div1.append_axes("right", size="5%", pad=0.1)
    # add colorbar and title
    # we use LogLocator for colorbar
    cb = fig1.colorbar(dem, cax=cax1, ticks=ticker.LogLocator(subs=range(10)))
    cb.set_label('terrain height [m]')
    # plot country borders from esri vector shape, filter by attribute
    # create wgs84 and india osr objects (spatial reference system)
    wgs84 = osr.SpatialReference()
    wgs84.ImportFromEPSG(4326)
    india = osr.SpatialReference()
    # asia south albers equal area conic
    india.ImportFromEPSG(102028)
    # country list
    countries = ['India', 'Nepal', 'Bhutan', 'Myanmar']
    # open the input data source and get the layer
    filename = os.path.join(filepath, 'ne_10m_admin_0_boundary_lines_land.shp')
    _check_file(filename)
    dataset, inLayer = wrl.io.open_shape(filename)
    # iterate over countries, filter accordingly, get coordinates and plot
    for item in countries:
        # SQL-like selection syntax
        fattr = "(adm0_left = '" + item + "' or adm0_right = '" + item + "')"
        inLayer.SetAttributeFilter(fattr)
        # get borders and names (the returned keys are unused here)
        borders, keys = wrl.georef.get_shape_coordinates(inLayer, key='name')
        wrl.vis.add_lines(ax1, borders, color='black', lw=2, zorder=4)
    # some testing on additional axes
    # add Bangladesh to countries
    countries.append('Bangladesh')
    # create colors for country-patches
    cm = mpl.cm.jet
    colors = []
    for i in range(len(countries)):
        colors.append(cm(1. * i / len(countries)))
    # open the input data source and get the layer
    filename = os.path.join(filepath, 'ne_10m_admin_0_countries.shp')
    _check_file(filename)
    dataset, layer = wrl.io.open_shape(filename)
    # iterate over countries, filter by attribute and plot single patches on ax2
    for i, item in enumerate(countries):
        fattr = "name = '" + item + "'"
        layer.SetAttributeFilter(fattr)
        # get country patches and geotransform to destination srs
        patches, keys = wrl.georef.get_shape_coordinates(layer, dest_srs=india, key='name')
        wrl.vis.add_patches(ax2, patches, facecolor=colors[i])
    ax2.autoscale(True)
    ax2.set_aspect('equal')
    ax2.set_xlabel('X - Coordinate')
    ax2.set_ylabel('Y - Coordinate')
    ax2.ticklabel_format(style='sci', scilimits=(0, 0))
    ax2.set_title('South Asia - Albers Equal Area Conic ')
    # reset Layer filter
    layer.SetAttributeFilter(None)
    layer.SetSpatialFilter(None)
    # filter spatially and plot as PatchCollection on ax3
    layer.SetSpatialFilterRect(88, 20, 93, 27)
    patches, keys = wrl.georef.get_shape_coordinates(layer, dest_srs=wgs84, key='name')
    # NOTE(review): the rectangle filter can return countries outside the
    # `countries` list (see comment below); `i` only advances on matches, so
    # colors stay aligned with the order matches are encountered — which is
    # not necessarily the same color each country got on ax2.  Verify intent.
    i = 0
    for name, patch in zip(keys, patches):
        # why comes the US in here?
        if name in countries:
            wrl.vis.add_patches(ax3, patch, facecolor=colors[i], cmap=mpl.cm.jet, alpha=0.4)
            i += 1
    ax3.autoscale(True)
    ax3.set_aspect('equal')
    ax3.set_xlabel('Longitude')
    ax3.set_ylabel('Latitude')
    ax3.set_title('South Asia - WGS 84')
    # plot rivers from esri vector shape, filter spatially
    # http://www.fao.org/geonetwork/srv/en/metadata.show?id=37331
    # open the input data source and get the layer
    filename = os.path.join(filepath, 'rivers_asia_37331.shp')
    _check_file(filename)
    dataset, inLayer = wrl.io.open_shape(filename)
    # do spatial filtering to get only geometries inside bounding box
    inLayer.SetSpatialFilterRect(88, 20, 93, 27)
    rivers, keys = wrl.georef.get_shape_coordinates(inLayer, key='MAJ_NAME')
    # plot on ax1, and ax4
    wrl.vis.add_lines(ax1, rivers, color=mpl.cm.terrain(0.), lw=0.5, zorder=3)
    wrl.vis.add_lines(ax4, rivers, color=mpl.cm.terrain(0.), lw=0.5, zorder=3)
    ax4.autoscale(True)
    ax4.set_aspect('equal')
    ax4.set_xlim((88, 93))
    ax4.set_ylim((20, 27))
    ax4.set_xlabel('Longitude')
    ax4.set_ylabel('Latitude')
    ax4.set_title('Bangladesh - Rivers')
    # plot rivers from esri vector shape, filter spatially
    # plot rivers from NED
    # open the input data source and get the layer
    filename = os.path.join(filepath, 'ne_10m_rivers_lake_centerlines.shp')
    _check_file(filename)
    dataset, inLayer = wrl.io.open_shape(filename)
    inLayer.SetSpatialFilterRect(88, 20, 93, 27)
    rivers, keys = wrl.georef.get_shape_coordinates(inLayer)
    wrl.vis.add_lines(ax1, rivers, color=mpl.cm.terrain(0.), lw=0.5, zorder=3)
    ax1.autoscale(True)
    # ### plot city dots with annotation, finalize plot
    # lat/lon coordinates of five cities in Bangladesh
    lats = [23.73, 22.32, 22.83, 24.37, 24.90]
    lons = [90.40, 91.82, 89.55, 88.60, 91.87]
    cities = ['Dhaka', 'Chittagong', 'Khulna', 'Rajshahi', 'Sylhet']
    for lon, lat, city in zip(lons, lats, cities):
        ax1.plot(lon, lat, 'ro', zorder=5)
        ax1.text(lon + 0.01, lat + 0.01, city)
    # set axes limits and equal aspect
    ax1.set_xlim((88, 93))
    ax1.set_ylim((20, 27))
    ax1.set_xlabel('Longitude')
    ax1.set_ylabel('Latitude')
    ax1.set_aspect('equal')
    ax1.set_title('Bangladesh')
    plt.tight_layout(w_pad=0.1)
    plt.show()
# =======================================================
# Script entry point: render the overlay example figure.
if __name__ == '__main__':
    nex_overlay()
|
{
"content_hash": "e85063c43cba64bd6fb7dc2727c5477a",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 119,
"avg_line_length": 38.11764705882353,
"alnum_prop": 0.6554433221099888,
"repo_name": "jjhelmus/wradlib",
"id": "fa9690b260f80ffe60b652056378017aa37d2a30",
"size": "7636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/overlay_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "FORTRAN",
"bytes": "2097"
},
{
"name": "Python",
"bytes": "593487"
},
{
"name": "Shell",
"bytes": "2741"
}
],
"symlink_target": ""
}
|
import sys
from hiclib import mapping, fragmentHiC
from mirnylib import h5dict, genome

# Parse three replicate Hi-C BAM pairs into HDF5 read dictionaries, convert
# each replicate to a fragment-level dataset, and merge everything into one
# combined dataset.  Usage: python hiclib_mapping.py <basedir>
basedir = sys.argv[1]

# One mapped-read store per SRA run (h5dict writes through to disk).
_SRA_RUNS = ('SRR443886', 'SRR443887', 'SRR443888')
_mapped_reads = [h5dict.h5dict('%s/Data/Timing/mapped_reads%d.hdf5' % (basedir, i))
                 for i in range(1, len(_SRA_RUNS) + 1)]

# Only chromosome 1 of the mm9 assembly is used for this timing benchmark.
genome_db = genome.Genome('%s/Data/Timing/../Genome/mm9_fasta'.replace('/Timing/../', '/') % basedir,
                          readChrms=['1'], chrmFileTemplate="%s.fa") if False else \
    genome.Genome('%s/Data/Genome/mm9_fasta' % basedir, readChrms=['1'],
                  chrmFileTemplate="%s.fa")

# Same enzyme and layout for every run; was previously three copy-pasted calls.
for _run, _out_dict in zip(_SRA_RUNS, _mapped_reads):
    mapping.parse_sam(
        sam_basename1='%s/Data/Timing/%s_sub_1.bam' % (basedir, _run),
        sam_basename2='%s/Data/Timing/%s_sub_2.bam' % (basedir, _run),
        out_dict=_out_dict,
        genome_db=genome_db,
        enzyme_name='NcoI')


def _make_dataset(tmp_name):
    """Create an in-memory HiCdataset with the benchmark's fixed settings."""
    return fragmentHiC.HiCdataset(
        filename=tmp_name,
        genome=genome_db,
        maximumMoleculeLength=500,
        mode='w',
        enzymeName="NcoI",
        inMemory=True)


# Replicates 2 and 3 are parsed and saved individually so replicate 1 can
# merge their on-disk datasets afterwards (same order as the original script).
for _idx in (2, 3):
    _frags = _make_dataset('temp%d' % _idx)
    _frags.parseInputData(dictLike="%s/Data/Timing/mapped_reads%d.hdf5" % (basedir, _idx))
    _frags.save('%s/Data/Timing/hiclib_data%d.hdf5' % (basedir, _idx))
    del _frags

fragments = _make_dataset('temp1')
fragments.parseInputData(dictLike="%s/Data/Timing/mapped_reads1.hdf5" % basedir)
fragments.merge(['%s/Data/Timing/hiclib_data2.hdf5' % basedir,
                 '%s/Data/Timing/hiclib_data3.hdf5' % basedir])
fragments.save('%s/Data/Timing/hiclib_data.hdf5' % basedir)
|
{
"content_hash": "d9751f44cc3ceaa2d4283e0aa9028279",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 109,
"avg_line_length": 34.12307692307692,
"alnum_prop": 0.7146077547339946,
"repo_name": "bxlab/HiFive_Paper",
"id": "39635708243888919a7c905b4b8a6e4fd9dc3dde",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/Timing/hiclib_mapping.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5096"
},
{
"name": "C",
"bytes": "107381"
},
{
"name": "C++",
"bytes": "182835"
},
{
"name": "CMake",
"bytes": "3353"
},
{
"name": "Forth",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "22978"
},
{
"name": "Perl",
"bytes": "25453"
},
{
"name": "Python",
"bytes": "4229513"
},
{
"name": "R",
"bytes": "43022"
},
{
"name": "Shell",
"bytes": "10798"
}
],
"symlink_target": ""
}
|
'''
Load the statewide dataset and plot partial regression of high school
graduation on murder rate after removing the effects of rate of urbanization,
poverty, and rate of single household.
'''
import statsmodels.api as sm
import matplotlib.pyplot as plt

# Statewide crime data shipped with statsmodels, as a pandas container.
crime_data = sm.datasets.statecrime.load_pandas()

# Partial regression: murder vs. hs_grad, controlling for the covariates
# below.  State observation labels are suppressed for a cleaner plot.
controls = ['urban', 'poverty', 'single']
sm.graphics.plot_partregress(endog='murder', exog_i='hs_grad',
                             exog_others=controls,
                             data=crime_data.data, obs_labels=False)
plt.show()
|
{
"content_hash": "9bde674c1e92a0726a8d776a3f53d425",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 35,
"alnum_prop": 0.6819047619047619,
"repo_name": "statsmodels/statsmodels.github.io",
"id": "c68751d7c0af7d5a398872a47f40b7387cfb86e2",
"size": "549",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "v0.10.1/plots/graphics_regression_partregress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import logging
from ..analysis import Analysis, register_analysis
from networkx import DiGraph
from copy import copy
l = logging.getLogger(name="angr.analyses.dfg")
class DFG(Analysis):
    """Per-basic-block data flow graphs built over a CFG's VEX IR.

    After construction, ``self.dfgs`` maps basic-block address -> networkx
    DiGraph whose nodes are VEX statements/expressions and whose edges run
    from value producers to the statements that consume them.
    """

    def __init__(self, cfg=None, annocfg=None):
        """
        Build a Data Flow Grah (DFG) for every basic block of a CFG

        The DFGs are available in the dict self.dfgs where the key
        is a basic block addr and the value a DFG.

        :param cfg: A CFG used to get all the basic blocks
        :param annocfg: An AnnotatedCFG built from a backward slice used to only build the DFG on the whitelisted statements
        """
        if cfg is None:
            self._cfg = self.project.analyses.CFG()
        else:
            self._cfg = cfg
        self._annocfg = annocfg
        self.dfgs = self._construct()

    def _need_to_ignore(self, addr, stmt, stmt_idx):
        """Return True when *stmt* should be excluded from the DFG:
        not whitelisted by the annotated CFG, a no-data statement
        (IMark/AbiHint/Exit), or a Put to the instruction pointer."""
        if self._annocfg is not None:
            whitelist = self._annocfg.get_whitelisted_statements(addr)
            if whitelist is False or (whitelist is not None and stmt_idx not in whitelist):
                return True
        if stmt.tag == 'Ist_IMark' or stmt.tag == 'Ist_AbiHint' or stmt.tag == 'Ist_Exit':
            return True
        elif stmt.tag == 'Ist_Put':
            arch = self.project.arch
            if stmt.offset in arch.register_names:
                if stmt.offset == arch.ip_offset:
                    return True
        return False

    def _construct(self):
        """
        We want to build the type of DFG that's used in "Automated Ident. of Crypto
        Primitives in Binary Code with Data Flow Graph Isomorphisms." Unlike that
        paper, however, we're building it on Vex IR instead of assembly instructions.
        """
        cfg = self._cfg
        p = self.project
        dfgs = {}
        l.debug("Building Vex DFG...")
        for node in cfg.nodes():
            try:
                # FIX: identity comparison with None (was `== None`).
                if node.simprocedure_name is None:
                    irsb = p.factory.block(node.addr).vex
                else:
                    # FIX: lazy %-style logging args instead of eager
                    # string interpolation.
                    l.debug("Cannot process SimProcedures, ignoring %s",
                            node.simprocedure_name)
                    continue
            except Exception as e:
                l.debug(e)
                continue
            tmpsnodes = {}
            putsnodes = {}
            statements = irsb.statements
            dfg = DiGraph()
            for stmt_idx, stmt in enumerate(statements):
                # We want to skip over certain types, such as Imarks
                if self._need_to_ignore(node.addr, stmt, stmt_idx):
                    continue
                # break statement down into sub-expressions
                exprs = stmt.expressions
                stmt_node = stmt
                dfg.add_node(stmt)
                if stmt.tag == 'Ist_WrTmp':
                    tmpsnodes[stmt.tmp] = stmt_node
                    if exprs[0].tag == 'Iex_Binop':
                        if exprs[1].tag == 'Iex_RdTmp':
                            dfg.add_edge(tmpsnodes[exprs[1].tmp], stmt_node)
                        else:
                            dfg.add_edge(exprs[1], stmt_node)
                        if exprs[2].tag == 'Iex_RdTmp':
                            dfg.add_edge(tmpsnodes[exprs[2].tmp], stmt_node)
                        else:
                            dfg.add_edge(exprs[2], stmt_node)
                    elif exprs[0].tag == 'Iex_Unop':
                        # Unops are collapsed: the node is dropped and the
                        # source (copied so .tmp can be rewritten) stands in.
                        dfg.remove_node(stmt_node)
                        if exprs[1].tag == 'Iex_RdTmp':
                            tmpsnodes[stmt.tmp] = copy(tmpsnodes[exprs[1].tmp])
                            tmpsnodes[stmt.tmp].tmp = stmt.tmp
                        else:
                            tmpsnodes[stmt.tmp] = exprs[1]
                    elif exprs[0].tag == 'Iex_RdTmp':
                        tmpsnodes[stmt.tmp] = copy(tmpsnodes[exprs[0].tmp])
                        tmpsnodes[stmt.tmp].tmp = stmt.tmp
                    elif exprs[0].tag == 'Iex_Get':
                        # FIX: dict.has_key() was removed in Python 3; `in`
                        # works under both Python 2 and 3.
                        if exprs[0].offset in putsnodes:
                            dfg.add_edge(putsnodes[exprs[0].offset], stmt_node)
                        if len(exprs) > 1 and exprs[1].tag == "Iex_RdTmp":
                            dfg.add_edge(tmpsnodes[exprs[1].tmp], stmt_node)
                        elif len(exprs) > 1:
                            dfg.add_edge(exprs[1], stmt_node)
                    elif exprs[0].tag == 'Iex_Load':
                        if exprs[1].tag == 'Iex_RdTmp':
                            dfg.add_edge(tmpsnodes[exprs[1].tmp], stmt_node)
                        else:
                            dfg.add_edge(exprs[1], stmt_node)
                    else:
                        # Take a guess by assuming exprs[0] is the op and any other expressions are args
                        for e in exprs[1:]:
                            if e.tag == 'Iex_RdTmp':
                                dfg.add_edge(tmpsnodes[e.tmp], stmt_node)
                            else:
                                dfg.add_edge(e, stmt_node)
                elif stmt.tag == 'Ist_Store':
                    if exprs[0].tag == 'Iex_RdTmp':
                        dfg.add_edge(tmpsnodes[exprs[0].tmp], stmt_node)
                    elif exprs[0].tag == 'Iex_Const':
                        dfg.add_edge(exprs[0], stmt_node)
                    if exprs[1].tag == 'Iex_RdTmp':
                        dfg.add_edge(tmpsnodes[exprs[1].tmp], stmt_node)
                    else:
                        dfg.add_edge(exprs[1], stmt_node)
                elif stmt.tag == 'Ist_Put':
                    if exprs[0].tag == 'Iex_RdTmp':
                        dfg.add_edge(tmpsnodes[exprs[0].tmp], stmt_node)
                    elif exprs[0].tag == 'Iex_Const':
                        dfg.add_edge(exprs[0], stmt_node)
                    putsnodes[stmt.offset] = stmt_node
                elif stmt.tag == 'Ist_Exit':
                    if exprs[0].tag == 'Iex_RdTmp':
                        dfg.add_edge(tmpsnodes[exprs[0].tmp], stmt_node)
                elif stmt.tag == 'Ist_Dirty':
                    tmpsnodes[stmt.tmp] = stmt_node
                elif stmt.tag == 'Ist_CAS':
                    tmpsnodes[stmt.oldLo] = stmt_node
                else:
                    for e in stmt.expressions:
                        if e.tag == 'Iex_RdTmp':
                            dfg.add_edge(tmpsnodes[e.tmp], stmt_node)
                        else:
                            dfg.add_edge(e, stmt_node)
            # FIX: iterate over a snapshot -- removing nodes from the graph
            # being iterated raises under networkx >= 2.0 (nodes() is a
            # live view there).
            for vtx in list(dfg.nodes()):
                if dfg.degree(vtx) == 0:
                    dfg.remove_node(vtx)
            if dfg.size() > 0:
                dfgs[node.addr] = dfg
        return dfgs
register_analysis(DFG, 'DFG')
|
{
"content_hash": "b5875cc6df6acd3f8e43d1809111abf8",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 124,
"avg_line_length": 40.61904761904762,
"alnum_prop": 0.46468347010551,
"repo_name": "haylesr/angr",
"id": "5bf5d4b58292ae2ba69a29b18aecb8fa7bd4c257",
"size": "6824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/dfg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "824"
},
{
"name": "Makefile",
"bytes": "291"
},
{
"name": "Python",
"bytes": "799030"
}
],
"symlink_target": ""
}
|
from som.primitives.primitives import Primitives
from som.vm.globals import trueObject, falseObject
from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive
def _not(_rcvr):
    """Primitive for False>>#not: negating false always yields true."""
    return trueObject
def _and(_rcvr, _arg):
    """Primitive for False>>#and: / #&&: false AND anything is false;
    the argument is ignored (never evaluated)."""
    return falseObject
class FalsePrimitivesBase(Primitives):
    """Installs the boolean primitives specific to the False class."""
    def install_primitives(self):
        # `not` is unary; `and:` and `&&` are binary aliases sharing _and.
        self._install_instance_primitive(UnaryPrimitive("not", self._universe, _not))
        self._install_instance_primitive(BinaryPrimitive("and:", self._universe, _and))
        self._install_instance_primitive(BinaryPrimitive("&&", self._universe, _and))
|
{
"content_hash": "f7db76a3780f6de85b633a5214869738",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 87,
"avg_line_length": 31.210526315789473,
"alnum_prop": 0.7403035413153457,
"repo_name": "SOM-st/RPySOM",
"id": "bbd490dd1a9cc7a936ead9579e0a8087d8d32ee7",
"size": "593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/som/primitives/false_primitives.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183215"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import threading
import logging
import time
from ginga.misc import Task, Future, Callback
from collections import deque
import queue as Queue
class GwMain(Callback.Callbacks):
    """Dispatch hub that funnels all GUI work onto a single GUI thread.

    Work is submitted as frozen Future objects on three channels: a
    priority queue, a FIFO queue, and per-category "one-shot" deques
    (length 1, so only the latest request per category survives).  The
    GUI thread drains them in update_pending()/mainloop(); non-GUI work
    is delegated to a thread pool.
    """
    def __init__(self, queue=None, logger=None, ev_quit=None, app=None,
                 thread_pool=None):
        Callback.Callbacks.__init__(self)
        self.enable_callback('shutdown')
        # You can pass in a queue if you prefer to do so
        if not queue:
            queue = Queue.Queue()
        self.gui_queue = queue
        self.priority_gui_queue = Queue.PriorityQueue()
        # You can pass in a logger if you prefer to do so
        if logger is None:
            logger = logging.getLogger('GwMain')
        self.logger = logger
        if not ev_quit:
            ev_quit = threading.Event()
        self.ev_quit = ev_quit
        self.app = app
        # Mark our thread id
        self.gui_thread_id = threading.get_ident()
        self.threadPool = thread_pool
        # For asynchronous tasks on the thread pool
        self.tag = 'master'
        self.shares = ['threadPool', 'logger']
        # category name -> deque(maxlen=1) of pending one-shot futures
        self.oneshots = {}
    def get_widget(self):
        """Return the wrapped toolkit application object."""
        return self.app
    def get_threadPool(self):
        """Return the pool used for non-GUI tasks."""
        return self.threadPool
    def _execute_future(self, future):
        """Thaw one queued future on the GUI thread; errors are logged and
        resolved into the future instead of propagating."""
        # Execute the GUI method
        try:
            try:
                future.thaw(suppress_exception=False)
            except Exception as e:
                self.logger.error("gui event loop error: %s" % str(e))
                try:
                    (type, value, tb) = sys.exc_info()
                    tb_str = "".join(traceback.format_tb(tb))
                    self.logger.error("Traceback:\n%s" % (tb_str))
                except Exception:
                    self.logger.error("Traceback information unavailable.")
                # resolve with the exception so waiters wake up
                future.resolve(e)
        except Exception as e2:
            self.logger.error("Exception resolving future: %s" % str(e2))
    def update_pending(self, timeout=0.0, elapsed_max=0.02):
        """Process pending toolkit events and queued futures.

        Must run on the GUI thread.  elapsed_max caps how long each queue
        drain phase may run; timeout is the blocking wait on the FIFO queue.
        """
        self.assert_gui_thread()
        # Process "out-of-band" events
        # self.logger.debug("1. processing out-of-band GUI events")
        try:
            self.app.process_events()
        except Exception as e:
            self.logger.error(str(e))
        # Process "in-band" GUI events
        # self.logger.debug("2. processing approx %d in-band GUI events" % (
        #    self.gui_queue.qsize()))
        done = False
        time_start = time.time()
        # First process priority futures
        while not done:
            try:
                future = self.priority_gui_queue.get(block=False)
                self._execute_future(future)
            except Queue.Empty:
                break
            if time.time() - time_start > elapsed_max:
                done = True
        # Next process non-priority futures
        while not done:
            try:
                future = self.gui_queue.get(block=True,
                                            timeout=timeout)
                self._execute_future(future)
            except Queue.Empty:
                done = True
            if time.time() - time_start > elapsed_max:
                done = True
        # Execute all the one-shots
        # self.logger.debug("3. processing one-shot GUI events")
        deqs = list(filter(lambda deq: len(deq) > 0, self.oneshots.values()))
        for deq in deqs:
            try:
                future = deq.pop()
                self._execute_future(future)
            except IndexError:
                continue
        # Process "out-of-band" events, again
        # self.logger.debug("4. processing out-of-band GUI events")
        try:
            self.app.process_events()
        except Exception as e:
            self.logger.error(str(e))
        # self.logger.debug("5. done")
    def gui_do_priority(self, priority, method, *args, **kwdargs):
        """General method for asynchronously calling into the GUI.
        It makes a future to call the given (method) with the given (args)
        and (kwdargs) inside the gui thread.  If the calling thread is a
        non-gui thread the future is returned.
        """
        future = Future.Future(priority=priority)
        future.freeze(method, *args, **kwdargs)
        self.priority_gui_queue.put(future)
        my_id = threading.get_ident()
        if my_id != self.gui_thread_id:
            return future
    def gui_do(self, method, *args, **kwdargs):
        """Queue *method* for the GUI thread (FIFO, priority 0); returns
        the future when called from a non-GUI thread."""
        future = Future.Future(priority=0)
        future.freeze(method, *args, **kwdargs)
        self.gui_queue.put(future)
        my_id = threading.get_ident()
        if my_id != self.gui_thread_id:
            return future
    def gui_call(self, method, *args, **kwdargs):
        """General method for synchronously calling into the GUI.
        This waits until the method has completed before returning.
        Called from the GUI thread itself, the method runs directly.
        """
        my_id = threading.get_ident()
        if my_id == self.gui_thread_id:
            return method(*args, **kwdargs)
        else:
            future = self.gui_do(method, *args, **kwdargs)
            return future.wait()
    def gui_do_future(self, future):
        """Queue an already-frozen future for the GUI thread."""
        self.gui_queue.put(future)
        return future
    def gui_do_oneshot(self, catname, method, *args, **kwdargs):
        """Queue *method* in category *catname*; only the most recent
        pending call per category is executed (deque of length 1)."""
        if catname not in self.oneshots:
            deq = self.oneshots.setdefault(catname, deque([], 1))
        else:
            deq = self.oneshots[catname]
        future = Future.Future()
        future.freeze(method, *args, **kwdargs)
        deq.append(future)
        my_id = threading.get_ident()
        if my_id != self.gui_thread_id:
            return future
    def nongui_do(self, method, *args, **kwdargs):
        """Run *method* on the thread pool; returns the started task."""
        task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
        return self.nongui_do_task(task)
    def nongui_do_cb(self, tup, method, *args, **kwdargs):
        """Like nongui_do(), registering callback tuple *tup* on the task."""
        task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
        task.register_callback(tup[0], args=tup[1:])
        return self.nongui_do_task(task)
    def nongui_do_future(self, future):
        """Thaw *future* on the thread pool instead of the GUI thread."""
        task = Task.FuncTask(future.thaw, (), {}, logger=self.logger)
        return self.nongui_do_task(task)
    def nongui_do_task(self, task):
        """Start *task* on the thread pool; errors are logged and re-raised."""
        try:
            task.init_and_start(self)
            return task
        except Exception as e:
            self.logger.error("Error starting task: %s" % (str(e)))
            raise e
    def is_gui_thread(self):
        """Return True when the caller is running on the GUI thread."""
        my_id = threading.get_ident()
        return my_id == self.gui_thread_id
    def assert_gui_thread(self):
        """Assert the caller is the GUI thread (stripped under -O)."""
        my_id = threading.get_ident()
        assert my_id == self.gui_thread_id, \
            Exception("Non-GUI thread (%d) is executing GUI (%d) code!" % (
                my_id, self.gui_thread_id))
    def assert_nongui_thread(self):
        """Assert the caller is NOT the GUI thread (stripped under -O)."""
        my_id = threading.get_ident()
        assert my_id != self.gui_thread_id, \
            Exception("GUI thread (%d) is executing non-GUI code!" % (
                my_id))
    def mainloop(self, timeout=0.001):
        """Adopt the calling thread as the GUI thread and pump events
        until ev_quit is set."""
        # Mark our thread id
        self.gui_thread_id = threading.get_ident()
        while not self.ev_quit.is_set():
            self.update_pending(timeout=timeout)
    def gui_quit(self):
        """Call this to cause the GUI thread to quit the mainloop."""
        self.ev_quit.set()
        self.make_callback('shutdown')
        self.app.process_end()
    def _quit(self):
        self.gui_quit()
# END
|
{
"content_hash": "1cf960d48b5b7ef48d96e84443209eb1",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 77,
"avg_line_length": 30.502024291497975,
"alnum_prop": 0.559729227501991,
"repo_name": "ejeschke/ginga",
"id": "b360ac4f7d809982dc9ab243285c73aedb3c6d32",
"size": "7700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ginga/gw/GwMain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4426376"
}
],
"symlink_target": ""
}
|
from common_fixtures import * # NOQA
from cattle import ApiError
def _get_agent_for_container(container):
agent = None
for map in container.hosts()[0].instanceHostMaps():
c = map.instance()
if c.agentId is not None:
agent = c.agent()
assert agent is not None
return agent
def _get_agent_client(agent):
    """Build an API client authenticated with the agent's agentApiKey
    credential (the first one on the agent's account)."""
    creds = agent.account().credentials()
    agent_keys = [cred for cred in creds if cred.kind == 'agentApiKey']
    api_key = agent_keys[0]
    assert len(api_key)
    return api_client(api_key.publicValue, api_key.secretValue)
def test_health_check_create_instance(super_client, context):
    """A standalone container with a health check becomes 'healthy' once its
    host agent reports 'UP' via a service event."""
    container = context.create_container(healthCheck={
        'port': 80,
    })
    assert container.healthCheck.port == 80
    container = super_client.reload(container)
    hci = find_one(container.healthcheckInstances)
    hcihm = find_one(hci.healthcheckInstanceHostMaps)
    agent = _get_agent_for_container(container)
    assert hcihm.healthState == 'healthy'
    # Report 'UP' as the monitoring agent would.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='UP',
                                     healthcheckUuid=hcihm.uuid)
    # The event must be attributed to the right account/instance/check.
    se = super_client.wait_success(se)
    assert se.state == 'created'
    assert se.accountId == container.accountId
    assert se.instanceId == container.id
    assert se.healthcheckInstanceId == hci.id
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    assert hcihm.externalTimestamp == ts
    # The container itself eventually converges to 'healthy'.
    check = lambda: super_client.reload(container).healthState == 'healthy'
    wait_for(check, timeout=5)
def test_health_check_create_service(super_client, context, client):
    """Exercise the health-state transitions of a service-managed container.

    Bad reports before the first 'UP' are ignored (state stays
    'initializing'); 'UP' makes it healthy; later 'INIT'/'UP' cycles keep
    it healthy; a bad report after that flips it to 'unhealthy' and
    triggers a replacement container on the service.
    """
    env = client.create_environment(name='env-' + random_str())
    service = client.create_service(name='test', launchConfig={
        'imageUuid': context.image_uuid,
        'healthCheck': {
            'port': 80,
        }
    }, environmentId=env.id, scale=1)
    service = client.wait_success(client.wait_success(service).activate())
    assert service.state == 'active'
    expose_map = find_one(service.serviceExposeMaps)
    container = super_client.reload(expose_map.instance())
    hci = find_one(container.healthcheckInstances)
    hcihm = find_one(hci.healthcheckInstanceHostMaps)
    agent = _get_agent_for_container(container)
    assert hcihm.healthState == 'healthy'
    assert container.healthState == 'initializing'
    # A bad report before any 'UP' must not change anything.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='Something Bad',
                                     healthcheckUuid=hcihm.uuid)
    super_client.wait_success(se)
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    assert container.healthState == 'initializing'
    # First 'UP' report: container becomes healthy.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='UP',
                                     healthcheckUuid=hcihm.uuid)
    super_client.wait_success(se)
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    check = lambda: super_client.reload(container).healthState == 'healthy'
    wait_for(check, timeout=5)
    # 'INIT' after 'UP' must not demote a healthy container.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='INIT',
                                     healthcheckUuid=hcihm.uuid)
    super_client.wait_success(se)
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    check = lambda: super_client.reload(container).healthState == 'healthy'
    wait_for(check, timeout=5)
    # Another 'UP': still healthy.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='UP',
                                     healthcheckUuid=hcihm.uuid)
    super_client.wait_success(se)
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    check = lambda: super_client.reload(container).healthState == 'healthy'
    wait_for(check, timeout=5)
    # And another 'INIT': still healthy.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='INIT',
                                     healthcheckUuid=hcihm.uuid)
    super_client.wait_success(se)
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'healthy'
    check = lambda: super_client.reload(container).healthState == 'healthy'
    wait_for(check, timeout=5)
    # A bad report after 'UP' must flip the state to unhealthy.
    ts = int(time.time())
    client = _get_agent_client(agent)
    se = client.create_service_event(externalTimestamp=ts,
                                     reportedHealth='Something Bad',
                                     healthcheckUuid=hcihm.uuid)
    se = super_client.wait_success(se)
    assert se.state == 'created'
    assert se.accountId == container.accountId
    assert se.instanceId == container.id
    assert se.healthcheckInstanceId == hci.id
    hcihm = super_client.wait_success(super_client.reload(hcihm))
    assert hcihm.healthState == 'unhealthy'
    assert hcihm.externalTimestamp == ts
    check = lambda: super_client.reload(container).healthState == 'unhealthy'
    wait_for(check, timeout=5)
    # The service reacts by scheduling a replacement container.
    wait_for(lambda: len(service.serviceExposeMaps()) > 1)
def test_health_check_bad_external_timestamp(super_client, context, client):
    # Reporting health without an externalTimestamp must be rejected with
    # a MissingRequired API error.
    env = client.create_environment(name='env-' + random_str())
    launch_config = {
        'imageUuid': context.image_uuid,
        'healthCheck': {
            'port': 80,
        }
    }
    service = client.create_service(name='test', launchConfig=launch_config,
                                    environmentId=env.id, scale=1)
    service = client.wait_success(client.wait_success(service).activate())
    assert service.state == 'active'

    expose_map = find_one(service.serviceExposeMaps)
    container = super_client.reload(expose_map.instance())
    hci = find_one(container.healthcheckInstances)
    hcihm = find_one(hci.healthcheckInstanceHostMaps)
    agent = _get_agent_for_container(container)
    agent_client = _get_agent_client(agent)
    assert hcihm.healthState == 'healthy'

    with pytest.raises(ApiError) as e:
        agent_client.create_service_event(reportedHealth='Something Bad',
                                          healthcheckUuid=hcihm.uuid)
    assert e.value.error.code == 'MissingRequired'
    assert e.value.error.fieldName == 'externalTimestamp'
def test_health_check_bad_agent(super_client, context, client):
    # An agent belonging to a host that does not run the healthcheck must
    # not be allowed to report health for it.
    # Create another host to get the agent from that host
    host2 = super_client.reload(register_simulated_host(context))
    # register one more host to ensure there is at least one more host
    # to schedule healtcheck on
    register_simulated_host(context)

    env = client.create_environment(name='env-' + random_str())
    launch_config = {
        'imageUuid': context.image_uuid,
        'healthCheck': {
            'port': 80,
        }
    }
    service = client.create_service(name='test', launchConfig=launch_config,
                                    environmentId=env.id, scale=1)
    service = client.wait_success(client.wait_success(service).activate())
    assert service.state == 'active'

    container = super_client.reload(
        find_one(service.serviceExposeMaps).instance())
    hci = find_one(container.healthcheckInstances)

    # Pick a hostmap that lives on some host other than host2.
    hcihm = None
    for candidate in hci.healthcheckInstanceHostMaps():
        if candidate.hostId != host2.id:
            hcihm = candidate
            break
    assert hcihm.hostId != host2.id
    assert hcihm.healthState == 'healthy'

    agent_client = _get_agent_client(host2.agent())
    ts = int(time.time())
    with pytest.raises(ApiError) as e:
        agent_client.create_service_event(externalTimestamp=ts,
                                          reportedHealth='Something Bad',
                                          healthcheckUuid=hcihm.uuid)
    assert e.value.error.code == 'CantVerifyHealthcheck'
def test_health_check_host_remove(super_client, context, client):
    """After a healthcheck host is removed, no *active* hostmap may keep
    pointing at the removed host."""
    # create 4 hosts for healtcheck as one of them would be removed later
    super_client.reload(register_simulated_host(context))
    super_client.reload(register_simulated_host(context))
    super_client.reload(register_simulated_host(context))
    super_client.reload(register_simulated_host(context))

    env = client.create_environment(name='env-' + random_str())
    service = client.create_service(name='test', launchConfig={
        'imageUuid': context.image_uuid,
        'healthCheck': {
            'port': 80,
        }
    }, environmentId=env.id, scale=1)
    service = client.wait_success(client.wait_success(service).activate())
    assert service.state == 'active'
    expose_map = find_one(service.serviceExposeMaps)
    container = super_client.reload(expose_map.instance())
    hci = find_one(container.healthcheckInstances)
    initial_len = len(hci.healthcheckInstanceHostMaps())
    assert initial_len == 3
    hcihm = hci.healthcheckInstanceHostMaps()[0]
    hosts = super_client.list_host(uuid=hcihm.host().uuid)
    assert len(hosts) == 1
    host = hosts[0]

    # remove the host
    host = super_client.wait_success(host.deactivate())
    host = super_client.wait_success(super_client.delete(host))
    assert host.state == 'removed'

    # verify that new hostmap was created for the instance
    hci = find_one(container.healthcheckInstances)
    final_len = len(hci.healthcheckInstanceHostMaps())
    assert final_len >= initial_len

    # BUG FIX: the original loop tested the stale `hcihm` variable and
    # assigned the match back to `hcihm`, so `hcim` could never become
    # non-None and the final assertion was vacuous.  Inspect each candidate
    # map itself and record any *active* map still bound to the removed
    # host.
    hcim = None
    for h in hci.healthcheckInstanceHostMaps():
        if h.hostId == host.id and h.state == 'active':
            hcim = h
            break
    assert hcim is None
|
{
"content_hash": "f50bcd046927ee977838de4ec6f864b2",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 77,
"avg_line_length": 37.405204460966544,
"alnum_prop": 0.646789902603856,
"repo_name": "stresler/cattle",
"id": "37b1670f8ed201731ab28d31f68580ef7e0c4cdf",
"size": "10062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_healthcheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "17738"
},
{
"name": "Java",
"bytes": "5536744"
},
{
"name": "Python",
"bytes": "650065"
},
{
"name": "Shell",
"bytes": "44520"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
def create_tls_ticket_auth_obfs(method):
    # Factory registered in obfs_map below: builds one plugin instance for
    # the given obfs method name.
    return tls_ticket_auth(method)
# Registry consumed by the obfs plugin loader: method name -> 1-tuple
# holding the factory that constructs the plugin for that method.
obfs_map = {
        'tls1.2_ticket_auth': (create_tls_ticket_auth_obfs,),
        'tls1.2_ticket_auth_compatible': (create_tls_ticket_auth_obfs,),
        'tls1.2_ticket_fastauth': (create_tls_ticket_auth_obfs,),
        'tls1.2_ticket_fastauth_compatible': (create_tls_ticket_auth_obfs,),
}
def match_begin(str1, str2):
    # True when str1 is at least as long as str2 and its first len(str2)
    # items equal str2.  Works for both str and bytes.
    prefix_len = len(str2)
    return len(str1) >= prefix_len and str1[:prefix_len] == str2
class obfs_auth_data(object):
    """Server-side state shared across all connections of one listener."""
    def __init__(self):
        # Startup time recorded 30 minutes in the past, truncated to
        # 32 bits, used by the handshake timestamp validity check.
        self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF
        # Random per-server client id embedded in fake handshakes.
        self.client_id = os.urandom(32)
        # Recently seen client auth strings (5 minute TTL) used to reject
        # replayed handshakes.
        self.client_data = lru_cache.LRUCache(60 * 5)
class tls_ticket_auth(plain.plain):
    """Obfuscator that disguises the stream as a TLS 1.2 session resumed
    via a session ticket (the ``tls1.2_ticket_auth`` method family).

    Application data travels inside fake TLS Application Data records
    (0x17 + version + 2-byte length).  The fake handshake authenticates
    both peers with truncated HMAC-SHA1 over server key + client id.
    """
    def __init__(self, method):
        self.method = method
        self.handshake_status = 0
        self.send_buffer = b''
        self.recv_buffer = b''
        self.client_id = b''
        self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
        self.tls_version = b'\x03\x03'
        self.overhead = 5
    def init_data(self):
        # Shared server-side state (replay cache, server client_id).
        return obfs_auth_data()
    def get_overhead(self, direction): # direction: true for c->s false for s->c
        return self.overhead
    def sni(self, url):
        # Build a TLS server_name (SNI) extension body for the host name.
        url = common.to_bytes(url)
        data = b"\x00" + struct.pack('>H', len(url)) + url
        data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
        return data
    def pack_auth_data(self, client_id):
        # 32-byte "random" field: 4-byte UTC time + 18 random bytes +
        # 10-byte HMAC-SHA1(key + client_id) over the first 22 bytes.
        utc_time = int(time.time()) & 0xFFFFFFFF
        data = struct.pack('>I', utc_time) + os.urandom(18)
        data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
        return data
    def client_encode(self, buf):
        # Client send path.  status 8: established, frame payload as
        # Application Data records (randomly sized <= ~4 KiB chunks).
        # status 0: emit the fake ClientHello, buffering any payload.
        # status 1 + empty buf: emit ChangeCipherSpec/Finished plus the
        # buffered payload.  status -1: obfuscation disabled, pass through.
        if self.handshake_status == -1:
            return buf
        if self.handshake_status == 8:
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        if len(buf) > 0:
            self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
        if self.handshake_status == 0:
            self.handshake_status = 1
            data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
            ext = binascii.unhexlify(b"ff01000100")
            host = self.server_info.obfs_param or self.server_info.host
            # Skip SNI when the host ends in a digit -- presumably an IP
            # literal heuristic; confirm.
            if host and host[-1] in string.digits:
                host = ''
            hosts = host.split(',')
            host = random.choice(hosts)
            ext += self.sni(host)
            ext += b"\x00\x17\x00\x00"
            ext += b"\x00\x23\x00\xd0" + os.urandom(208) # ticket
            ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
            ext += binascii.unhexlify(b"000500050100000000")
            ext += binascii.unhexlify(b"00120000")
            ext += binascii.unhexlify(b"75500000")
            ext += binascii.unhexlify(b"000b00020100")
            ext += binascii.unhexlify(b"000a0006000400170018")
            data += struct.pack('>H', len(ext)) + ext
            data = b"\x01\x00" + struct.pack('>H', len(data)) + data
            data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
            return data
        elif self.handshake_status == 1 and len(buf) == 0:
            data = b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
            data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished
            data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10]
            ret = data + self.send_buffer
            self.send_buffer = b''
            self.handshake_status = 8
            return ret
        return b''
    def client_decode(self, buf):
        # Client receive path.  Returns (data, need_send_back).  During
        # the handshake: verify the server's two truncated HMACs, yield
        # nothing, and request a send-back; afterwards strip Application
        # Data record framing.
        if self.handshake_status == -1:
            return (buf, False)
        if self.handshake_status == 8:
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    break
                buf = self.recv_buffer[5:size+5]
                ret += buf
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, False)
        if len(buf) < 11 + 32 + 1 + 32:
            raise Exception('client_decode data error')
        verify = buf[11:33]
        if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
            raise Exception('client_decode data error')
        if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]:
            raise Exception('client_decode data error')
        return (b'', True)
    def server_encode(self, buf):
        # Server send path.  Once bit 3 is set, frame payload as
        # Application Data records; on the first call emit the fake
        # ServerHello, an occasional (1 in 9) NewSessionTicket,
        # ChangeCipherSpec and a random-length Finished, HMAC-tagged,
        # then recurse to append any payload.
        if self.handshake_status == -1:
            return buf
        if (self.handshake_status & 8) == 8:
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        self.handshake_status |= 8
        data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100")
        data = b"\x02\x00" + struct.pack('>H', len(data)) + data #server hello
        data = b"\x16" + self.tls_version + struct.pack('>H', len(data)) + data
        if random.randint(0, 8) < 1:
            ticket = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 164) * 2 + 64)
            ticket = struct.pack('>H', len(ticket) + 4) + b"\x04\x00" + struct.pack('>H', len(ticket)) + ticket
            data += b"\x16" + self.tls_version + ticket #New session ticket
        data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
        finish_len = random.choice([32, 40])
        data += b"\x16" + self.tls_version + struct.pack('>H', finish_len) + os.urandom(finish_len - 10) #Finished
        data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10]
        if buf:
            data += self.server_encode(buf)
        return data
    def decode_error_return(self, buf):
        # Protocol mismatch fallback: disable obfuscation for the rest of
        # the connection; strict modes feed 2 KiB of junk to the peer,
        # *_compatible modes pass the raw buffer through.
        self.handshake_status = -1
        if self.overhead > 0:
            self.server_info.overhead -= self.overhead
            self.overhead = 0
        if self.method == 'tls1.2_ticket_auth' or self.method == 'tls1.2_ticket_fastauth':
            return (b'E'*2048, False, False)
        return (buf, True, False)
    def server_decode(self, buf):
        # Server receive path, a small state machine returning
        # (data, need_decrypt, need_send_back):
        #   status & 4: established -- strip Application Data records;
        #   status & 1: ClientHello seen -- expect ChangeCipherSpec +
        #               Finished and verify its truncated HMAC;
        #   otherwise:  parse/authenticate the fake ClientHello.
        if self.handshake_status == -1:
            return (buf, True, False)
        if (self.handshake_status & 4) == 4:
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    break
                ret += self.recv_buffer[5:size+5]
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, True, False)
        if (self.handshake_status & 1) == 1:
            self.recv_buffer += buf
            buf = self.recv_buffer
            verify = buf
            if len(buf) < 11:
                raise Exception('server_decode data error')
            if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): #ChangeCipherSpec
                raise Exception('server_decode data error')
            buf = buf[6:]
            if not match_begin(buf, b"\x16" + self.tls_version + b"\x00"): #Finished
                raise Exception('server_decode data error')
            verify_len = struct.unpack('>H', buf[3:5])[0] + 1 # 11 - 10
            if len(verify) < verify_len + 10:
                return (b'', False, False)
            if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]:
                raise Exception('server_decode data error')
            self.recv_buffer = verify[verify_len + 10:]
            status = self.handshake_status
            self.handshake_status |= 4
            ret = self.server_decode(b'')
            return ret;
        #raise Exception("handshake data = %s" % (binascii.hexlify(buf)))
        self.recv_buffer += buf
        buf = self.recv_buffer
        ogn_buf = buf
        if len(buf) < 3:
            return (b'', False, False)
        if not match_begin(buf, b'\x16\x03\x01'):
            return self.decode_error_return(ogn_buf)
        buf = buf[3:]
        header_len = struct.unpack('>H', buf[:2])[0]
        if header_len > len(buf) - 2:
            return (b'', False, False)
        self.recv_buffer = self.recv_buffer[header_len + 5:]
        self.handshake_status = 1
        buf = buf[2:header_len + 2]
        if not match_begin(buf, b'\x01\x00'): #client hello
            logging.info("tls_auth not client hello message")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
            logging.info("tls_auth wrong message size")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if not match_begin(buf, self.tls_version):
            logging.info("tls_auth wrong tls version")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        # 32-byte "random" carrying timestamp + nonce + truncated HMAC.
        verifyid = buf[:32]
        buf = buf[32:]
        sessionid_len = ord(buf[0])
        if sessionid_len < 32:
            logging.info("tls_auth wrong sessionid_len")
            return self.decode_error_return(ogn_buf)
        sessionid = buf[1:sessionid_len + 1]
        buf = buf[sessionid_len+1:]
        self.client_id = sessionid
        sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], hashlib.sha1).digest()[:10]
        utc_time = struct.unpack('>I', verifyid[:4])[0]
        time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
        if self.server_info.obfs_param:
            try:
                self.max_time_dif = int(self.server_info.obfs_param)
            except:
                pass
        # Reject timestamps outside the allowed window or long before the
        # recorded (shifted) server startup time.
        if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
                or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
            logging.info("tls_auth wrong time")
            return self.decode_error_return(ogn_buf)
        if sha1 != verifyid[22:]:
            logging.info("tls_auth wrong sha1")
            return self.decode_error_return(ogn_buf)
        if self.server_info.data.client_data.get(verifyid[:22]):
            logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
            return self.decode_error_return(ogn_buf)
        self.server_info.data.client_data.sweep()
        self.server_info.data.client_data[verifyid[:22]] = sessionid
        if len(self.recv_buffer) >= 11:
            ret = self.server_decode(b'')
            return (ret[0], True, True)
        # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
        # Scan the extension bytes for the extended_master_secret marker
        # and extract the SNI host name preceding it.
        buf = buf[48:]
        host_name = b''
        for index in range(len(buf)):
            if index + 4 < len(buf):
                if buf[index:index + 4] == b"\x00\x17\x00\x00":
                    # NOTE(review): comparing bytes to str '' below is always
                    # True on Python 3 -- likely intended b''; confirm.
                    if buf[:index] != '':
                        host_name = buf[:index]
                        host_name = host_name.decode('utf-8')
        return (b'', False, True, host_name)
|
{
"content_hash": "40f8e62aee28213168898a2c85958352",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 230,
"avg_line_length": 44.439189189189186,
"alnum_prop": 0.5552683594343926,
"repo_name": "kaneawk/shadowsocksr",
"id": "36f4af354552c5644c971ccaf7abc771420379b6",
"size": "13757",
"binary": false,
"copies": "2",
"ref": "refs/heads/mu",
"path": "shadowsocks/obfsplugin/obfs_tls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "Dockerfile",
"bytes": "732"
},
{
"name": "Python",
"bytes": "438088"
},
{
"name": "Shell",
"bytes": "16758"
}
],
"symlink_target": ""
}
|
import requests
import json
import sys
sys.path.append('../conn/')
import conn
#vol_name = raw_input('Input volume name:')
# Shared endpoint and session settings from the local conn helper module.
url = conn.url + 'storage/scrub/'
auth = conn.auth
headers = conn.headers

# Scrub task definition used by the POST/PUT helpers: volume id 2,
# scheduled every Sunday (dayweek 7) at 00:00.
payload = {
    "scrub_volume": 2,
    "scrub_dayweek": "7",
    "scrub_minute": "00",
    "scrub_hour": "00",
    "scrub_month": "*",
    "scrub_daymonth": "*",
}
def scrub_get():
r = requests.get(url, auth = auth)
result = json.loads(r.text)
i = 0
for i in range(0,len(result)):
for items in result[i]:
print items+':', result[i][items]
def scrub_post():
r = requests.post(url, auth = auth, data = json.dumps(payload), headers = headers)
result = json.loads(r.text)
for items in result:
print items+':', result[items]
def scrub_put():
id = raw_input('Input id:')+'/'
r = requests.put(url+id, auth = auth, data = json.dumps(payload), headers = headers)
result = json.loads(r.text)
for items in result:
print items+':', result[items]
def scrub_delete():
id = raw_input('Input id:')+'/'
r = requests.delete(url+id, auth = auth)
print r.status_code
# Interactive loop: read an HTTP verb and dispatch to the matching helper;
# any other input is ignored and the prompt repeats.
handlers = {
    'get': scrub_get,
    'post': scrub_post,
    'delete': scrub_delete,
    'put': scrub_put,
}
while True:
    method = raw_input('Input method:')
    if method in handlers:
        handlers[method]()
|
{
"content_hash": "6720e963342e7f483f3fcb2d8865a743",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 86,
"avg_line_length": 24.472727272727273,
"alnum_prop": 0.5973254086181278,
"repo_name": "PatriQ7/freenas-test-api",
"id": "70cce0c70d16449df8fa120f49b8dda6937aaf8b",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storage/scrub.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "61858"
}
],
"symlink_target": ""
}
|
import unittest
import time
import os
import win32com.client
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
__author__ = 'Kan!skA'
# (C)2017,GIST,CDAC
class TTS(unittest.TestCase):
    """Drives the TDIL TTS web demo with Selenium: feeds lines from input
    text files, clicks Listen, and saves the synthesized audio through the
    browser's download dialog via WScript.Shell SendKeys (Windows only).
    """
    driver = None
    @classmethod
    def setUpClass(cls):
        # One shared Firefox session for the whole class.
        cls.driver = webdriver.Firefox()
        time.sleep(11)
        cls.driver.maximize_window()
        cls.driver.get("http://tdil-dc.in/tts1/")
        time.sleep(5)
    def test_TTS(self):
        # Input folders and the base file name expected inside each folder.
        iFolder = ['COMP', 'DO', 'LANG_SPEC', 'MOS', 'MRT', 'OMOS', 'SUS']
        iFile = ['pass', 'DO', 'Lanspec', 'MOS', 'MRT', 'OLDMOS', 'Sus']
        for i, j in zip(iFolder, iFile):
            inputFileName = 'Input/' + i + '/' + j
            iPath = inputFileName + '.txt'
            if os.path.exists(iPath):
                reader = open(iPath, encoding='utf-8')
                lineNumForFile = 1
                # One synthesis round-trip per input line.
                for inputData in reader.read().split('\n'):
                    # Select Your Language---------------------------------------------------------------------->
                    Select(self.driver.find_element_by_xpath(".//*[@id='Language']")).select_by_visible_text("Hindi")
                    # Clear the Text Area
                    self.driver.find_element_by_xpath(".//*[@id='ip']").clear()
                    time.sleep(2)
                    # Enter Input in the Text Area
                    self.driver.find_element_by_xpath(".//*[@id='ip']").send_keys(str(inputData))
                    time.sleep(2)
                    # Click on Listen Button
                    self.driver.find_element_by_xpath(".//*[@id='AutoNumber1']/tbody/tr[4]/td[2]/input[2]").click()
                    time.sleep(35)
                    displayed = self.driver.find_elements_by_xpath(".//*[@id='t2vDownloadLink']").__len__()
                    if displayed > 0:
                        # Right-Click on Download Link
                        download = self.driver.find_element_by_xpath(".//*[@id='t2vDownloadLink']")
                        ActionChains(self.driver).move_to_element(download).context_click(download).key_down(
                            Keys.ARROW_DOWN).perform()
                        time.sleep(2)
                        shell = win32com.client.Dispatch("WScript.Shell")
                        # 'k' presumably selects the save entry in the
                        # context menu -- locale/browser dependent; confirm.
                        shell.SendKeys('k')
                        time.sleep(2)
                        # Path for Keeping Output
                        # Change the language name----------------------------->
                        shell.SendKeys(os.getcwd() + str('\Output\\' + i + '\\Hin_' + i + '_') + str(lineNumForFile))
                        lineNumForFile += 1
                        time.sleep(2)
                        shell.SendKeys('{ENTER}')
                        time.sleep(11)
                    else:
                        # No download link appeared: record the failing
                        # input line in a sibling "_error" file.
                        errorOutputFileName = inputFileName + '_error'
                        error_out_data_file = open(errorOutputFileName + '.txt', "w", encoding='utf-8')
                        error_out_data_file.write(str(inputData))
                        error_out_data_file.writelines('\n')
                        time.sleep(3)
                        error_out_data_file.close()
            else:
                print('No Input File exists for ' + i)
    @classmethod
    def tearDownClass(cls):
        # Close the shared browser session.
        cls.driver.quit()
# Allow running this suite directly: `python TestScript.py`.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "50e5b717adc06379594d4f88fed6759f",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 117,
"avg_line_length": 42.95238095238095,
"alnum_prop": 0.47200665188470065,
"repo_name": "GruLab/Text-To-Speech",
"id": "cbfb2fc3170a0cb1dc14840aeb69d99acc671f10",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "237"
},
{
"name": "Python",
"bytes": "3873"
}
],
"symlink_target": ""
}
|
"""Decorator for API proxy views"""
import functools
import json
import logging
from requests.exceptions import ConnectTimeout, ReadTimeout
# Canned JSON body returned when the proxied API call times out.
TIMEOUT_RESPONSE = json.dumps({"error": "Request timeout."})
log = logging.getLogger(__name__)
def on_timeout(response=TIMEOUT_RESPONSE):
    """Decorator factory for views that proxy API calls.

    Returns the view's own result normally; if the upstream request raises
    a connect or read timeout, logs it and returns ``response`` instead.
    """
    def decorator(view_func):
        """Wrap one view function with timeout handling."""
        @functools.wraps(view_func)
        def guarded(*args, **kwargs):
            """Call the view, converting API timeouts into ``response``."""
            try:
                return view_func(*args, **kwargs)
            except ConnectTimeout as conn_exception:
                log.exception("Connection timeout for API: %s", conn_exception)
            except ReadTimeout as read_exception:
                log.exception("Read timeout for API: %s", read_exception)
            return response
        return guarded
    return decorator
|
{
"content_hash": "f01b53b0d75858b77f70f83612d7d7c8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 26.65714285714286,
"alnum_prop": 0.6441586280814576,
"repo_name": "ministryofjustice/cla_public",
"id": "b3c605a78fea5d9e6fdfaf39d117418350053f88",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_public/libs/api_proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1271"
},
{
"name": "HTML",
"bytes": "206100"
},
{
"name": "JavaScript",
"bytes": "110334"
},
{
"name": "Jinja",
"bytes": "12336"
},
{
"name": "Mustache",
"bytes": "2915"
},
{
"name": "Python",
"bytes": "351431"
},
{
"name": "SCSS",
"bytes": "35282"
},
{
"name": "Shell",
"bytes": "5150"
}
],
"symlink_target": ""
}
|
import atexit
import functools
import os
import re
import sys
import time
import urllib
import uuid
import fixtures
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
import six
import testscenarios
import testtools
from tempest import clients
from tempest.common import credentials
from tempest.common import fixed_network
import tempest.common.generator.valid_generator as valid
import tempest.common.validation_resources as vresources
from tempest import config
from tempest import exceptions
LOG = logging.getLogger(__name__)
CONF = config.CONF
def attr(**kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.

    Only the ``type`` keyword is honoured; it may be a single attribute
    name (str) or a list of attribute names.
    """
    def decorator(f):
        if 'type' in kwargs and isinstance(kwargs['type'], str):
            f = testtools.testcase.attr(kwargs['type'])(f)
        elif 'type' in kwargs and isinstance(kwargs['type'], list):
            # BUG FIX: the loop variable was named ``attr``, shadowing this
            # decorator itself inside its own body; renamed for clarity.
            for attr_name in kwargs['type']:
                f = testtools.testcase.attr(attr_name)(f)
        return f
    return decorator
def idempotent_id(id):
    """Stub for metadata decorator"""
    # Validate eagerly: the id must be a string holding a well-formed UUID.
    if not isinstance(id, six.string_types):
        raise TypeError('Test idempotent_id must be string not %s'
                        % type(id).__name__)
    uuid.UUID(id)
    def decorator(func):
        # Tag the test and prepend the id to its docstring.
        func = testtools.testcase.attr('id-%s' % id)(func)
        existing_doc = func.__doc__
        if existing_doc:
            func.__doc__ = 'Test idempotent id: %s\n%s' % (id, existing_doc)
        else:
            func.__doc__ = 'Test idempotent id: %s' % id
        return func
    return decorator
def get_service_list():
    """Return a dict mapping each service tag to its availability flag.

    Availability comes from CONF.service_available; 'network' and
    'identity' are unconditionally True.
    """
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
        'telemetry': CONF.service_available.ceilometer,
        'data_processing': CONF.service_available.sahara,
        'database': CONF.service_available.trove
    }
    return service_list
def services(*args):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case, and skips the test at run time when any of
    those services is not available.
    """
    def decorator(f):
        valid_service_tags = ('compute', 'image', 'baremetal', 'volume',
                              'orchestration', 'network', 'identity',
                              'object_storage', 'dashboard', 'telemetry',
                              'data_processing', 'database')
        # Reject unknown tags at decoration time.
        for service in args:
            if service not in valid_service_tags:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # Skip at run time if any declared service is unavailable.
            available = get_service_list()
            for service in args:
                if not available[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(**kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
        ``application``: once in the stress job lifetime
        ``process``: once in the worker process lifetime
        ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # Default to per-process setup and no inheritance unless the
        # caller says otherwise.
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
def requires_ext(**kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            extension = kwargs['extension']
            service = kwargs['service']
            if is_extension_enabled(extension, service):
                return func(*func_args, **func_kwargs)
            msg = "Skipped because %s extension: %s is not enabled" % (
                service, extension)
            raise testtools.TestCase.skipException(msg)
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config

    :param extension_name: extension to look for
    :param service: one of 'compute', 'volume', 'network' or 'object'
    :returns: True when the extension is enabled for the service
    :raises KeyError: if ``service`` is not a known service key
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_extensions = config_dict[service]
    # An empty list means nothing is enabled; the sentinel 'all' in the
    # first position enables every extension.
    if not enabled_extensions:
        return False
    if enabled_extensions[0] == 'all':
        return True
    return extension_name in enabled_extensions
# Test classes whose tearDownClass has not yet chained up to the base
# class; the base tearDownClass discards entries.  NOTE(review): where
# classes get *added* to this set is not visible in this chunk --
# presumably in setUpClass; confirm.
at_exit_set = set()
def validate_tearDownClass():
    # atexit hook: report any class that never completed tearDownClass.
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
testtools.TestCase):
"""The test base class defines Tempest framework for class level fixtures.
`setUpClass` and `tearDownClass` are defined here and cannot be overwritten
by subclasses (enforced via hacking rule T105).
Set-up is split in a series of steps (setup stages), which can be
overwritten by test classes. Set-up stages are:
- skip_checks
- setup_credentials
- setup_clients
- resource_setup
Tear-down is also split in a series of steps (teardown stages), which are
stacked for execution only if the corresponding setup stage had been
reached during the setup phase. Tear-down stages are:
- clear_isolated_creds (defined in the base test class)
- resource_cleanup
"""
setUpClassCalled = False
_service = None
# NOTE(andreaf) credentials holds a list of the credentials to be allocated
# at class setup time. Credential types can be 'primary', 'alt', 'admin' or
# a list of roles - the first element of the list being a label, and the
# rest the actual roles
credentials = []
# Resources required to validate a server using ssh
validation_resources = {}
network_resources = {}
# NOTE(sdague): log_format is defined inline here instead of using the oslo
# default because going through the config path recouples config to the
# stress tests too early, and depending on testr order will fail unit tests
log_format = ('%(asctime)s %(process)d %(levelname)-8s '
'[%(name)s] %(message)s')
@classmethod
def setUpClass(cls):
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
# Stack of (name, callable) to be invoked in reverse order at teardown
cls.teardowns = []
# All the configuration checks that may generate a skip
cls.skip_checks()
try:
# Allocation of all required credentials and client managers
cls.teardowns.append(('credentials', cls.clear_isolated_creds))
cls.setup_credentials()
# Shortcuts to clients
cls.setup_clients()
# Additional class-wide test resources
cls.teardowns.append(('resources', cls.resource_cleanup))
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
etype, cls.__name__))
cls.tearDownClass()
try:
six.reraise(etype, value, trace)
finally:
del trace # to avoid circular refs
@classmethod
def tearDownClass(cls):
at_exit_set.discard(cls)
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
# Save any existing exception, we always want to re-raise the original
# exception only
etype, value, trace = sys.exc_info()
# If there was no exception during setup we shall re-raise the first
# exception in teardown
re_raise = (etype is None)
while cls.teardowns:
name, teardown = cls.teardowns.pop()
# Catch any exception in tearDown so we can re-raise the original
# exception at the end
try:
teardown()
except Exception as te:
sys_exec_info = sys.exc_info()
tetype = sys_exec_info[0]
# TODO(andreaf): Till we have the ability to cleanup only
# resources that were successfully setup in resource_cleanup,
# log AttributeError as info instead of exception.
if tetype is AttributeError and name == 'resources':
LOG.info("tearDownClass of %s failed: %s" % (name, te))
else:
LOG.exception("teardown of %s failed: %s" % (name, te))
if not etype:
etype, value, trace = sys_exec_info
# If exceptions were raised during teardown, an not before, re-raise
# the first one
if re_raise and etype is not None:
try:
six.reraise(etype, value, trace)
finally:
del trace # to avoid circular refs
@classmethod
def skip_checks(cls):
"""Class level skip checks. Subclasses verify in here all
conditions that might prevent the execution of the entire test class.
Checks implemented here may not make use API calls, and should rely on
configuration alone.
In general skip checks that require an API call are discouraged.
If one is really needed it may be implemented either in the
resource_setup or at test level.
"""
if 'admin' in cls.credentials and not credentials.is_admin_available():
msg = "Missing Identity Admin API credentials in configuration."
raise cls.skipException(msg)
if 'alt' in cls.credentials and not credentials.is_alt_available():
msg = "Missing a 2nd set of API credentials in configuration."
raise cls.skipException(msg)
if hasattr(cls, 'identity_version'):
if cls.identity_version == 'v2':
if not CONF.identity_feature_enabled.api_v2:
raise cls.skipException("Identity api v2 is not enabled")
elif cls.identity_version == 'v3':
if not CONF.identity_feature_enabled.api_v3:
raise cls.skipException("Identity api v3 is not enabled")
@classmethod
def setup_credentials(cls):
"""Allocate credentials and the client managers from them.
A test class that requires network resources must override
setup_credentials and defined the required resources before super
is invoked.
"""
for credentials_type in cls.credentials:
# This may raise an exception in case credentials are not available
# In that case we want to let the exception through and the test
# fail accordingly
if isinstance(credentials_type, six.string_types):
manager = cls.get_client_manager(
credential_type=credentials_type)
setattr(cls, 'os_%s' % credentials_type, manager)
# Setup some common aliases
# TODO(andreaf) The aliases below are a temporary hack
# to avoid changing too much code in one patch. They should
# be removed eventually
if credentials_type == 'primary':
cls.os = cls.manager = cls.os_primary
if credentials_type == 'admin':
cls.os_adm = cls.admin_manager = cls.os_admin
if credentials_type == 'alt':
cls.alt_manager = cls.os_alt
elif isinstance(credentials_type, list):
manager = cls.get_client_manager(roles=credentials_type[1:],
force_new=True)
setattr(cls, 'os_roles_%s' % credentials_type[0], manager)
    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # Intentionally a no-op hook: subclasses override this to bind the
        # clients they use (e.g. cls.client = cls.os.servers_client).
        # TODO(andreaf) There is a fair amount of code that could be moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass
@classmethod
def resource_setup(cls):
"""Class level resource setup for test cases.
"""
if hasattr(cls, "os"):
cls.validation_resources = vresources.create_validation_resources(
cls.os, cls.validation_resources)
else:
LOG.warn("Client manager not found, validation resources not"
" created")
@classmethod
def resource_cleanup(cls):
"""Class level resource cleanup for test cases.
Resource cleanup must be able to handle the case of partially setup
resources, in case a failure during `resource_setup` should happen.
"""
if cls.validation_resources:
if hasattr(cls, "os"):
vresources.clear_validation_resources(cls.os,
cls.validation_resources)
cls.validation_resources = {}
else:
LOG.warn("Client manager not found, validation resources not"
" deleted")
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's"
"setUpClass in the "
+ self.__class__.__name__)
at_exit_set.add(self.__class__)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
os.environ.get('OS_LOG_CAPTURE') != '0'):
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=self.log_format,
level=None))
@property
def credentials_provider(self):
return self._get_credentials_provider()
@classmethod
def _get_credentials_provider(cls):
"""Returns a credentials provider
If no credential provider exists yet creates one.
It uses self.identity_version if defined, or the configuration value
"""
if (not hasattr(cls, '_creds_provider') or not cls._creds_provider or
not cls._creds_provider.name == cls.__name__):
force_tenant_isolation = getattr(cls, 'force_tenant_isolation',
False)
identity_version = getattr(cls, 'identity_version', None)
identity_version = identity_version or CONF.identity.auth_version
cls._creds_provider = credentials.get_isolated_credentials(
name=cls.__name__, network_resources=cls.network_resources,
force_tenant_isolation=force_tenant_isolation,
identity_version=identity_version)
return cls._creds_provider
@classmethod
def get_client_manager(cls, credential_type=None, roles=None,
force_new=None):
"""Returns an OpenStack client manager
Returns an OpenStack client manager based on either credential_type
or a list of roles. If neither is specified, it defaults to
credential_type 'primary'
:param credential_type: string - primary, alt or admin
:param roles: list of roles
:returns the created client manager
:raises skipException: if the requested credentials are not available
"""
if all([roles, credential_type]):
msg = "Cannot get credentials by type and roles at the same time"
raise ValueError(msg)
if not any([roles, credential_type]):
credential_type = 'primary'
cred_provider = cls._get_credentials_provider()
if roles:
for role in roles:
if not cred_provider.is_role_available(role):
skip_msg = (
"%s skipped because the configured credential provider"
" is not able to provide credentials with the %s role "
"assigned." % (cls.__name__, role))
raise cls.skipException(skip_msg)
params = dict(roles=roles)
if force_new is not None:
params.update(force_new=force_new)
creds = cred_provider.get_creds_by_roles(**params)
else:
credentials_method = 'get_%s_creds' % credential_type
if hasattr(cred_provider, credentials_method):
creds = getattr(cred_provider, credentials_method)()
else:
raise exceptions.InvalidCredentials(
"Invalid credentials type %s" % credential_type)
return clients.Manager(credentials=creds, service=cls._service)
@classmethod
def clear_isolated_creds(cls):
"""
Clears isolated creds if set
"""
if hasattr(cls, '_creds_provider'):
cls._creds_provider.clear_isolated_creds()
@classmethod
def set_validation_resources(cls, keypair=None, floating_ip=None,
security_group=None,
security_group_rules=None):
"""Specify which ssh server validation resources should be created.
Each of the argument must be set to either None, True or False, with
None - use default from config (security groups and security group
rules get created when set to None)
False - Do not create the validation resource
True - create the validation resource
@param keypair
@param security_group
@param security_group_rules
@param floating_ip
"""
if not CONF.validation.run_validation:
return
if keypair is None:
if CONF.validation.auth_method.lower() == "keypair":
keypair = True
else:
keypair = False
if floating_ip is None:
if CONF.validation.connect_method.lower() == "floating":
floating_ip = True
else:
floating_ip = False
if security_group is None:
security_group = True
if security_group_rules is None:
security_group_rules = True
if not cls.validation_resources:
cls.validation_resources = {
'keypair': keypair,
'security_group': security_group,
'security_group_rules': security_group_rules,
'floating_ip': floating_ip}
@classmethod
def set_network_resources(cls, network=False, router=False, subnet=False,
dhcp=False):
"""Specify which network resources should be created
@param network
@param router
@param subnet
@param dhcp
"""
# network resources should be set only once from callers
# in order to ensure that even if it's called multiple times in
# a chain of overloaded methods, the attribute is set only
# in the leaf class
if not cls.network_resources:
cls.network_resources = {
'network': network,
'router': router,
'subnet': subnet,
'dhcp': dhcp}
@classmethod
def get_tenant_network(cls):
"""Get the network to be used in testing
:return: network dict including 'id' and 'name'
"""
# Make sure isolated_creds exists and get a network client
networks_client = cls.get_client_manager().networks_client
cred_provider = cls._get_credentials_provider()
# In case of nova network, isolated tenants are not able to list the
# network configured in fixed_network_name, even if the can use it
# for their servers, so using an admin network client to validate
# the network name
if (not CONF.service_available.neutron and
credentials.is_admin_available()):
admin_creds = cred_provider.get_admin_creds()
networks_client = clients.Manager(admin_creds).networks_client
return fixed_network.get_tenant_network(cred_provider,
networks_client)
def assertEmpty(self, list, msg=None):
self.assertTrue(len(list) == 0, msg)
def assertNotEmpty(self, list, msg=None):
self.assertTrue(len(list) > 0, msg)
class NegativeAutoTest(BaseTestCase):
    """Base class for auto-generated negative API tests.

    Together with ``load_tests`` and a JSON description of an API,
    ``generate_scenario`` expands the description into testscenarios
    scenarios (invalid resource ids and schema-generated invalid
    payloads) and ``execute`` issues the http call, asserting that the
    service answers with a 4xx client error.
    """
    # Registry of known-valid resource ids shared by all scenarios, keyed
    # by resource kind (e.g. "flavor"); populated via set_resource().
    _resources = {}
    @classmethod
    def setUpClass(cls):
        # The dedicated "negative" client sends requests without
        # client-side validation, so invalid payloads reach the server.
        super(NegativeAutoTest, cls).setUpClass()
        os = cls.get_client_manager(credential_type='primary')
        cls.client = os.negative_client
    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # The load_tests protocol passes arguments in a different order
        # depending on the caller; detect which form we got by probing
        # the first argument for a loader's suiteClass attribute.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)
    @staticmethod
    def generate_scenario(description):
        """
        Generate the test scenario list for a given description.

        :param description: A file or dictionary with the following entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the
                url, otherwise for the body of the http call.
        :return: list of (scenario name, scenario dict) tuples suitable for
            the testscenarios ``scenarios`` attribute.
        """
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            # A resource may be a plain name, or a dict carrying its own
            # expected result code alongside the name.
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            # Pair the resource with a random uuid, i.e. an id that is
            # almost certainly unknown to the server.
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list
    def execute(self, description):
        """
        Execute a http call on an api that is expected to
        result in client errors. First it uses invalid resources that are
        part of the url, and then invalid data for queries and http request
        bodies.

        :param description: A json file or dictionary with the same entries
            as documented in ``generate_scenario``, plus optionally
            default_result_code and admin_client.
        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]
        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]
        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            # Schema-driven scenario: send the invalid payload produced by
            # the negative test generator for this scenario.
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")
        if "admin_client" in description and description["admin_client"]:
            if not credentials.is_admin_available():
                msg = ("Missing Identity Admin API credentials in"
                       "configuration.")
                raise self.skipException(msg)
            creds = self.credentials_provider.get_admin_creds()
            os_adm = clients.Manager(credentials=creds)
            client = os_adm.negative_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
    def _http_arguments(self, json_dict, url, method):
        """Return the (url, body) pair for the given payload and method.

        For methods without a request body the payload is urlencoded into
        the query string; otherwise it becomes the json request body.
        """
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)
    def _check_negative_response(self, expected_result, result, body):
        """Assert the response is a client error (4xx, excluding 413) and,
        when an expected code is known, that it matches exactly.
        """
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))
    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a
        resource for a test.

        :param name: The name of the kind of resource such as "flavor",
            "role", etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource
    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.

        :param name: The name of the kind of resource such as "flavor",
            "role", etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # When running an invalid-resource scenario, return the generated
        # invalid id for the targeted resource kind.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
def SimpleNegativeAutoTest(klass):
    """
    Class decorator that registers a generic negative test method, named
    after the decorated class (e.g. FooBarJSONTest -> test_foo_bar).
    """
    @attr(type=['negative'])
    def generic_test(self):
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    stripped = klass.__name__.replace('JSON', '').replace('Test', '')
    # NOTE(mkoderer): insert '_' before every inner uppercase character
    snake_name = re.sub('(?<!^)(?=[A-Z])', '_', stripped).lower()
    setattr(klass, 'test_%s' % snake_name, generic_test)
    return klass
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).

    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
        invocation of the function.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False
|
{
"content_hash": "18d7e07d26d42ca8d795c6f7600b8497",
"timestamp": "",
"source": "github",
"line_count": 812,
"max_line_length": 79,
"avg_line_length": 41.064039408866996,
"alnum_prop": 0.5909309021113244,
"repo_name": "flyingfish007/tempest",
"id": "df6b30d236a49f3fb4e65746a52439c798641d83",
"size": "33980",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tempest/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2740403"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2014 Tilman Ginzel
This code is licensed under MIT license (see LICENSE.txt for details).
Created on 20.04.2014
'''
from general.Color import Color
from general.HomVector import HomVector
from general.Camera import Camera
from general.Light import Light
from entities.Sphere import Sphere
from entities.materials.SphereTextureMaterial import SphereTextureMaterial
class SpaceSetting(object):
    """
    This class is used to initialize important settings such as image size,
    background color, recursive depth etc.
    Furthermore you can configure the camera and add several entities and light sources to the scene.
    """
    def __init__(self, width=400, height=400, recursiveDepth=3, showMaterial=False):
        # width/height: image size in pixels (also configures the camera)
        # recursiveDepth: maximum ray-trace recursion depth
        # showMaterial: when False, materials are disabled on all entities
        self.setSize(width, height)
        self.showMaterial = showMaterial
        self.BACKGROUND_COLOR = Color(0, 0, 0)
        self.recursiveDepth = recursiveDepth
        self.entities = []
        # Scene content: a textured earth sphere and a smaller moon.
        # NOTE(review): 0.2727986 looks like the moon/earth radius ratio
        # applied to the earth radius of 2 — confirm against scene scale.
        self.entities.append(Sphere(HomVector(-1.5, -0.5, 0, 1), 2, color=(0, 255, 0), material=SphereTextureMaterial(filename='earth_texture_4096.jpg'), reflects=False, ambientCoefficient=0.2, diffuseCoefficient=0.7, specularCoefficient=0.1)) # earth
        self.entities.append(Sphere(HomVector(2, 2,-2.8, 1), 2*0.2727986, color=(0, 255, 0), material=SphereTextureMaterial(filename='moonmap.jpg'), reflects=False, ambientCoefficient=0.2, diffuseCoefficient=0.7, specularCoefficient=0.1)) # moon
        self.lights = []
        self.lights.append(Light(HomVector(30, 30, 10, 1), color=(255, 255, 255))) # white light
        if not self.showMaterial:
            for entity in self.entities:
                entity.showMaterial = False
    def setSize(self, width, height):
        """ Sets the size of the image and (re)configures the camera. """
        self.WIDTH = width
        self.HEIGHT = height
        self.PIXELCOUNT = self.WIDTH * self.HEIGHT
        # One percent of the pixel count, used for progress reporting.
        # NOTE(review): '/' is integer division under Python 2 — confirm
        # the intended rounding behavior when porting to Python 3.
        self.progressStep = self.PIXELCOUNT / 100
        # Camera at (0, 0, 6) looking at the origin, up = +y, 45 deg fov.
        self.camera = Camera(HomVector(0, 0, 6, 1), HomVector(0, 0, 0, 1), HomVector(0, 1, 0, 0), 45, self.WIDTH, self.HEIGHT)
|
{
"content_hash": "2d790de23b02f7abb1d8da9c9cccbce7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 251,
"avg_line_length": 48.97674418604651,
"alnum_prop": 0.6818613485280152,
"repo_name": "tilmanginzel/raytracer",
"id": "8be9a9c1b597caed17b19b44427be2d57477b12a",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/SpaceSetting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37329"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.