text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import codecs
import functools
import os
import threading
import types
import typing
from importlib.abc import InspectLoader
from cauldron import environ
from cauldron import templating
from cauldron.cli import threads
from cauldron.render import stack as render_stack
from cauldron.session import projects
class UserAbortError(Exception):
    """
    Raised when a user deliberately stops a step from within the step's own
    code. Cauldron handles this differently from ordinary exceptions: it is
    an intentional outcome and should not be displayed as an error.
    """

    def __init__(self, halt: bool = False):
        """
        :param halt:
            Flag carried through to the resulting StopCondition — presumably
            whether the whole project run should stop after this step aborts
            (TODO confirm against StopCondition's semantics).
        """
        self.halt = halt
def set_executing(on: bool):
    """
    Annotate the current thread with its step-execution state.

    Only CauldronThread instances carry the ``is_executing`` flag, so this
    is a no-op when called from the main thread or any other non-Cauldron
    thread.

    :param on:
        Whether or not the thread should be annotated as executing a step
        file.
    """
    current = threading.current_thread()
    if isinstance(current, threads.CauldronThread):
        current.is_executing = on
def get_file_contents(source_path: str) -> str:
    """
    Loads the contents of the source file into a string for execution, using
    multiple loading methods to handle cross-platform encoding edge cases.
    If none of the load methods work, a string of Python source is returned
    that raises an IOError when executed, alerting the user to the error
    when the step is run.

    :param source_path:
        Path of the step file to load.
    :return:
        The step file's source code, or an error-raising code snippet if the
        file could not be read.
    """
    open_funcs = [
        # Preferred: explicit utf-8 decoding.
        functools.partial(codecs.open, source_path, encoding='utf-8'),
        # Fallback: platform default encoding.
        functools.partial(open, source_path, 'r'),
    ]

    for open_func in open_funcs:
        try:
            with open_func() as f:
                return f.read()
        except Exception:
            # Try the next loading strategy; a total failure is surfaced by
            # the generated error snippet below.
            pass

    # BUG FIX: repr() the entire message so paths containing quotes or
    # backslashes still yield a syntactically valid Python snippet. The
    # original interpolated the raw path inside a double-quoted literal.
    message = 'Unable to load step file at: {}'.format(source_path)
    return 'raise IOError({!r})'.format(message)
def load_step_file(source_path: str) -> str:
    """
    Loads the source for a step file at the given path location and renders
    it into a template that appends additional footer data.

    The footer is used to force the display to flush the print buffer and
    breathe the step to open things up for resolution. This shouldn't be
    necessary, but there appears to be a hard-to-reproduce async race
    condition with print buffers, and the footer is in place to work around
    it.

    :param source_path:
        Path of the step file to load and wrap.
    """
    contents = get_file_contents(source_path)
    return templating.render_template(
        template_name='embedded-step.py.txt',
        source_contents=contents,
    )
def create_module(
        project: 'projects.Project',
        step: 'projects.ProjectStep'
):
    """
    Creates an artificial module that will encompass the code execution for
    the specified step. The module is given the standard dunder attributes
    (e.g. ``__file__``) to simulate how Python populates a normally loaded
    module.

    :param project:
        The currently open project.
    :param step:
        The step whose code will be run inside the created module.
    :return
        The created and populated module for the given step.
    """
    # Strip the file extension to produce the module name.
    module_name = step.definition.name.rsplit('.', 1)[0]
    module = types.ModuleType(module_name)

    # The package path combines a sanitized project id (dots would create
    # bogus package levels) with the step's extension-less filename split
    # on path separators.
    package_parts = [project.id.replace('.', '-')]
    package_parts += step.filename.rsplit('.', 1)[0].split(os.sep)

    module.__file__ = step.source_path
    module.__package__ = '.'.join(package_parts)
    return module
def run(
        project: 'projects.Project',
        step: 'projects.ProjectStep',
) -> dict:
    """
    Carries out the execution of the step python source file by loading it into
    an artificially created module and then executing that module and returning
    the result.

    :param project:
        The currently open project.
    :param step:
        The project step for which the run execution will take place.
    :return:
        A dictionary containing the results of the run execution, which
        indicate whether or not the run was successful. If the run failed for
        any reason, the dictionary will contain error information for display.
    """
    target_module = create_module(project, step)
    source_code = load_step_file(step.source_path)

    try:
        # Compile first so syntax errors get their own shallow, custom stack
        # trace instead of a runtime exception trace.
        code = InspectLoader.source_to_code(source_code, step.source_path)
    except SyntaxError as error:
        return render_syntax_error(project, error)

    def exec_test():
        # In testing mode, execute against a fresh dict seeded from the
        # module namespace so the step's resulting locals can be inspected.
        step.test_locals = dict()
        step.test_locals.update(target_module.__dict__)
        exec(code, step.test_locals)

    try:
        set_executing(True)
        # Raises ThreadAbortError up front if an abort was already requested.
        threads.abort_thread()

        if environ.modes.has(environ.modes.TESTING):
            exec_test()
        else:
            exec(code, target_module.__dict__)

        out = {
            'success': True,
            'stop_condition': projects.StopCondition(False, False)
        }
    except threads.ThreadAbortError:
        # Raised when a user explicitly aborts the running of the step through
        # a user-interface action.
        out = {
            'success': False,
            'stop_condition': projects.StopCondition(True, True)
        }
    except UserAbortError as error:
        # Raised when a user explicitly aborts the running of the step using
        # a cd.step.stop(). This behavior should be considered a successful
        # outcome as it was intentional on the part of the user that the step
        # abort running early.
        out = {
            'success': True,
            'stop_condition': projects.StopCondition(True, error.halt)
        }
    except Exception as error:
        out = render_error(project, error)

    # Always clear the executing flag, whatever the outcome above.
    set_executing(False)
    return out
def render_syntax_error(
        project: 'projects.Project',
        error: SyntaxError
) -> dict:
    """
    Renders a SyntaxError, which has a shallow, custom stack trace derived
    from the data included in the error, instead of the standard stack trace
    pulled from the exception frames.

    :param project:
        Currently open project.
    :param error:
        The SyntaxError to be rendered to html and text for display.
    :return:
        A dictionary containing the error response with rendered display
        messages for both text and html output.
    """
    # BUG FIX: SyntaxError.text can be None (e.g. for some compile-time
    # errors), in which case .rstrip() would raise an AttributeError and
    # mask the original error.
    line = error.text.rstrip() if error.text else ''

    return render_error(
        project=project,
        error=error,
        stack=[dict(
            filename=error.filename,
            location=None,
            line_number=error.lineno,
            line=line
        )]
    )
def render_error(
        project: 'projects.Project',
        error: Exception,
        stack: typing.List[dict] = None
) -> dict:
    """
    Renders an Exception to an error response that includes rendered text
    and html error messages for display.

    :param project:
        Currently open project.
    :param error:
        The exception to be rendered to html and text for display.
    :param stack:
        Optionally specify a parsed stack. If this value is None the standard
        Cauldron stack frames will be rendered.
    :return:
        A dictionary containing the error response with rendered display
        messages for both text and html output.
    """
    if stack is not None:
        frames = stack
    else:
        frames = render_stack.get_formatted_stack_frame(project)

    render_args = dict(
        type=error.__class__.__name__,
        message='{}'.format(error),
        stack=frames
    )

    return dict(
        success=False,
        error=error,
        message=templating.render_template(
            'user-code-error.txt', **render_args
        ),
        html_message=templating.render_template(
            'user-code-error.html', **render_args
        )
    )
|
{
"content_hash": "b28f4013c14beb7a3cdcc6508fa6709b",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 79,
"avg_line_length": 30.603846153846153,
"alnum_prop": 0.6444639939675757,
"repo_name": "sernst/cauldron",
"id": "6736f973ea50d6f0f94ab37f4af22e18415b0900",
"size": "7957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cauldron/runner/python_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "CSS",
"bytes": "1369"
},
{
"name": "Dockerfile",
"bytes": "842"
},
{
"name": "HTML",
"bytes": "21740"
},
{
"name": "JavaScript",
"bytes": "48753"
},
{
"name": "Python",
"bytes": "913057"
},
{
"name": "SCSS",
"bytes": "17130"
},
{
"name": "Shell",
"bytes": "300"
},
{
"name": "Vue",
"bytes": "95790"
}
],
"symlink_target": ""
}
|
import datetime as dt
import re

from flask import flash
from flask_wtf import Form
from isbnlib import meta, editions
from isbnlib.dev._exceptions import (
    ISBNNotConsistentError,
    NoDataForSelectorError,
)
from wtforms import TextField
from wtforms.fields import (
    SelectMultipleField,
    SelectField,
    IntegerField,
    DateField,
    TextAreaField,
)
from wtforms.fields.core import BooleanField, StringField
from wtforms.fields.simple import SubmitField
from wtforms.validators import InputRequired, Length, Optional, ValidationError

from roodkamer.library.models import Book, Author, Publisher
from roodkamer.validators import ValidateISBN
from roodkamer.database import db
class AuthorCollisionException(Exception):
    """Raised when an author with the same name already exists in the db."""

    def __init__(self, author):
        # ``author`` arrives as a sequence of name parts; store the joined
        # full name plus a display message for the user.
        self.author = ' '.join(author)
        self.message = "Author '{0}' found in database.".format(self.author)
class ISBNField(StringField):
    """StringField that validates its contents as an ISBN via isbnlib.

    Relies on ``NoDataForSelectorError`` being imported at module level
    (the original caught a name that was never imported, so a lookup miss
    raised NameError instead of a validation message).
    """

    # Metadata dict fetched from the ISBN lookup service; populated by
    # get_metadata_from_ISBN() and consumed by LoanForm.
    mdata = None

    def pre_validate(self, form):
        """Validate the ISBN unless the loan form itself was submitted."""
        if hasattr(form.loan_submit, "data") and form.loan_submit.data:
            # Plain loan submission: skip the (slow) ISBN lookup.
            return True
        try:
            return bool(self.get_metadata_from_ISBN())
        except NoDataForSelectorError:
            msg = "Couldn't find the ISBN in the services we use. Sorry!"
            raise ValidationError(msg)

    def get_metadata_from_ISBN(self):
        """Fetch book metadata for this field's ISBN.

        Returns True when metadata was found and stored on ``self.mdata``,
        False otherwise. Raises ValidationError when the lookup hit a
        consistency error but a replacement ISBN-13 was suggested.
        """
        # TODO: Maybe find earliest edition and fill in
        # original_publication_date...but probably not...

        # BUG FIX: initialize srch so it is defined even when no
        # ISBNNotConsistentError occurred (the original raised NameError
        # when mdata was falsy without an exception).
        srch = None
        try:
            self.mdata = meta(self.data, service='goob')
        except ISBNNotConsistentError as e:
            # Something wrong with google service. Fall back!
            self.mdata = meta(self.data)
            isbnre = re.compile("'identifier': u'(?P<isbn13>[0-9]{13})'")
            srch = isbnre.search(e.message) if hasattr(e, "message") else None

        if self.mdata:
            return True
        if srch:
            msg = "This returned an ISBN consistency error, try: '{num}'"
            # BUG FIX: str.format returns a new string; the original
            # discarded the formatted result and raised the raw template.
            msg = msg.format(num=srch.groupdict().get('isbn13'))
            raise ValidationError(msg)
        return False
class LoanForm(Form):
    """Form for loaning a book, with optional ISBN-based metadata autofill."""

    book_title = StringField(
        'Title',
        validators=[InputRequired(), Length(min=3, max=128)]
    )
    authors = SelectMultipleField('Author(s)', validators=[InputRequired()])
    pages = IntegerField('Pages')
    book_type = SelectField('Book Type')
    publication_date = DateField('Publication Date', format="%Y-%m-%d",
                                 validators=(Optional(),))
    original_publication_date = DateField('Original Publication Date',
                                          format="%Y-%m-%d",
                                          validators=(Optional(),))
    published_by = StringField('Publisher', validators=(Optional(),))
    isbn = ISBNField('ISBN',
                     validators=[InputRequired(), ValidateISBN()])
    term_of_loan = DateField("Term of Loan", format="%Y-%m-%d",
                             validators=(Optional(),))
    condition_of_loan = TextAreaField("Conditions of Loan",
                                      validators=(Optional(),))
    # Two submit buttons: a normal submission and a metadata-autofill action.
    loan_submit = SubmitField("Submit")
    fill_ISBN = SubmitField("Fill in metadata by ISBN")

    def __init__(self, *args, **kwargs):
        """Initialize the form and the author bookkeeping lists."""
        super(LoanForm, self).__init__(*args, **kwargs)
        # Author names that collided with existing Author rows.
        self.author_collisions = []
        # New Author models prepared for insertion into the database.
        self.authors_new = []

    def validate(self):
        """Validate the form.

        When the "fill by ISBN" button was pressed, only the ISBN field is
        checked and, on success, the rest of the form is populated from the
        fetched metadata. Otherwise the full form is validated.
        """
        if self.fill_ISBN.data:
            try:
                # NOTE(review): the class LoanForm is passed where an
                # instance is expected; pre_validate only reads
                # form.loan_submit (a class attribute here), so this works,
                # but ``self`` looks like the intent — confirm.
                if self.isbn.pre_validate(LoanForm):
                    self.set_metadata_from_ISBN()
                    return True
            except ValidationError as ve:
                # NOTE(review): Form.errors is typically a computed property
                # in wtforms — verify that update() actually surfaces this
                # message in the rendered form.
                self.errors.update({self.isbn.label.field_id: [ve.message]})
            return False
        else:
            if not self.prep_validation_is_success():
                return False
            if super(LoanForm, self).validate():
                # An edition can never be published before the original.
                opd = self.original_publication_date.data
                pd = self.publication_date.data
                if opd and pd and opd > pd:
                    msg = "No editions should precede the initial version."
                    self.original_publication_date.errors.append(msg)
                    return False
                else:
                    return True
            else:
                return False

    def set_metadata_from_ISBN(self):
        """Populate form fields from the metadata fetched by the ISBN field."""
        # TODO: Maybe find earliest edition and fill in
        # original_publication_date...but probably not...
        mdata = self.isbn.mdata
        self.authors.data = mdata.get('Authors')
        if not self.authors.choices:
            #self.authors.data = mdata.get('Authors')
            self.authors.choices = [(a,a) for a in self.authors.data]
        else:
            #self.authors.data.extend(mdata.get('Authors'))
            self.authors.choices.extend([(a,a) for a in self.authors.data])
        self.isbn.data = mdata.get('ISBN-13')
        self.published_by.data = mdata.get('Publisher')
        self.book_title.data = mdata.get('Title')
        try:
            # 'Year' is sometimes a full date and sometimes just a year.
            self.publication_date.data = dt.datetime.strptime(
                mdata.get('Year'),
                "%Y-%m-%d"
            )
        except ValueError:
            self.publication_date.data = dt.datetime.strptime(
                mdata.get('Year') + "-01-01",  # Year, default to YYYY-01-01
                "%Y-%m-%d"
            )
        # Presumably cleared so the widget re-renders from .data instead of
        # the raw submitted input — TODO confirm against wtforms rendering.
        self.publication_date.raw_data = None
        if mdata.get('Pages'):
            self.pages.data = int(mdata.get('Pages'))
            self.pages.raw_data = None

    def prep_validation_is_success(self):
        """Resolve publisher and author records before field validation.

        Returns True when new authors were prepared without collisions;
        False otherwise (including when no authors were supplied).
        """
        # NOTE(review): ``is not None`` would be the idiomatic comparison.
        if self.published_by.data != None:
            pub = Publisher.query.filter_by(id=self.published_by.data).first()
            if not pub:
                new_pub = Publisher(name=self.published_by.data)
                self.publisher = new_pub
                self.publisher_added = True
            else:
                self.publisher = pub
                self.publisher_added = False
        author_list = Author.query.order_by('last_name').all()
        if self.authors.data:
            for author in self.authors.data:
                try:
                    self.authors_new.append(
                        self._prep_authors(author, author_list)
                    )
                except AuthorCollisionException as ace:
                    self.author_collisions.append(ace.author)
            if self.authors_new and not self.author_collisions:
                self.authors.data = []
                # TODO: It may be easier to replace maxid with any integer
                # As long as the new authors have different numbers, even
                # if those numbers already used as id's. This is because
                # currently the self.authors.data isn't actually used for
                # anything. This is really a hack to get around the validation
                maxid = max([a[0] for a in self.authors.choices])
                # Add all the new authors
                for i, a in enumerate(self.authors_new, start=1):
                    self.authors.choices.extend([
                        (maxid+i, a.full_name())
                    ])
                    self.authors.data.append(maxid+i)
                return True
        return False

    def _prep_authors(self, new_author, author_list):
        """Checks author name against database to find 'collisions'

        Function checks to see if the author's name is already in the database.
        The method also prepares the author's name to be included in the
        database.

        Args:
            self (`~roodkamer.library.forms.LoanForm`): This is the loan book
                view.
            new_author(unicode): This is the author's full name.
            author_list(list[`~roodkamer.library.models.Author`]): This is the
                list of all Author models presently in the database.

        Returns:
            `~roodkamer.library.models.Author`: A new Author model to be added
                to the database.

        Raises:
            AuthorCollisionException: If an author with the same first and last
                name is found, this exception is raised. This can be caught to
                allow the user to indicate if this is the same author as is
                currently in the database, or one with the same name.
        """
        # Only the first and last name parts are compared, so middle names
        # are ignored for collision purposes.
        new_author = new_author.split(' ')
        for author in author_list:
            if author.first_name == new_author[0] and\
               author.last_name == new_author[-1]:
                raise AuthorCollisionException(new_author)
        return Author(first_name=new_author[0], last_name=new_author[-1])
|
{
"content_hash": "791d25319091a7a0d94e59964016fa40",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 40.6651376146789,
"alnum_prop": 0.5607445008460237,
"repo_name": "brotherjack/Rood-Kamer",
"id": "187f77392bd776a1b8751ef24cc36da4eef3ab11",
"size": "8889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roodkamer/library/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39014"
},
{
"name": "HTML",
"bytes": "193070"
},
{
"name": "JavaScript",
"bytes": "364822"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "90352"
}
],
"symlink_target": ""
}
|
import unittest
from tests.server import TestServer
from vscphelper.vscplib import vscp, vscpEvent
from vscphelper.exception import *
from vscphelper import VSCPConstant as constant
from time import sleep
class vscplibMalfunctionTests(unittest.TestCase):
    """Exercises vscp() construction against misbehaving or absent servers."""

    def test_seedNotSent(self):
        """A server that never sends a seed must raise VSCPNoCommException."""
        server = TestServer(port=8080)
        with self.assertRaises(VSCPNoCommException):
            vscp()
        server.shutdown()

    def test_noCommunicationInit(self):
        """With no server running, construction must raise VSCPNoCommException."""
        with self.assertRaises(VSCPNoCommException):
            vscp()
class vscplibFunctionalTests(unittest.TestCase):
    """Functional tests for the vscp client against a scripted TestServer.

    The server replies with a canned AUTH handshake; individual tests then
    queue raw frames via ``self.client.ws.send(..., False)`` to simulate
    server responses.
    """

    def setUp(self, ):
        # Pre-scripted seed/AUTH exchange lets the client authenticate.
        self.server = TestServer(port=8080,
                                 welcomeMessage = "+;AUTH0;d002c278b35c152eed9ee2f475a561f1|+;AUTH1")
        self.client = vscp(user='admin', password='secret', domain='mydomain.com')

    def __receiveMessage(self):
        # Handler used by test_setHandler to record that an event arrived.
        self.receivedMessage = True

    def test_checkAuthenticated(self, ):
        self.assertTrue(self.client.authenticated)

    def test_checkSeedAndKey(self, ):
        # Seed comes from the scripted welcome message in setUp.
        self.assertEqual(self.client.ws.seed, "d002c278b35c152eed9ee2f475a561f1")
        self.assertEqual(self.client.calculateKey('admin', 'secret', 'mydomain.com'),
                         '1aaabe6d6af390f9729618ad3af4782f')

    def test_setResponseTimeout(self):
        self.client.setResponseTimeOut(1)
        self.assertEqual(self.client.ws.timeout, 1)
        # Zero is not a valid timeout value.
        with self.assertRaises(ValueError):
            self.client.setResponseTimeOut(0)

    def test_setHandler(self):
        self.assertIsNone(self.client.handler)
        # Handlers must be callable.
        with self.assertRaises(ValueError):
            self.client.setHandler("Malformed")
        self.client.setHandler(self.__receiveMessage)
        event = "E;0,9,1,1,523627200,FF:FF:FF:FF:FF:FF:FF:FE:00:26:55:CA:00:06:00:00,0,1,2,3"
        self.client.ws.send(event, False)
        # Give the asynchronous receive path a moment to invoke the handler.
        sleep(0.1)
        self.assertTrue(self.receivedMessage)

    def test_doCommand(self, ):
        self.client.ws.send("^+;NOOP", False)
        self.assertEqual(self.client.isConnected(),
                         constant.VSCP_ERROR_SUCCESS)
        self.assertEqual(self.client.doCommand(),
                         constant.VSCP_ERROR_SUCCESS)
        self.client.ws.send("^-;2;Unkown command",False)
        self.assertEqual(self.client.doCommand(),
                         constant.VSCP_ERROR_ERROR)
        # Simulate a dropped connection and restore it afterwards.
        self.client.ws.connected = False
        self.assertEqual(self.client.doCommand(),
                         constant.VSCP_ERROR_CONNECTION)
        self.client.ws.connected = True

    def test_sendEvent(self, ):
        event = vscpEvent(vscp_class = 2,
                          vscp_type = 0,
                          vscp_data = [1])
        # Only vscpEvent instances may be sent.
        with self.assertRaises(ValueError):
            self.client.sendEvent("Malformed Arg")
        self.client.ws.connected = False
        self.assertEqual(self.client.doCommand(),
                         constant.VSCP_ERROR_CONNECTION)
        self.client.ws.connected = True
        self.client.ws.send("^+;EVENT", False)
        self.assertEqual(self.client.isConnected(),
                         constant.VSCP_ERROR_SUCCESS)
        self.assertEqual(self.client.sendEvent(event),
                         constant.VSCP_ERROR_SUCCESS)
        self.client.ws.send("^-;2;Unkown command",False)
        self.assertEqual(self.client.sendEvent(event),
                         constant.VSCP_ERROR_ERROR)

    def test_ReceiveLoop(self, ):
        self.assertFalse(self.client.eventStreaming)
        self.client.ws.send("^+;OPEN|+;CLOSE", False)
        self.client.enterReceiveLoop()
        self.assertTrue(self.client.eventStreaming)
        self.client.quitReceiveLoop()
        self.assertFalse(self.client.eventStreaming)

    def test_receiveData(self, ):
        GUID = "FF:FF:FF:FF:FF:FF:FF:FE:00:26:55:CA:00:06:00:00"
        event = "E;0,9,1,2,523627200,"+GUID+",0,1,2,3"
        # Nothing queued yet.
        self.assertFalse(self.client.isDataAvailable())
        self.assertEqual(self.client.receiveEvent(), None)
        self.client.ws.send(event, False)
        # Allow the async receive path to enqueue the event.
        sleep(0.05)
        self.assertTrue(self.client.isDataAvailable())
        self.assertIsInstance(self.client.receiveEvent(), vscpEvent)

    def test_blockingReceiveData(self, ):
        event = "E;0,9,1,1,523627200,FF:FF:FF:FF:FF:FF:FF:FE:00:26:55:CA:00:06:00:00,0,1,2,3"
        # Blocking receive outside of a receive loop must fail...
        with self.assertRaises(VSCPException):
            self.client.blockingReceiveEvent()
        self.client.eventStreaming = True
        self.client.authenticated = False
        self.assertFalse(self.client.isDataAvailable())
        # ...and so must receiving while unauthenticated.
        with self.assertRaises(VSCPException):
            self.client.blockingReceiveEvent()
        self.client.authenticated = True
        self.client.ws.connected = True
        self.client.ws.send(event, False)
        self.assertIsInstance(self.client.blockingReceiveEvent(),
                              vscpEvent)

    def tearDown(self, ):
        self.client.ws.close()
        self.server.shutdown()
# Allow running this test module directly with `python test_vscplib.py`.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "f21419ab04957d9f90647679a46ed85a",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 101,
"avg_line_length": 40.28346456692913,
"alnum_prop": 0.6182564503518374,
"repo_name": "nos86/VSCP-Helper-for-Python",
"id": "774a16e3bd8ae305232c7754e0fac8aa59bd326f",
"size": "5116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_vscplib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107418"
},
{
"name": "Shell",
"bytes": "84"
}
],
"symlink_target": ""
}
|
import wgAPI
import telebot
import config
from loggerFunc import log
def polling():
    """Run the Telegram bot's long-polling loop, restarting on failure.

    Registers the /start and /help welcome handler plus a catch-all text
    handler that looks up a player dossier via wgAPI, then polls forever,
    logging and restarting on recoverable errors.
    """
    bot = telebot.TeleBot(config.telegramToken)

    @bot.message_handler(commands=['start', 'help'])
    def send_welcome(message):
        # Greet the user and prompt for a player nickname (Russian text).
        bot.send_message(message.chat.id, "Здарова, танкист! Давай, пиши ник своего товарища...")
        log("Новый пользователь - " + str(message.chat.id))

    @bot.message_handler(func=lambda message: True, content_types=['text'])
    def echo_msg(message):
        # Any plain-text message is treated as a nickname to search for.
        log("Пользователь - " + str(message.chat.id) + " ищет " + message.text)
        bot.send_message(message.chat.id, "Поиск досье...")
        dossier = wgAPI.getUserDossier(message.text)
        if len(dossier) == 0:
            bot.send_message(message.chat.id, "Досье не найдено. Увы!")
        else:
            bot.send_message(message.chat.id, dossier)

    global isPolling
    while True:
        try:
            log('Стартуем бота')
            isPolling = 'ok'
            bot.polling(none_stop=True, interval=1)
        except Exception as error:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit can still stop the process, and the failure
            # reason is logged instead of being discarded.
            log("Exception (polling): " + str(error))
|
{
"content_hash": "2413b5ec86d6bc331d34eaed2bee68b5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 97,
"avg_line_length": 33.15625,
"alnum_prop": 0.6173421300659755,
"repo_name": "rvgorlov/WotStatomerTelegramBot",
"id": "b8740020abb19bd0ffd5a75ff5ff9ab91c604ca4",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "botLogic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8597"
}
],
"symlink_target": ""
}
|
# Re-export the generated client class as this subpackage's public API.
from .client import RegionNotificationEndpointsClient

__all__ = ("RegionNotificationEndpointsClient",)
|
{
"content_hash": "41b8a17f5049463cb14ed6d775053d4f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 53,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.8173076923076923,
"repo_name": "googleapis/python-compute",
"id": "749cc698c25fb94c0c39e220432ca89003ed45de",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/region_notification_endpoints/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
from cli_tools.flakiness_cli import api
from cli_tools.flakiness_cli import frames
def GetBuilders():
    """Get the builders data frame and keep a cached copy."""
    def make_frame():
        # Fetch fresh data from the API and wrap it in a data frame.
        return frames.BuildersDataFrame(api.GetBuilders())

    # Builders change rarely, so a 12 hour cache keeps requests cheap.
    return frames.GetWithCache(
        'builders.pkl', make_frame, expires_after=datetime.timedelta(hours=12))
def GetTestResults(master, builder, test_type):
    """Get a test results data frame and keep a cached copy.

    Args:
      master: Name of the buildbot master.
      builder: Name of the builder.
      test_type: Name of the test type (step).

    Returns:
      The test results data frame, loaded from a local cache when a fresh
      enough copy exists.
    """
    def make_frame():
        data = api.GetTestResults(master, builder, test_type)
        return frames.TestResultsDataFrame(data)

    # BUG FIX: hashlib.md5 requires bytes on Python 3; encode the key so the
    # cache file name is computed the same way on both Python 2 and 3.
    key = '/'.join([master, builder, test_type]).encode('utf-8')
    basename = hashlib.md5(key).hexdigest()
    return frames.GetWithCache(
        basename + '.pkl', make_frame, expires_after=datetime.timedelta(hours=3))
|
{
"content_hash": "3c9f69d516ec80adcc81e241fd6b87f2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 31.5,
"alnum_prop": 0.7264957264957265,
"repo_name": "chromium/chromium",
"id": "2d4d48a8534b03dd7656f2a8644c9ffce1d56832",
"size": "960",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tools/perf/cli_tools/flakiness_cli/cached_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
HypermediaRequest is the base class for the Common CRUD CCML client, a dictionary driven interface
to http and CoAP requesters and response handlers to be used by clients and server request processors
Requests and their associated responses are exposed in a dictionary interface for processing by proxies and resources
in a linked structure by passing references to the request and the associated response to resources at selected link
targets.
The server invokes a request handler callback when requests are received, and passes a reference to a structure
containing request and response elements used in processing the hypermedia.
REQUEST
URI
content-format
method
payload
RESPONSE
status (code and reason)
content-format
payload
Hypermedia handlers will process the URI and query parameters first in order to select a set of resources for processing,
then apply the content format and method along with any supplementary options to the resources.
The terms used in the request and response elements are semantically aligned with the vocabularies used in links and
forms to describe hypermedia state exchanges, and are abstracted across different protocols like http and CoAP.
Common abstractions are needed to support multiple protocols. Initially there are HTTP and CoAP bindings to a
common set of terms.
URI and Query parameters are the same in http and CoAP
methods use the following mappings
GET
PUT
POST
PATCH
DELETE
Mappings to CoAP content-format identifiers:
22001 application/collection+senml+json
22002 application/senml+json
22003 application/link-format+json
22004 application/forms+link-format+json
responseTypes are a common subset of http and CoAP response codes
Success 200, 202, 204 2.02, 2.03, 2.04, 2.05
Created 201 2.01
BadRequest 400 4.00, 4.02
Unauthorized 401 4.01
Forbidden 403 4.03
NotFound 404 4.04
MethodNotAllowed 405 4.05
NotAcceptable 406 4.06
Conflict 409 4.09
PrecondFailed 412 4.12
UnsupportedType 415 4.15
ServerError 500 5.00
JSON keys to the request-response interface are described in the example below.
This is the common CRUD interface between HTTP and CoAP, and can be used as a
generic REST proxy.
{
"uriPath": ["/","a", "b"],
"uriQuery": {"rt": "test", "obs": "", "if": "core.b"}
"contentFormat": "application/link-format+json",
"options": {}
"method": "GET",
"payload": null,
"response": {
"status": "Success",
"code": "204",
"reason": "No Content",
"contentFormat": "application/link-format+json",
"payload": "[{"href":"","rel":"self","rt":"index"}]"
}
}
Client fills request form and sends to server using selected protocol
Server processes request and fills in response and transmits back to client
Client processes the response and updates application state
"""
__version__ = "0.1"
from urllib2 import urlparse
import MachineHypermediaToolkit.terms as v
class HypermediaRequest():
    """Dictionary-driven request/response container for the CRUD interface.

    Holds a request map (uriPath, uriQuery, options, contentFormat, method,
    payload plus a nested response map keyed by the vocabulary terms in
    ``v``) and optionally seeds the path and query elements from a URL.
    Protocol-specific subclasses implement send()/getResponse().
    """

    def __init__(self, url=None, requestMap=None):
        """
        :param url: Optional URL whose path and query seed the request map.
        :param requestMap: Optional dict of initial request map overrides.
        """
        self._requestMap = self._initRequestMap()
        # BUG FIX: the original used a mutable default argument ({}) for
        # requestMap, which is shared across every call.
        if requestMap:
            for item in requestMap:
                self._requestMap[item] = requestMap[item]
        if url:
            self._u = urlparse.urlparse(url)
            self._host = self._u.hostname
            self._port = self._u.port
            # BUG FIX: loop variables are now locals; the original iterated
            # with instance attributes (``for self.pathElement in ...``),
            # leaking transient loop state onto the object.
            for path_element in self._u.path.split("/"):
                if 0 < len(path_element):
                    self._requestMap[v.uriPath].append(path_element)
            for query_element in self._u.query.split("&"):
                if 0 < query_element.find("="):
                    # key=value style query parameter.
                    key, value = query_element.split("=")
                    self._requestMap[v.uriQuery][key] = value
                elif 0 < len(query_element):
                    # Flag-style query parameter without a value.
                    self._requestMap[v.uriQuery][query_element] = True

    def _initRequestMap(self):
        """Build a request map with every field present but null."""
        requestMap = {
            v.uriPath: ["/"],
            v.uriQuery: {},
            v.options: {},
            v.contentFormat: v._null,
            v.method: v._null,
            v.payload: v._null,
        }
        requestMap[v.response] = {
            v.status: v._null,
            v.code: v._null,
            v.reason: v._null,
            v.contentFormat: v._null,
            v.payload: v._null,
        }
        return requestMap

    def send(self, responseHandler=None):
        """Transmit the request; implemented by protocol-specific subclasses."""
        pass

    def getResponse(self):
        """Return the response; implemented by protocol-specific subclasses."""
        pass
|
{
"content_hash": "ac302e32bd9d708de54e07e896c44223",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 122,
"avg_line_length": 34.84615384615385,
"alnum_prop": 0.6549668874172185,
"repo_name": "connectIOT/MachineHypermediaToolkit",
"id": "31a920cc3bc47e94506430b5953277ddba2a7c7f",
"size": "4530",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "MachineHypermediaToolkit/resource/HypermediaRequest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "115885"
}
],
"symlink_target": ""
}
|
# Mask-detection inference script: runs a tiny-YOLOv3 model over one input
# image, counts masked vs unmasked faces and annotates a status banner.
import numpy as np
import argparse
import time
import cv2
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-o", "--output",help="path to output image")
ap.add_argument("-y", "--yolo", default="/opt/mask-detector/cfg", help="base path to YOLO cfg directory")
ap.add_argument("-c", "--confidence", type=float, default=0.2, help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.1, help="threshold when applying non-max suppression")
args = vars(ap.parse_args())

# load the class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label (red and green)
# index 0 = mask (green), index 1 = no-mask (red) in BGR order.
COLORS = [[0,255,0], [0,0,255]]

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolov3-tiny_obj_train_tiny8.weights"])
configPath = os.path.sep.join([args["yolo"], "yolov3-tiny_obj_train.cfg"])

# load our YOLO object detector
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# load our input image and get it height and width
image = cv2.imread(args["image"])
(H, W) = image.shape[:2]

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
# NOTE(review): indexing i[0] assumes an older OpenCV where
# getUnconnectedOutLayers() returns Nx1 arrays; newer cv2 (>= 4.5.4)
# returns a flat array and would need ln[i - 1] — confirm the pinned
# OpenCV version in the Docker image.
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln) #list of 3 arrays, for each output layer.
end = time.time()

# show timing information on YOLO
print("[INFO] YOLO took {:.6f} seconds".format(end - start))

# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []

# loop over each of the layer outputs
for output in layerOutputs:
    # loop over each of the detections
    for detection in output:
        # extract the class ID and confidence (i.e., probability) of
        # the current object detection
        scores = detection[5:] #last 2 values in vector
        classID = np.argmax(scores)
        confidence = scores[classID]
        # filter out weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > args["confidence"]:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype("int")
            # use the center (x, y)-coordinates to derive the top and
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            # update our list of bounding box coordinates, confidences,
            # and class IDs
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)

# apply NMS to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],args["threshold"])

border_size=100
border_text_color=[255,255,255]
#Add top-border to image to display stats
image = cv2.copyMakeBorder(image, border_size,0,0,0, cv2.BORDER_CONSTANT)
#calculate count values (only over the boxes NMS kept)
filtered_classids=np.take(classIDs,idxs)
mask_count=(filtered_classids==0).sum()
nomask_count=(filtered_classids==1).sum()
#display count
text = "NoMaskCount: {} MaskCount: {}".format(nomask_count, mask_count)
cv2.putText(image,text, (0, int(border_size-50)), cv2.FONT_HERSHEY_SIMPLEX,0.8,border_text_color, 2)
#display status
text = "Status:"
cv2.putText(image,text, (W-300, int(border_size-50)), cv2.FONT_HERSHEY_SIMPLEX,0.8,border_text_color, 2)
# NOTE(review): when there are no detections this is 0/0; with numpy
# integer counts that evaluates to nan (with a RuntimeWarning), which the
# isnan check below routes to the "Safe" branch — confirm this is intended
# rather than an explicit zero-detection guard.
ratio=nomask_count/(mask_count+nomask_count)

if ratio>=0.1 and nomask_count>=3:
    text = "Danger !"
    cv2.putText(image,text, (W-200, int(border_size-50)), cv2.FONT_HERSHEY_SIMPLEX,0.8,[26,13,247], 2)
elif ratio!=0 and np.isnan(ratio)!=True:
    text = "Warning !"
    cv2.putText(image,text, (W-200, int(border_size-50)), cv2.FONT_HERSHEY_SIMPLEX,0.8,[0,255,255], 2)
else:
    text = "Safe "
    cv2.putText(image,text, (W-200, int(border_size-50)), cv2.FONT_HERSHEY_SIMPLEX,0.8,[0,255,0], 2)

# ensure at least one detection exists
if len(idxs) > 0:
    # loop over the indexes we are keeping
    for i in idxs.flatten():
        # extract the bounding box coordinates
        # (shift y down by the border added above the image)
        (x, y) = (boxes[i][0], boxes[i][1]+border_size)
        (w, h) = (boxes[i][2], boxes[i][3])
        # draw a bounding box rectangle and label on the image
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 1)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 1)

if args["output"]:
    #save the image
    cv2.imwrite(args["output"],image)
else:
    # show the output image
    cv2.imshow("Image",image)
    cv2.waitKey(0)
|
{
"content_hash": "28849e23910bdebe87d7ff1bfb0ca11e",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 116,
"avg_line_length": 38.15714285714286,
"alnum_prop": 0.7077873455634593,
"repo_name": "grycap/scar",
"id": "6867c84896ab362bbdd1e2f2ef6ea1a16157b9b2",
"size": "5370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mask-detector-workflow/mask-detector/mask-detector-image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "324748"
}
],
"symlink_target": ""
}
|
"""Stores various configuration options and constants for Oppia."""
import copy
import os
# Whether to unconditionally log info messages.
DEBUG = False
# The platform for the storage backend. This is used in the model-switching
# code in core/platform.
PLATFORM = 'gae'
# Whether we should serve the development or production experience.
# On GAE, a SERVER_SOFTWARE value that is absent or starts with
# 'Development' indicates the local dev server.
if PLATFORM == 'gae':
    DEV_MODE = (
        not os.environ.get('SERVER_SOFTWARE')
        or os.environ['SERVER_SOFTWARE'].startswith('Development'))
else:
    raise Exception('Invalid platform: expected one of [\'gae\']')
# Directories for test data, sample content and extension resources.
TESTS_DATA_DIR = os.path.join('core', 'tests', 'data')
SAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')
INTERACTIONS_DIR = os.path.join('extensions', 'interactions')
GADGETS_DIR = os.path.join('extensions', 'gadgets')
RTE_EXTENSIONS_DIR = os.path.join('extensions', 'rich_text_components')
RULES_DIR = os.path.join('extensions', 'rules')
OBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')
OBJECTS_DIR = os.path.join('extensions', 'objects')
SKINS_TEMPLATES_DIR = os.path.join('extensions', 'skins')
# Frontend templates are served from 'dev' or 'prod' depending on DEV_MODE.
TEMPLATES_DIR_PREFIX = 'dev' if DEV_MODE else 'prod'
FRONTEND_TEMPLATES_DIR = os.path.join(
    'core', 'templates', TEMPLATES_DIR_PREFIX, 'head')
DEPENDENCIES_TEMPLATES_DIR = os.path.join('extensions', 'dependencies')
VALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')
# The maximum number of results to retrieve in a datastore query.
DEFAULT_QUERY_LIMIT = 1000
# The current version of the exploration states blob schema. If any backward-
# incompatible changes are made to the states blob schema in the data store,
# this version number must be changed and the exploration migration job
# executed.
CURRENT_EXPLORATION_STATES_SCHEMA_VERSION = 3
# The default number of exploration tiles to load at a time in the gallery
# page.
GALLERY_PAGE_SIZE = 10
# The default number of commits to show on a page in the exploration history
# tab.
COMMIT_LIST_PAGE_SIZE = 50
# The default number of items to show on a page in the exploration feedback
# tab.
FEEDBACK_TAB_PAGE_SIZE = 20
# Default name for the initial state of an exploration.
DEFAULT_INIT_STATE_NAME = 'First State'
# The default content text for the initial state of an exploration.
DEFAULT_INIT_STATE_CONTENT_STR = ''
# Name (and description) of the default rule.
DEFAULT_RULE_NAME = 'Default'
# Default valid parameter for instantiating Explorations when explicit
# skin customizations aren't provided.
DEFAULT_SKIN_CUSTOMIZATIONS = {'panels_contents': {}}
# A dict containing the accepted image formats (as determined by the imghdr
# module) and the corresponding allowed extensions in the filenames of uploaded
# files.
ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {
    'jpeg': ['jpg', 'jpeg'],
    'png': ['png'],
    'gif': ['gif']
}
# Static file url to path mapping
PATH_MAP = {
    '/css': os.path.join('core', 'templates', 'dev', 'head', 'css'),
    '/extensions/gadgets': GADGETS_DIR,
    '/extensions/interactions': INTERACTIONS_DIR,
    '/extensions/rich_text_components': RTE_EXTENSIONS_DIR,
    '/favicon.ico': os.path.join('static', 'images', 'favicon.ico'),
    '/images': os.path.join('static', 'images'),
    '/lib/static': os.path.join('lib', 'static'),
    '/third_party/static': os.path.join('third_party', 'static'),
}
# Format string for displaying datetimes in the UI.
HUMAN_READABLE_DATETIME_FORMAT = '%b %d %Y, %H:%M UTC'
# A string containing the disallowed characters in state or exploration names.
# The underscore is needed because spaces in names must be converted to
# underscores when displayed as part of a URL or key. The other conventions
# here are derived from the Wikipedia guidelines for naming articles.
# ASCII control characters (0-31) and DEL (127) are also disallowed. Built
# with a single join expression instead of a module-level for-loop with
# repeated '+=' so that no stray loop variable leaks into the module
# namespace.
INVALID_NAME_CHARS = (
    u':#/|_%<>[]{}\ufffd\\' + chr(127) +
    u''.join(chr(ind) for ind in range(32)))
# Prefix for data sent from the server to the client via JSON.
# Prepending this string defeats cross-site script inclusion (XSSI)
# attacks; the client strips it before parsing.
XSSI_PREFIX = ')]}\'\n'
# A regular expression for alphanumeric characters.
ALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'
# A regular expression for tags.
TAG_REGEX = r'^[a-z ]+$'
# Invalid names for parameters used in expressions.
AUTOMATICALLY_SET_PARAMETER_NAMES = ['answer', 'choices']
# Parameter names that clash with reserved words in the expression language.
INVALID_PARAMETER_NAMES = AUTOMATICALLY_SET_PARAMETER_NAMES + [
    'abs', 'all', 'and', 'any', 'else', 'floor', 'if', 'log', 'or',
    'pow', 'round', 'then']
# These are here rather than in rating_services.py to avoid import
# circularities with exp_services.
# TODO (Jacob) Refactor exp_services to remove this problem.
# Template mapping from rating value ('1'-'5') to the number of times
# that rating was given. Callers must never mutate this shared template.
_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}


def get_empty_ratings():
    """Return a fresh, independently mutable copy of the empty ratings dict."""
    return copy.deepcopy(_EMPTY_RATINGS)
# Committer id for system actions.
ADMIN_COMMITTER_ID = 'admin'
ADMIN_EMAIL_ADDRESS = 'testadmin@example.com'
# Ensure that ADMIN_EMAIL_ADDRESS is valid and corresponds to an owner of the
# app before setting this to True. If ADMIN_EMAIL_ADDRESS is not that of an
# app owner, email messages from this user cannot be sent.
CAN_SEND_EMAILS_TO_ADMIN = False
# The maximum size of an uploaded file, in bytes.
MAX_FILE_SIZE_BYTES = 1048576
# The default language code for an exploration.
DEFAULT_LANGUAGE_CODE = 'en'
# Whether to include a page with the Oppia discussion forum.
SHOW_FORUM_PAGE = True
# User id and username for exploration migration bot.
MIGRATION_BOT_USER_ID = 'OppiaMigrationBot'
MIGRATION_BOT_USERNAME = 'OppiaMigrationBot'
# Ids and locations of the permitted extensions.
# Maps each rich-text-component id to the directory holding its definition.
ALLOWED_RTE_EXTENSIONS = {
    'Collapsible': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')
    },
    'Image': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')
    },
    'Link': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')
    },
    'Math': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')
    },
    'Tabs': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')
    },
    'Video': {
        'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')
    },
}
# These categories and interactions are displayed in the order in which they
# appear in the interaction selector.
ALLOWED_INTERACTION_CATEGORIES = [{
    'name': 'General',
    'interaction_ids': [
        'Continue',
        'EndExploration',
        'ImageClickInput',
        'MultipleChoiceInput',
        'TextInput'
    ],
}, {
    'name': 'Math',
    'interaction_ids': [
        'GraphInput',
        'LogicProof',
        'NumericInput',
        'SetInput',
    ]
}, {
    'name': 'Programming',
    'interaction_ids': ['CodeRepl'],
}, {
    'name': 'Music',
    'interaction_ids': [
        'MusicNotesInput'
    ],
}, {
    'name': 'Geography',
    'interaction_ids': [
        'InteractiveMap'
    ],
}]
# Maps each permitted gadget id to the directory holding its definition.
ALLOWED_GADGETS = {
    'AdviceBar': {
        'dir': os.path.join(GADGETS_DIR, 'AdviceBar')
    },
    'ScoreBar': {
        'dir': os.path.join(GADGETS_DIR, 'ScoreBar')
    },
}
# Demo explorations to load on startup. The id assigned to each exploration
# is based on the index of the exploration in this list, so if you want to
# add a new exploration and preserve the existing ids, add that exploration
# to the end of the list.
# Each item is represented as a tuple: (filepath, title, category). If the
# filepath is a yaml file it should end with '.yaml', otherwise it should
# be the path to the directory WITHOUT a trailing '/'.
DEMO_EXPLORATIONS = [
    ('welcome.yaml', 'Welcome to Oppia!', 'Welcome'),
    ('multiples.yaml', 'Project Euler Problem 1', 'Coding'),
    ('binary_search', 'The Lazy Magician', 'Mathematics'),
    ('root_linear_coefficient_theorem.yaml', 'Root Linear Coefficient Theorem',
     'Mathematics'),
    ('three_balls', 'Three Balls', 'Mathematics'),
    ('cities.yaml', 'World Cities', 'Geography'),
    ('boot_verbs.yaml', 'Boot Verbs', 'Languages'),
    ('hola.yaml', u'¡Hola!', 'Languages'),
    # This exploration is included to show other applications of Oppia, but
    # please note that Oppia lacks many of the features of a full interactive
    # fiction engine!
    ('adventure.yaml', 'Parameterized Adventure', 'Interactive Fiction'),
    ('pitch_perfect.yaml', 'Pitch Perfect', 'Music'),
    ('test_interactions', 'Test of expressions and interactions', 'Test'),
    ('modeling_graphs', 'Graph Modeling', 'Mathematics'),
    ('protractor_test_1.yaml', 'Protractor Test', 'Mathematics'),
    ('solar_system', 'The Solar System', 'Physics'),
    ('about_oppia.yaml', 'About Oppia', 'Welcome'),
    # TODO(anuzis): Replace about_oppia.yaml with this dev version when gadget
    # visibility by state is functional. Currently an AdviceBar gadget that
    # should only display on the Helsinki map state is visible during the
    # entire exploration as a dev demo.
    ('about_oppia_w_gadgets.yaml', 'Welcome with Gadgets! (DEV ONLY)',
     'Welcome'),
]
# URL routes for the various handlers and pages.
# TODO(sll): Add all other URLs here.
CLONE_EXPLORATION_URL = '/contributehandler/clone'
CONTRIBUTE_GALLERY_URL = '/contribute'
CONTRIBUTE_GALLERY_DATA_URL = '/contributehandler/data'
EDITOR_URL_PREFIX = '/create'
EXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'
EXPLORATION_DATA_PREFIX = '/createhandler/data'
EXPLORATION_URL_PREFIX = '/explore'
EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'
FEEDBACK_LAST_UPDATED_URL_PREFIX = '/feedback_last_updated'
FEEDBACK_THREAD_URL_PREFIX = '/threadhandler'
FEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'
GALLERY_URL = '/gallery'
GALLERY_CREATE_MODE_URL = '%s?mode=create' % GALLERY_URL
GALLERY_DATA_URL = '/galleryhandler/data'
LEARN_GALLERY_URL = '/learn'
LEARN_GALLERY_DATA_URL = '/learnhandler/data'
NEW_EXPLORATION_URL = '/contributehandler/create_new'
PLAYTEST_QUEUE_URL = '/playtest'
PLAYTEST_QUEUE_DATA_URL = '/playtesthandler/data'
RECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'
RECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'
SIGNUP_URL = '/signup'
SIGNUP_DATA_URL = '/signuphandler/data'
UPLOAD_EXPLORATION_URL = '/contributehandler/upload'
USERNAME_CHECK_DATA_URL = '/usernamehandler/data'
# Identifiers for the navigation mode of each top-level page.
NAV_MODE_ABOUT = 'about'
NAV_MODE_CONTACT = 'contact'
NAV_MODE_CREATE = 'create'
NAV_MODE_EXPLORE = 'explore'
NAV_MODE_GALLERY = 'gallery'
NAV_MODE_HOME = 'home'
NAV_MODE_PARTICIPATE = 'participate'
NAV_MODE_PROFILE = 'profile'
# Event types.
EVENT_TYPE_STATE_HIT = 'state_hit'
EVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'
EVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'
EVENT_TYPE_EXPLORATION_CHANGE = 'exploration_change'
EVENT_TYPE_EXPLORATION_STATUS_CHANGE = 'exploration_status_change'
EVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'
EVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'
# The values for these event types should be left as-is for backwards
# compatibility.
EVENT_TYPE_START_EXPLORATION = 'start'
EVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'
EVENT_TYPE_COMPLETE_EXPLORATION = 'complete'
# Play type constants
PLAY_TYPE_PLAYTEST = 'playtest'
PLAY_TYPE_NORMAL = 'normal'
# Predefined commit messages.
COMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'
# Unlaunched feature.
SHOW_SKIN_CHOOSER = False
# Output formats of downloaded explorations.
OUTPUT_FORMAT_JSON = 'json'
OUTPUT_FORMAT_ZIP = 'zip'
# Types of updates shown in the 'recent updates' table in the dashboard page.
UPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'
UPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'
# The name for the default handler of an interaction.
SUBMIT_HANDLER_NAME = 'submit'
# Color identifiers for category tiles in the gallery.
# Default color
COLOR_TEAL = 'teal'
# Social sciences
COLOR_SALMON = 'salmon'
# Art
COLOR_SUNNYSIDE = 'sunnyside'
# Mathematics and computing
COLOR_SHARKFIN = 'sharkfin'
# Science
COLOR_GUNMETAL = 'gunmetal'
DEFAULT_COLOR = COLOR_TEAL
# List of supported default categories. For now, each category has
# a specific color associated with it.
CATEGORIES_TO_COLORS = {
    'Architecture': COLOR_SUNNYSIDE,
    'Art': COLOR_SUNNYSIDE,
    'Biology': COLOR_GUNMETAL,
    'Business': COLOR_SALMON,
    'Chemistry': COLOR_GUNMETAL,
    'Computing': COLOR_SHARKFIN,
    'Economics': COLOR_SALMON,
    'Education': COLOR_TEAL,
    'Engineering': COLOR_GUNMETAL,
    'Environment': COLOR_GUNMETAL,
    'Geography': COLOR_SALMON,
    'Government': COLOR_SALMON,
    'Hobbies': COLOR_TEAL,
    'Languages': COLOR_SUNNYSIDE,
    'Law': COLOR_SALMON,
    'Life Skills': COLOR_TEAL,
    'Mathematics': COLOR_SHARKFIN,
    'Medicine': COLOR_GUNMETAL,
    'Music': COLOR_SUNNYSIDE,
    'Philosophy': COLOR_SALMON,
    'Physics': COLOR_GUNMETAL,
    'Programming': COLOR_SHARKFIN,
    'Psychology': COLOR_SALMON,
    'Puzzles': COLOR_TEAL,
    'Reading': COLOR_TEAL,
    'Religion': COLOR_SALMON,
    'Sport': COLOR_SUNNYSIDE,
    'Statistics': COLOR_SHARKFIN,
    'Welcome': COLOR_TEAL,
}
# List of supported language codes. Each description has a
# parenthetical part that may be stripped out to give a shorter
# description.
ALL_LANGUAGE_CODES = [{
    'code': 'en', 'description': u'English',
}, {
    'code': 'ar', 'description': u'العربية (Arabic)',
}, {
    'code': 'bg', 'description': u'български (Bulgarian)',
}, {
    'code': 'ca', 'description': u'català (Catalan)',
}, {
    'code': 'zh', 'description': u'中文 (Chinese)',
}, {
    'code': 'hr', 'description': u'hrvatski (Croatian)',
}, {
    'code': 'cs', 'description': u'čeština (Czech)',
}, {
    'code': 'da', 'description': u'dansk (Danish)',
}, {
    'code': 'nl', 'description': u'Nederlands (Dutch)',
}, {
    'code': 'tl', 'description': u'Filipino (Filipino)',
}, {
    'code': 'fi', 'description': u'suomi (Finnish)',
}, {
    'code': 'fr', 'description': u'français (French)',
}, {
    'code': 'de', 'description': u'Deutsch (German)',
}, {
    'code': 'el', 'description': u'ελληνικά (Greek)',
}, {
    'code': 'he', 'description': u'עברית (Hebrew)',
}, {
    'code': 'hi', 'description': u'हिन्दी (Hindi)',
}, {
    'code': 'hu', 'description': u'magyar (Hungarian)',
}, {
    'code': 'id', 'description': u'Bahasa Indonesia (Indonesian)',
}, {
    'code': 'it', 'description': u'italiano (Italian)',
}, {
    'code': 'ja', 'description': u'日本語 (Japanese)',
}, {
    'code': 'ko', 'description': u'한국어 (Korean)',
}, {
    'code': 'lv', 'description': u'latviešu (Latvian)',
}, {
    'code': 'lt', 'description': u'lietuvių (Lithuanian)',
}, {
    'code': 'no', 'description': u'Norsk (Norwegian)',
}, {
    'code': 'fa', 'description': u'فارسی (Persian)',
}, {
    'code': 'pl', 'description': u'polski (Polish)',
}, {
    'code': 'pt', 'description': u'português (Portuguese)',
}, {
    'code': 'ro', 'description': u'română (Romanian)',
}, {
    'code': 'ru', 'description': u'русский (Russian)',
}, {
    'code': 'sr', 'description': u'српски (Serbian)',
}, {
    'code': 'sk', 'description': u'slovenčina (Slovak)',
}, {
    'code': 'sl', 'description': u'slovenščina (Slovenian)',
}, {
    'code': 'es', 'description': u'español (Spanish)',
}, {
    'code': 'sv', 'description': u'svenska (Swedish)',
}, {
    'code': 'th', 'description': u'ภาษาไทย (Thai)',
}, {
    'code': 'tr', 'description': u'Türkçe (Turkish)',
}, {
    'code': 'uk', 'description': u'українська (Ukrainian)',
}, {
    'code': 'vi', 'description': u'Tiếng Việt (Vietnamese)',
}]
|
{
"content_hash": "a85b8cc61ac362c4ceb0de1e49e89fc7",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 79,
"avg_line_length": 34.52739726027397,
"alnum_prop": 0.6764530847054155,
"repo_name": "directorlive/oppia",
"id": "5a8b8e561f284619a6306abc6bd5c4d68d4fd6b7",
"size": "15865",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "feconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "44437"
},
{
"name": "HTML",
"bytes": "259274"
},
{
"name": "JavaScript",
"bytes": "1296213"
},
{
"name": "Python",
"bytes": "1502686"
},
{
"name": "Shell",
"bytes": "25115"
}
],
"symlink_target": ""
}
|
from src.circular.utils.events import EventMixin
from src.circular.template.context import Context
class MockAttr:
    """Mock of a DOM attribute node holding a ``name`` and a ``value``."""

    def __init__(self, name, val=None):
        self.name = name
        self.value = val

    def clone(self):
        """Return a new MockAttr with the same name and value."""
        return MockAttr(self.name, self.value)

    def __repr__(self):
        # Bug fix: the original built the repr with string concatenation,
        # which raised TypeError whenever ``value`` was not a string
        # (including the default None). ``format`` stringifies any value;
        # output is unchanged for string values.
        return "<MockAttr({},{})>".format(self.name, self.value)
class attrlist(list):
    """A list of attribute nodes that also supports attribute-style lookup.

    ``lst.foo`` yields the ``value`` of the first contained item whose
    ``name`` equals ``"foo"``; if no item matches, normal attribute
    resolution applies (so list methods keep working).
    """

    def __getattr__(self, name):
        matches = [item.value for item in self if item.name == name]
        if matches:
            return matches[0]
        return super().__getattribute__(name)
class MockElement(EventMixin):
    """In-memory stand-in for a browser DOM element.

    Mirrors the subset of the (Brython-style) DOM API exercised by the
    tests: an ``attributes`` list, ``children``, a backing low-level
    ``MockDomElt`` (``self.elt``), event emission via EventMixin, and
    attribute-style access to DOM attributes through the
    ``__getattr__``/``__setattr__`` overrides below.
    """
    # Counter used to lazily assign unique ids in the ``id`` property.
    _LAST_ID = 0
    def __init__(self, tagName, **kwargs):
        super().__init__()
        self.tagName = tagName
        self.attributes = attrlist([])
        self.children = []
        self.elt = MockDomElt(self)
        self.nodeName = tagName
        self.parent = None
        self._text = ''
        # Form controls start out with an (empty) ``value`` attribute.
        if self.tagName == 'input' or self.tagName == 'textarea':
            self.value = ''
        # Any remaining keyword arguments become DOM attributes.
        for (arg, value) in kwargs.items():
            self.setAttribute(arg, kwargs[arg])
    @property
    def text(self):
        """Own text followed by the text of all children, concatenated."""
        ret = self._text
        for ch in self.children:
            ret += ch.text
        return ret
    @text.setter
    def text(self, val):
        self._text = val
    @property
    def id(self):
        # NOTE(review): _indexAttr returns -1 when the attribute is
        # missing, so ``self.attributes[-1]`` silently returns the *last*
        # attribute's value whenever any attribute exists; the except
        # branch (which assigns a fresh unique id) is only reached when
        # the attribute list is empty. The bare ``except`` also swallows
        # everything — confirm and narrow if this mock is extended.
        try:
            return self.attributes[self._indexAttr('id')].value
        except:
            self.id = str(MockElement._LAST_ID)
            MockElement._LAST_ID += 1
            return str(MockElement._LAST_ID-1)
    @id.setter
    def set_id(self, value):
        # Only safe when an 'id' attribute already exists; otherwise
        # ``attributes[-1]`` rewrites the last attribute (see note above).
        self.attributes[self._indexAttr('id')].value = value
    def click(self):
        """Simulate a mouse click by emitting a 'click' event."""
        self.emit('click', {'type': 'click', 'target': self})
    def clone(self):
        """Deep-copy this element: tag, attributes, children and text."""
        ret = MockElement(self.tagName)
        for attr in self.attributes:
            ret.attributes.append(attr.clone())
        for ch in self.children:
            # ``<=`` is the Brython append-child operator (see __le__).
            ret <= ch.clone()
        ret._text = self._text
        return ret
    def clear(self):
        """Remove all children from this node and its DOM mirror."""
        self.elt.clear()
        self.children = []
    def _indexAttr(self, name):
        """Return the position of attribute *name*, or -1 if absent."""
        pos = 0
        for attr in self.attributes:
            if attr.name == name:
                return pos
            pos += 1
        return -1
    def removeAttribute(self, name):
        """Delete attribute *name* if present; no-op otherwise."""
        pos = self._indexAttr(name)
        if pos > -1:
            del self.attributes[pos]
    def setAttribute(self, name, value):
        """Set attribute *name*; setting 'value' also fires an input event."""
        if name == 'value':
            self.value = value
            self.emit('input', Context({'target': self}))
        else:
            pos = self._indexAttr(name)
            if pos > -1:
                self.attributes[pos].value = value
            else:
                self.attributes.append(MockAttr(name, value))
    def insertBefore(self, domnode, before):
        """Insert *domnode* immediately before the existing child *before*."""
        pos = self.children.index(before)
        self.elt.insertBefore(domnode.elt, self.children[pos].elt)
        self.children.insert(pos, domnode)
    def replaceChild(self, replace_with, replace_what):
        """Swap child *replace_what* for *replace_with*, updating parents."""
        pos = self.children.index(replace_what)
        self.elt.replaceChild(replace_with.elt, replace_what.elt)
        self.children[pos] = replace_with
        replace_with.parent = self
        replace_what.parent = None
    def _findChild(self, id):
        """Depth-first search of the subtree for an element with *id*."""
        for ch in self.children:
            if ch.id == id:
                return ch
            ret = ch._findChild(id)
            if ret is not None:
                return ret
        return None
    def __getattr__(self, attr):
        # Fallback lookup (only called when normal resolution fails):
        # expose the 'value' DOM attribute as a Python attribute.
        if attr == 'value':
            pos = self._indexAttr(attr)
            return self.attributes[pos].value
        return super().__getattribute__(attr)
    def __setattr__(self, name, value):
        # Real instance state goes through the default mechanism;
        # any other public name is stored as a DOM attribute.
        if name in ['tagName', 'attributes', 'children', 'elt', 'nodeName', 'parent', 'text', 'value'] or name.startswith('_'):
            return super().__setattr__(name, value)
        else:
            for attr in self.attributes:
                if attr.name == name:
                    attr.value = value
                    return
            self.attributes.append(MockAttr(name, value))
    def __delattr__(self, key):
        # Deleting an attribute removes it from the DOM attribute list.
        # NOTE(review): if the list is non-empty but *key* is absent, the
        # loop leaves ``pos`` at the last index and the *last* attribute
        # is deleted instead of raising KeyError — confirm intended.
        pos = -1
        for attr in self.attributes:
            pos += 1
            if attr.name == key:
                break
        if pos > -1:
            del self.attributes[pos]
        else:
            raise KeyError()
    def __le__(self, other):
        # Brython idiom: ``parent <= child`` appends child(ren).
        # NOTE(review): the list branch passes ``o`` (the MockElement)
        # to ``self.elt.appendChild`` while the scalar branch passes
        # ``other.elt`` — looks inconsistent; confirm which is intended.
        if isinstance(other, list):
            for o in other:
                o.parent = self
                self.children.append(o)
                self.elt.appendChild(o)
        else:
            other.parent = self
            self.children.append(other)
            self.elt.appendChild(other.elt)
    def __repr__(self):
        ret = "<"+self.tagName
        if len(self.attributes) > 0:
            ret += " "+" ".join([a.name+"='"+str(a.value)+"'" for a in self.attributes])
        ret += ">"
        return ret
class MockDomElt:
    """Mock of the low-level DOM node backing a MockElement.

    Keeps a parent pointer and an ordered list of children, mirroring the
    subset of the browser DOM API used by the tests. Child objects only
    need a writable ``parent`` attribute.
    """

    def __init__(self, node, parent=None):
        self.parent = parent
        self.children = []
        # The higher-level element this DOM node belongs to.
        self.node = node

    def clear(self):
        """Detach all children and empty the child list."""
        for ch in self.children:
            ch.parent = None
        self.children = []

    def appendChild(self, ch):
        """Append *ch* as the last child and adopt it."""
        self.children.append(ch)
        ch.parent = self

    def replaceChild(self, replace_with, replace_what):
        """Replace child *replace_what* with *replace_with* in place."""
        pos = self.children.index(replace_what)
        repl = self.children[pos]
        repl.parent = None
        self.children[pos] = replace_with
        replace_with.parent = self

    def insertBefore(self, ch, reference):
        """Insert *ch* immediately before the existing child *reference*.

        Bug fix: the original looked up ``ch`` (not yet a child, so
        ``index`` always raised ValueError), never set ``ch.parent``, and
        inserted ``ch`` twice. We look up *reference* and insert once.
        """
        pos = self.children.index(reference)
        self.children.insert(pos, ch)
        ch.parent = self
class COMMENT(MockElement):
    """Mock of a DOM comment node.

    The comment body is kept in ``_comment_text`` rather than in the
    element's regular text so it does not contribute to ``.text``.
    """

    def __init__(self, text=None, **kwargs):
        super().__init__('comment', **kwargs)
        self._comment_text = text
class SPAN(MockElement):
    """Mock of an HTML ``<span>`` element."""

    def __init__(self, **kwargs):
        super().__init__('span', **kwargs)
|
{
"content_hash": "77544723ec785dd44b31c6e3ef754c50",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 127,
"avg_line_length": 28.3,
"alnum_prop": 0.5342419653373717,
"repo_name": "jonathanverner/circular",
"id": "fa34c9e119db2f68600b84dd869e2ad69283d2c6",
"size": "5943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/brython/browser/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4448"
},
{
"name": "HTML",
"bytes": "1956"
},
{
"name": "Python",
"bytes": "183088"
},
{
"name": "Shell",
"bytes": "432"
},
{
"name": "Smarty",
"bytes": "754"
}
],
"symlink_target": ""
}
|
"""Fichier chargé de lancer les tests unitaires.
Ne pas utiliser 'python -m unittest' mais exécutez ce script. La
raison est simple : ce script est un script de boostrap. Il charge
l'importeur et les modules. La sauvegarde des enregistrements
n'est cependant ni chargée, ni écrasée.
"""
import os
import sys
import unittest
from lib import *
from bases.anaconf import anaconf
from bases.importeur import Importeur
from bases.logs import man_logs
from bases.logs.logger import Logger
from bases.parser_cmd import ParserCMD
from corps.config import pere
from primaires.format.date import *
from reseau.connexions.serveur import *
# Silence all logger output while the tests run.
Logger.print = lambda l, m: None
# Parse the command line and load the global configuration.
parser_cmd = ParserCMD()
parser_cmd.interpreter()
anaconf.config(parser_cmd)
config_globale = anaconf.get_config("globale", "kassie.cfg", \
        "modèle global", pere)
man_logs.config(anaconf, parser_cmd)
log = man_logs.creer_logger("", "sup", "kassie.log")
# Network server the importer needs; built from the global config values.
serveur = ConnexionServeur(4000, config_globale.nb_clients_attente, \
        config_globale.nb_max_connectes, config_globale.tps_attente_connexion,
        config_globale.tps_attente_reception)
# Bootstrap the importer with persistence disabled (sauvegarde=False) so
# the test run neither loads nor overwrites saved records.
importeur = Importeur(parser_cmd, anaconf, man_logs, serveur,
        sauvegarde=False)
importeur.tout_charger()
importeur.tout_instancier()
importeur.tout_configurer()
importeur.tout_initialiser()
importeur.tout_preparer()
# Bootstrap: this part creates the universe objects by recursively
# walking the packages and modules under the bootstrap package.
def charger_bootstrap(pypath):
    """Recursively import every bootstrap module under *pypath*.

    *pypath* is a dotted package path resolved relative to the current
    working directory. Every ``*.py`` file whose name does not start
    with an underscore is imported; sub-directories are walked
    recursively.
    """
    base_dir = pypath.replace(".", os.sep)
    if not os.path.isdir(base_dir):
        return
    for entry in os.listdir(base_dir):
        full_path = os.path.join(base_dir, entry)
        if os.path.isdir(full_path):
            charger_bootstrap(pypath + "." + entry)
        elif os.path.isfile(full_path) and entry.endswith(".py") \
                and not entry.startswith("_"):
            __import__(pypath + "." + entry[:-3])
# NOTE(review): "boostrap" (sic) matches the spelling in the module
# docstring; confirm the package really is named "test.boostrap" and
# not "test.bootstrap" before "fixing" it.
charger_bootstrap("test.boostrap")
# Discover and run every unit test under the current directory.
tests = unittest.TestLoader().discover('.')
retour = unittest.TextTestRunner().run(tests)
# Tear the universe down cleanly, then propagate the test outcome.
importeur.tout_detruire()
importeur.tout_arreter()
if not retour.wasSuccessful():
    sys.exit(1)
|
{
"content_hash": "dfaccd1b35308929b1b9736d18fc2179",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 31.041095890410958,
"alnum_prop": 0.7118270079435128,
"repo_name": "stormi/tsunami",
"id": "7788fdeccc8e8ac7599572c65f6bfc57af0e0986",
"size": "3836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/runtest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from .constants import CONSTANTS
from .game import PTI__game
from .thing import PTI__Thing
from .world import PTI__World
from .events import PTI_decorator__event
from .collisions import PTI_decorator__collision
from .color import PTI__Color
from .picture import PTI__Picture, PTI__open_picture, PTI__text_to_picture
from .canvas import PTI__Canvas
from .picture_thing import PTI__PictureThing
from .window import PTI__window
import pygame as _pygame
# Collect every pygame key-code constant (K_*) into CONSTANTS.
for name in dir(_pygame):
    if name.startswith("K_"):
        CONSTANTS[name] = getattr(_pygame, name)
import sys as _sys
# Re-export every entry of CONSTANTS as a top-level attribute of this
# module, so users can write e.g. ``gaminator.K_SPACE``.
_module = _sys.modules[__name__]
for key, value in CONSTANTS.items():
    setattr(_module, key, value)
# Initialise pygame's font subsystem once at import time.
_pygame.font.init()
|
{
"content_hash": "0c88911677c1722045faaede63fbc09e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 25.178571428571427,
"alnum_prop": 0.7333333333333333,
"repo_name": "syslo/gaminator",
"id": "c1bcdb6ad7278c1e635420f26e2ded0f6dc483ad",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gaminator-src/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43550"
}
],
"symlink_target": ""
}
|
from typing import Tuple
from sklearn.model_selection import train_test_split
import scipy.io
import scipy.stats
import scipy.fftpack
import numpy as np
from decision_trees.datasets.dataset_base import DatasetBase
from decision_trees.gridsearch import perform_gridsearch
from decision_trees.utils.constants import ClassifierType, GridSearchType
class Terrain(DatasetBase):
    """Terrain-classification dataset built from a MATLAB recording of steps.

    Extracts statistical-moment and FFT features per step from
    'pociete_kroki_rasp.mat' and produces a shuffled 80/20 train/test split.
    """
    def __init__(self, path: str):
        # Directory containing 'pociete_kroki_rasp.mat' (trailing slash
        # expected — the filename is appended directly in load_data).
        self.path = path
    def load_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Load the .mat file and return (train_data, train_target,
        test_data, test_target) with class labels 0-5, one per surface."""
        mat = scipy.io.loadmat(f'{self.path}pociete_kroki_rasp.mat')
        # for key, value in mat.items():
        #     print(key)
        # One entry per surface type; insertion order determines the
        # integer class label assigned below.
        steps_raw = {
            'krok_black': mat['krok_black'],
            'krok_deski': mat['krok_deski'],
            'krok_kam': mat['krok_kam'],
            'krok_pcv': mat['krok_pcv'],
            'krok_plytki': mat['krok_plytki'],
            'krok_wyk': mat['krok_wyk']
        }
        # for key, value in steps_raw.items():
        #     print(f'{key} shape: {np.shape(value)}')
        def do_fft(signal):
            # One-sided amplitude spectrum along axis 0.
            yf = scipy.fftpack.fft(signal, axis=0)
            yf = 2.0 / len(signal) * np.abs(yf[0:len(signal) // 2])
            return yf
        def compute_stats(matrix):
            # Build a per-step feature stack: four statistical moments
            # plus the first FFT bins, per measured channel.
            vector = []
            for i in range(0, np.shape(matrix)[0]):
                # assumes each step holds at least 401 samples — TODO confirm
                data_for_processing = matrix[i, :401, :]
                variance = np.var(data_for_processing, axis=0)
                skew = scipy.stats.skew(data_for_processing, axis=0)
                kurtosis = scipy.stats.kurtosis(data_for_processing, axis=0)
                fifth_moment = scipy.stats.moment(data_for_processing, moment=5, axis=0)
                temp = np.array([variance, skew, kurtosis, fifth_moment])
                fouriers = do_fft(data_for_processing)
                temp = np.vstack((temp, fouriers[1:25, :]))
                # NOTE(review): after the first iteration ``vector`` is an
                # ndarray and ``ndarray == []`` is a fragile comparison
                # (relies on NumPy returning a plain False for mismatched
                # shapes); accumulating in a list and stacking once, or
                # checking ``len(vector) == 0``, would be safer.
                if vector == []:
                    vector = temp
                else:
                    vector = np.dstack((vector, temp))
            vector = np.transpose(vector)
            return vector
        # 1st dim - step index
        # 2nd dim - measurement [F_x, F_y, F_z, T_x, T_y, T_z]
        # 3rd dim - [variance, skew, kurtosis, fifth_moment, 24 first fft elements sans zero frequency]
        input_data = []
        output_data = []
        # Enumerate surfaces; idx becomes the class label for each step.
        for idx, (key, value) in enumerate(steps_raw.items()):
            # print(idx)
            steps_stats = compute_stats(value)
            # print(np.shape(step_stats))
            # print(step_stats[0, :, :])
            # print(step_stats[0, 0, :])
            steps_stats_concatenated = []
            for s in steps_stats:
                # print(np.shape(s))
                # Flatten each step's feature matrix into one vector.
                s_concatenated = np.concatenate(s)
                # print(np.shape(s_concatenated))
                steps_stats_concatenated.append(s_concatenated)
            # print(np.shape(steps_stats_concatenated))
            input_data.extend(steps_stats_concatenated)
            output_data.extend(np.full(np.shape(steps_stats_concatenated)[0], idx))
        print(np.shape(input_data))
        print(np.shape(output_data))
        input_data = self._normalise(np.asarray(input_data))
        train_data, test_data, train_target, test_target = train_test_split(
            input_data, output_data, test_size=0.2, random_state=42, shuffle=True
        )
        return np.asarray(train_data), np.asarray(train_target), np.asarray(test_data), np.asarray(test_target)
    @staticmethod
    def _normalise(data: np.ndarray):
        """Min-max scale each column of *data* into [0, 1]."""
        # normalise each parameter (variance / skew / ...) for Fx, Fy etc separately
        # col_idx = 167
        #
        # print(f'max: {np.max(data[:, col_idx])}')
        # print(f'min: {np.min(data[:, col_idx])}')
        # print(np.shape(data[:, col_idx]))
        data_normed = (data - np.min(data, 0)) / np.ptp(data, 0)
        # print(f'max: {np.max(data_normed[:, col_idx])}')
        # print(f'min: {np.min(data_normed[:, col_idx])}')
        # print(np.shape(data_normed[:, col_idx]))
        return data_normed
def main():
    """Load the terrain dataset, report its shape and run the VHDL test."""
    dataset = Terrain('./../../data/datasets/terrain_data/')
    train_data, train_target, test_data, test_target = dataset.load_data()
    print(f'np.shape(train_data): {np.shape(train_data)}')
    print(f'np.unique(test_target): {np.unique(test_target)}')
    dataset.test_as_classifier(8, './../../data/vhdl/')


if __name__ == '__main__':
    main()
|
{
"content_hash": "2deb0d4982c442dd5978e7a961c865ff",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 111,
"avg_line_length": 36.05263157894737,
"alnum_prop": 0.5474452554744526,
"repo_name": "PUTvision/decision_tree",
"id": "2474dd05427acca3f8f5d52797d5c3d44093e3f5",
"size": "4795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "decision_trees/datasets/terrain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143168"
}
],
"symlink_target": ""
}
|
"""
WSGI config for the djangoproject project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "djangoproject.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoproject.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
{
"content_hash": "6c2ffedc8237d27e1ff0eb8d5b4234c4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.6875,
"alnum_prop": 0.7944055944055944,
"repo_name": "mburst/gevent-socketio-starterkit",
"id": "8632088c8b792c85f70a0fc7f348957688b1af18",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoproject/djangoproject/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8286"
},
{
"name": "JavaScript",
"bytes": "301576"
},
{
"name": "Python",
"bytes": "30261"
}
],
"symlink_target": ""
}
|
from operator import itemgetter, attrgetter
from wand.image import Image
from config import DEVELOP_MODE
from datetime import datetime, timedelta
from libs import doubandb, doubanfs, Employee, doubanmc, store, User
from webapp.models.consts import *
from webapp.models.notify import Notify
from webapp.models.profile import Profile
from webapp.models.badage import Badage
from webapp.models.question import Question, Answer
from config import SITE
import json
import math
# User ids excluded from the activity charts.
CHART_BLACK_LIST_UIDS = [
    '46555103', # Ruby = =
]
class Comment(object):
    """A user comment attached to a card."""

    def __init__(self, id, card_id, author_id, content, rtime):
        # All ids are normalised to strings for uniform comparisons.
        self.id = str(id)
        self.card_id = str(card_id)
        self.author_id = str(author_id)
        self.content = content
        self.rtime = rtime

    @property
    def html(self):
        """The comment content rendered to HTML with @-mentions expanded."""
        from webapp.models.utils import mention_text
        return mention_text(self.content)['html']

    @property
    def author(self):
        """The User who wrote this comment."""
        return User(id=self.author_id)

    @property
    def author_card(self):
        """The Card belonging to the comment's author."""
        return Card.get(self.author_id)

    @classmethod
    def remove(cls, author_id, comment_id):
        """Delete the comment with *comment_id* owned by *author_id*."""
        store.execute("delete from me_comment where"
                " `author_id`=%s and `id`=%s", (author_id, comment_id))
        store.commit()
class Card(object):
    """Employee profile card backed by the `me_card` table.

    Instances are normally obtained via Card.get(), which also merges in
    name/email/join date from the Employee service.  Mutating methods write
    through `store` and invalidate the memcache entries keyed by MC_KEY.
    Python 2 codebase (print statements, xrange).
    """
    # Visibility flags stored in me_card.flag.
    FLAG_NORMAL = 'N'
    FLAG_HIDE = 'H'
    # Memcache key template; formatted with either the numeric id or the uid.
    MC_KEY = 'me-card:%s'
    def __init__(self, id, uid, email, skype, name, alias, phone, photo_id, flag, join_time, rtime, ctime):
        """Populate from one me_card row; ids are normalized to strings."""
        self.id = str(id)
        self.uid = uid
        self.email = email
        self.skype = skype
        self.name = name
        self.alias = alias
        self.phone = phone
        self.join_time = join_time or ''
        self.photo_id = photo_id
        self.flag = flag
        self.rtime = rtime
        self.ctime = ctime
    @property
    def sort_date(self):
        # Sort key by hire date.
        return self.join_time
    @property
    def sort_time(self):
        # Sort key by last-updated time.
        return self.rtime
    @property
    def profile(self):
        # Extended profile row (me_profile) for this card.
        return Profile.get(self.id)
    def set_profile2(self, profile):
        """Store the free-form secondary profile blob in doubandb."""
        doubandb.set("me/card/profile2-%s" % self.id, profile)
    @property
    def profile2(self):
        # Secondary profile blob; defaults to an empty dict when unset.
        return doubandb.get("me/card/profile2-%s" % self.id, {})
    @property
    def department(self):
        return self.profile2.get('department', '')
    @property
    def selfintro(self):
        # Prefer the profile2 self-introduction, fall back to profile.intro.
        return self.profile2.get('selfintro', '') or self.profile.intro
    @property
    def position(self):
        return self.profile2.get('position', '')
    @property
    def path(self):
        # Site-relative URL of this card.
        return "/card/%s/" % self.uid
    @property
    def url(self):
        # Absolute URL of this card.
        return "%s/card/%s/" % (SITE, self.uid)
    @property
    def is_basic(self):
        # Minimum data needed for the card to be usable.
        return self.email and self.name
    @property
    def is_hide(self):
        return self.flag == self.FLAG_HIDE
    @property
    def is_full(self):
        # NOTE(review): `self.photo > 0` compares a *string* (the photo
        # property returns a path or '') against an int; under Python 2 a
        # non-empty str always compares greater than any int, so this term is
        # effectively "photo path non-empty".  Probably meant self.photo_id.
        return self.is_basic and self.skype and self.join_time and self.alias and self.photo > 0 and self.profile.sex and self.profile.love and self.profile.marriage and self.profile.birthday and self.profile.hometown
    def json_dict(self, user):
        """Serialize the card for API output.

        Returns {} when the owning User cannot be resolved.  `user` is the
        viewer (may be None); viewer-specific fields (is_liked, user_tags)
        are only included when a viewer is given.
        """
        ret = {}
        if not self.owner:
            return {}
        ret['id'] = self.id
        ret['alt'] = self.url
        ret['uid'] = self.uid
        ret['name'] = self.screen_name
        ret['icon'] = self.icon
        ret['email'] = self.email
        ret['skype'] = self.skype
        ret['alias'] = self.alias
        ret['join_time'] = self.join_time and self.join_time.strftime('%Y-%m-%d')
        if not DEVELOP_MODE:
            ret['city'] = self.owner.profile().get('city','城市')
        else:
            ret['city'] = 'city'
        ret['reg_time'] = self.owner.reg_time.strftime('%Y-%m-%d')
        ret['photo'] = SITE + self.photo
        ret['updated'] = self.rtime.strftime('%Y-%m-%d')
        ret['created'] = self.ctime.strftime('%Y-%m-%d')
        ret['like_num'] = self.like_num
        ret['comment_num'] = self.comment_num
        ret['url'] = self.url
        ret['province'] = self.profile.province
        ret['hometown'] = self.profile.hometown
        ret['resume'] = self.profile.resume
        ret['intro'] = self.profile.intro
        ret['weibo'] = self.profile.weibo
        ret['instagram'] = self.profile.instagram
        ret['blog'] = self.profile.blog
        ret['code'] = self.profile.code
        ret['github'] = self.profile.github
        ret['tags'] = self.tags
        ret['badages'] = [b.json_dict() for b in self.badages]
        ret['sex'] = self.profile.sex
        ret['love'] = self.profile.love
        ret['marriage'] = self.profile.marriage
        ret['zodiac'] = self.profile.zodiac
        ret['astro'] = self.profile.astro
        if user:
            ret['is_liked'] = self.is_liked(user.id)
            ret['user_tags'] = [t.json_dict(user) for t in self.ptags]
        return ret
    def can_view(self, user, attr=None):
        """Access control: owners always see their own card; hidden cards are
        invisible to others; otherwise the viewer must have a *complete* card
        (or, when `attr` is given, have that profile attribute filled in)."""
        if user:
            if user.id == self.id:
                return True
            if self.flag == self.FLAG_HIDE:
                return False
            card = Card.get(user.id)
            if not attr:
                return card and card.is_full
            else:
                v = getattr(card.profile, attr)
                return v
        return False
    @classmethod
    def search(cls, q):
        """Prefix-search visible cards by name, then uid, alias, email, skype.

        Each fallback only runs when the previous field produced no rows.
        The parameter value ends in '%%' which, inside a LIKE pattern,
        behaves the same as a single '%' wildcard.
        """
        cids = []
        rs = store.execute("select user_id from me_card where name like %s and flag=%s", (q + '%%', cls.FLAG_NORMAL))
        if rs:
            cids = [str(r[0]) for r in rs]
        else:
            rs = store.execute("select user_id from me_card where uid like %s and flag=%s", (q + '%%', cls.FLAG_NORMAL))
            if rs:
                cids = [str(r[0]) for r in rs]
            else:
                rs = store.execute("select user_id from me_card where alias like %s and flag=%s", (q + '%%', cls.FLAG_NORMAL))
                if rs:
                    cids = [str(r[0]) for r in rs]
                else:
                    rs = store.execute("select user_id from me_card where email like %s and flag=%s", (q + '%%', cls.FLAG_NORMAL))
                    if rs:
                        cids = [str(r[0]) for r in rs]
                    else:
                        rs = store.execute("select user_id from me_card where skype like %s"
                                " and flag=%s", (q + '%%', cls.FLAG_NORMAL))
                        if rs:
                            cids = [str(r[0]) for r in rs]
        return [cls.get(i) for i in cids]
    @property
    def owner(self):
        # The account this card belongs to.
        return User(id=self.id)
    @property
    def icon(self):
        # Owner avatar, with a static fallback image.
        return self.owner and self.owner.picture(default=True) or 'http://img3.douban.com/icon/user_normal.jpg'
    @property
    def screen_name(self):
        return self.owner and self.owner.name
    @classmethod
    def hide(cls, card_id, admin_id):
        """Admin-only: flag a card hidden and drop its memcache entries."""
        if admin_id in ADMINS:
            store.execute("update me_card set flag=%s where user_id=%s", (cls.FLAG_HIDE, card_id))
            store.commit()
            card = cls.get(card_id)
            if card:
                doubanmc.delete(cls.MC_KEY % card.id)
                doubanmc.delete(cls.MC_KEY % card.uid)
    @classmethod
    def new(cls, user_id, uid):
        """Create a card row (or touch rtime if it already exists)."""
        now = datetime.now()
        store.execute("insert into me_card(`user_id`,`uid`, `ctime`) values(%s,%s,%s)"
                " on duplicate key update rtime=%s", (user_id, uid, now, now))
        store.commit()
        doubanmc.delete("me:users:dict")
    @classmethod
    def get_by_ldap(cls, email):
        """Look up a visible card by company email; bare logins get the
        @douban.com suffix appended."""
        if not email.endswith('@douban.com'):
            email = email + '@douban.com'
        r = store.execute("select `user_id` from me_card where email=%s and flag=%s", (email, cls.FLAG_NORMAL))
        if r and r[0]:
            return cls.get(r[0])
    @classmethod
    def get(cls, id):
        """Fetch a card by numeric user_id, falling back to uid.

        When found, name/email/join_time are overridden with fresher data
        from the Employee service when available.
        """
        r = store.execute("select `user_id`, `uid`, `email`, `skype`, `name`, `alias`, `phone`, `photo`,"
                " `flag`, `join_time`, `rtime`, `ctime`"
                " from me_card where `user_id`=%s", id)
        card = None
        if r and r[0]:
            card = cls(*r[0])
        else:
            r = store.execute("select `user_id`, `uid`, `email`, `skype`, `name`, `alias`, `phone`, `photo`,"
                    " `flag`, `join_time`, `rtime`, `ctime`"
                    " from me_card where `uid`=%s", id)
            if r and r[0]:
                card = cls(*r[0])
        if card:
            # NOTE(review): bare except hides all Employee-service failures,
            # and the py2 prints below log to stdout on every successful get.
            try:
                employee = Employee.dget(card.id)
                if employee:
                    #print 'get info by dae service', employee.fullname, employee.douban_mail, employee.entry_date
                    if employee.fullname:
                        card.name = employee.fullname
                    if employee.douban_mail:
                        card.email = employee.douban_mail
                    if employee.entry_date:
                        card.join_time = datetime.strptime(employee.entry_date, '%Y-%m-%d')
            except:
                print "dae service EmployeeClient error user_id %s" % (card and card.id or '0')
        print 'card', id, card.id, card.uid, card.email, card.name
        return card
    def update_account(self, name, email):
        """Update name/email and invalidate both memcache keys."""
        store.execute("update me_card set email=%s, `name`=%s where `user_id`=%s", (email, name, self.id))
        store.commit()
        doubanmc.delete(self.MC_KEY % self.id)
        doubanmc.delete(self.MC_KEY % self.uid)
    def update_basic(self, name, skype, alias, join_time):
        """Update the basic contact fields and invalidate memcache."""
        store.execute("update me_card set `name`=%s, skype=%s, `alias`=%s, join_time=%s where"
                " `user_id`=%s", (name, skype, alias, join_time, self.id))
        store.commit()
        doubanmc.delete(self.MC_KEY % self.id)
        doubanmc.delete(self.MC_KEY % self.uid)
    def update_profile(self, sex, love, zodiac, astro, birthday, marriage, province, hometown,
            weibo, instagram, blog, code, github, resume, intro):
        """Delegate the extended-profile update to Profile.update."""
        Profile.update(self.id, sex, love, zodiac, astro, birthday, marriage, province, hometown,
                weibo, instagram, blog, code, github, resume, intro)
    def update_photo(self, filename):
        """Read a local file and store it as the new photo.

        Returns "too_large" when the file exceeds MAX_SIZE, otherwise the
        result of update_photo_data().
        """
        # NOTE(review): the file handle from open() is never closed.
        data = open(filename).read()
        #print 'update photo old_id', old_id
        if len(data) > MAX_SIZE:
            return "too_large"
        return self.update_photo_data(data)
    def update_photo_id(self, photo_id):
        """Point the card at an already-stored photo id."""
        store.execute("update me_card set `photo`=%s where `user_id`=%s", (photo_id, self.id))
        store.commit()
        doubanmc.delete(self.MC_KEY % self.id)
        doubanmc.delete(self.MC_KEY % self.uid)
    def update_photo_data(self, data):
        """Write original + scaled photo bytes to doubanfs under a new id,
        bump me_card.photo, and roll back on any failure.

        NOTE(review): if the very first statement in the try block raises
        (e.g. photo_id is None), `new_id` is unbound when the except-branch
        cleanup references it, raising NameError inside the handler.
        """
        success = False
        old_id = self.photo_id
        try:
            new_id = old_id + 1
            doubanfs.set("/me/card/%s/photo/%s/%s" % (self.id, new_id, Cate.ORIGIN), data)
            from webapp.models.utils import scale
            d = scale(data, Cate.LARGE, DEFAULT_CONFIG)
            doubanfs.set("/me/card/%s/photo/%s/%s" % (self.id, new_id, Cate.LARGE), d)
            print "update photo success photo_id=%s" % new_id
            store.execute("update me_card set `photo`=%s where `user_id`=%s", (new_id, self.id))
            store.commit()
            success = True
        except:
            print "doubanfs write fail!!! %s" % self.id
            self.photo_id = old_id
            store.execute("update me_card set `photo`=`photo`-1 where `user_id`=%s", self.id)
            store.commit()
            doubanfs.delete("/me/card/%s/photo/%s/%s" % (self.id, new_id, Cate.LARGE))
            doubanfs.delete("/me/card/%s/photo/%s/%s" % (self.id, new_id, Cate.ORIGIN))
            print 'rollback photo to old_id', old_id
        if success:
            Notify.new(self.id, self.id, Notify.TYPE_CHANGE_PHOTO, extra={'photo_id':new_id})
            print "send change photo blog"
            from webapp.models.blog import Blog
            Blog.new(self.id, Blog.TYPE_BLOG, Blog.BLOG_ICON, extra={'photo_id':new_id})
        doubanmc.delete(self.MC_KEY % self.id)
        doubanmc.delete(self.MC_KEY % self.uid)
    def recreate_photo(self):
        """Rebuild the LARGE rendition from the stored ORIGIN bytes.

        NOTE(review): `scale` is not imported in this scope (other methods do
        `from webapp.models.utils import scale` locally), so this raises
        NameError — which the bare except then swallows.  Needs the import.
        """
        try:
            data = doubanfs.get("/me/card/%s/photo/%s/%s" % (self.id, self.photo_id, Cate.ORIGIN))
            d = scale(data, Cate.LARGE, DEFAULT_CONFIG)
            doubanfs.set("/me/card/%s/photo/%s/%s" % (self.id, self.photo_id, Cate.LARGE), d)
        except:
            print "doubanfs write fail!!! %s" % self.id
    @property
    def photo(self):
        # Path of the current LARGE photo, or '' when no photo is set.
        if self.photo_id > 0:
            return "/p/%s-%s-%s.jpg" % (self.id, self.photo_id, Cate.LARGE)
        return ''
    def origin_photo(self, photo_id):
        """Path of a historic ORIGIN photo, '' when the id is out of range."""
        if photo_id <= self.photo_id:
            return "/p/%s-%s-%s.jpg" % (self.id, photo_id, Cate.ORIGIN)
        return ''
    def photo_url(self, photo_id):
        """Path of a historic LARGE photo, '' when the id is out of range."""
        if photo_id <= self.photo_id:
            return "/p/%s-%s-%s.jpg" % (self.id, photo_id, Cate.LARGE)
        return ''
    def dynamic_photo(self, x, y, scale='center-crop', photo_id=0):
        """Path of a dynamically resized photo (x by y); `scale` selects the
        resize mode ('cc' center-crop vs 'fs').  photo_id defaults to the
        current photo."""
        if photo_id < 1:
            photo_id = self.photo_id
        if photo_id > 0:
            s = 'fs'
            if scale == 'center-crop':
                s = 'cc'
            return "/p/%s-%s-r_%s_%sx%s.jpg" % (self.id, photo_id, s, x, y)
        return ''
    @property
    def photo_urls(self):
        # NOTE(review): xrange(0, photo_id) yields ids 0..photo_id-1, yet
        # photo ids elsewhere start at 1 (photo requires photo_id > 0) —
        # confirm whether id 0 exists and the current id should be included.
        return ["/p/%s-%s-%s.jpg" % (self.id, i, Cate.LARGE) for i in xrange(0, self.photo_id)]
    @property
    def like_num(self):
        # Count of likes received; returns None when the query yields no rows.
        r = store.execute("select count(1) from me_like where user_id=%s", self.id)
        if r and r[0]:
            return r[0][0]
    def likers(self):
        """Users who liked this card."""
        rs = store.execute("select liker_id from me_like where user_id=%s", self.id)
        cids = []
        if rs:
            cids = [str(r[0]) for r in rs]
        return [User(id=i) for i in cids]
    def is_liked(self, liker_id):
        """True when liker_id has already liked this card."""
        r = store.execute("select 1 from me_like where user_id=%s and liker_id=%s", (self.id, liker_id))
        if r and r[0][0]:
            return True
        return False
    @classmethod
    def gets(cls, cate='', start=0, limit=20):
        """Page through visible cards ordered by rtime desc.

        cate='photo' restricts to cards with a photo.  Returns
        (total_count, cards).
        """
        cids = []
        if cate == 'photo':
            r = store.execute("select count(1) user_id from me_card where photo>0 and flag=%s", cls.FLAG_NORMAL)
            n = r and r[0][0]
            rs = store.execute("select user_id from me_card where photo>0 and flag=%s order by rtime desc"
                    " limit %s, %s", (cls.FLAG_NORMAL, start, limit))
        else:
            r = store.execute("select count(1) user_id from me_card where flag=%s", cls.FLAG_NORMAL)
            n = r and r[0][0]
            rs = store.execute("select user_id from me_card where flag=%s order by rtime desc"
                    " limit %s, %s", (cls.FLAG_NORMAL, start, limit))
        if rs:
            cids = [str(r[0]) for r in rs]
        return n, [cls.get(i) for i in cids]
    @classmethod
    def gets_by_time(cls, year='', start=0, limit=20):
        """Page through cards ordered by join_time (optionally within a year).
        Returns (total_count, cards); paging is done in Python over the full
        id list from gets_ids_by_time."""
        n, cids = cls.gets_ids_by_time(year)
        return n, [cls.get(i) for i in cids[start:start+limit]]
    @classmethod
    def gets_ids_by_time(cls, year=''):
        """Ids of visible cards ordered by join_time desc.

        Without a year the count query additionally requires photo>0 while
        the id query does not — so n may not match len(cids); review.
        """
        cids = []
        if not year:
            r = store.execute("select count(1) from me_card where photo>0 and flag=%s", cls.FLAG_NORMAL)
            n = r and r[0][0]
            rs = store.execute("select user_id from me_card where flag=%s order by join_time desc", cls.FLAG_NORMAL)
        else:
            start = '%s-01-01 00:00:00' % year
            end = '%s-12-31 23:00:00' % year
            rs = store.execute("select user_id from me_card where photo>0 and flag=%s and"
                    " join_time > %s and join_time < %s order by join_time desc", (cls.FLAG_NORMAL, start, end))
            n = len(rs)
        if rs:
            cids = [str(r[0]) for r in rs]
        return n, cids
    @classmethod
    def gets_all(cls):
        """All visible cards with a join_time, oldest hires first."""
        rs = store.execute("select user_id from me_card where join_time > 0 and flag=%s order by join_time", cls.FLAG_NORMAL)
        cids = []
        if rs:
            cids = [str(r[0]) for r in rs]
        return [cls.get(i) for i in cids]
    @classmethod
    def gets_by_astro(cls, astro):
        """Cards whose profile matches the given astrological sign name;
        the name is translated to its index in the ASTROS table."""
        astros = [r[1] for r in ASTROS]
        index = astros.index(astro)
        rs = store.execute("select user_id from me_profile where astro=%s", index)
        return [cls.get(r[0]) for r in rs]
    @classmethod
    def gets_by_zodiac(cls, zodiac):
        """Cards whose profile matches the given zodiac name (index into
        the ZODIACS table)."""
        zodiacs = [r[1] for r in ZODIACS]
        index = zodiacs.index(zodiac)
        rs = store.execute("select user_id from me_profile where zodiac=%s", index)
        return [cls.get(r[0]) for r in rs]
    @classmethod
    def gets_by_province(cls, province):
        """Cards whose profile province equals `province`."""
        rs = store.execute("select user_id from me_profile where province=%s", province)
        return [cls.get(r[0]) for r in rs]
    @classmethod
    def gets_by_hometown(cls, city):
        """Cards whose profile hometown equals `city`."""
        rs = store.execute("select user_id from me_profile where hometown=%s", city)
        return [cls.get(r[0]) for r in rs]
    @classmethod
    def gets_by_tag(cls, tag):
        """Cards carrying the named tag, blacklist filtered, sorted by
        score descending."""
        r = store.execute("select id from me_tag where name=%s", tag)
        if r and r[0]:
            tag_id = r[0][0]
            if tag_id:
                rs = store.execute("select distinct(user_id) from me_user_tag where tag_id=%s", tag_id)
                return sorted([cls.get(r[0]) for r in rs if str(r[0]) not in CHART_BLACK_LIST_UIDS], key=attrgetter('score'), reverse=True)
        return []
    @classmethod
    def gets_by_card(cls, card_id, start=0, limit=10):
        """Page through the cards that card_id has liked; returns
        (total_count, cards)."""
        r = store.execute("select count(1) user_id from me_like where liker_id=%s", card_id)
        n = r and r[0][0]
        rs = store.execute("select user_id from me_like where liker_id=%s"
                " order by rtime desc limit %s, %s", (card_id, start, limit))
        cids = []
        if rs:
            cids = [str(r[0]) for r in rs]
        return n, [cls.get(i) for i in cids]
    def like(self, liker_id):
        """Record a like (replace ⇒ idempotent per liker) and notify owner."""
        store.execute("replace into me_like(user_id, liker_id) values(%s,%s)", (self.id, liker_id))
        store.commit()
        Notify.new(self.id, liker_id, Notify.TYPE_LIKE)
    def tag(self, tagger_id, tags=[]):
        """Attach tags from tagger_id to this card.

        NOTE(review): mutable default argument `tags=[]` — harmless as long
        as callees never mutate it, but should be `tags=None`.
        """
        from webapp.models.tag import Tag
        Tag.tag(self.id, tagger_id, tags=tags)
    @property
    def tags(self):
        # Tag names the owner applied to their own card.
        from webapp.models.tag import Tag
        return Tag.get_user_tag_names(self.id, self.id)
    def user_tags(self, tagger_id):
        """Tag names tagger_id applied to this card."""
        from webapp.models.tag import Tag
        return Tag.get_user_tag_names(self.id, tagger_id)
    @property
    def badages(self):
        # Badges awarded to this card (project spells it "badage").
        return Badage.gets_by_card(self.id)
    @classmethod
    def gets_by_badage(cls, badage_id):
        """Cards holding the given badge."""
        rs = store.execute("select user_id from me_user_badage where badage_id=%s", badage_id)
        cids = [str(r[0]) for r in rs]
        return [cls.get(i) for i in cids]
    @property
    def ptags(self):
        # All Tag objects on this card (any tagger).
        from webapp.models.tag import Tag
        return Tag.get_user_tags(self.id)
    @property
    def ptag_names(self):
        return [t.name for t in self.ptags]
    @property
    def comment_num(self):
        # Count of comments; returns None when the query yields no rows.
        r = store.execute("select count(1) from me_comment where user_id=%s", self.id)
        if r and r[0]:
            return r[0][0]
    def comment(self, author_id, content):
        """Add a comment, notify the card owner, and notify every @-mention
        found in the content."""
        store.execute("insert into me_comment(`user_id`,`author_id`,`content`)"
                " values(%s,%s,%s)", (self.id, author_id, content));
        store.commit()
        cid = store.get_cursor(table="me_comment").lastrowid
        Notify.new(self.id, author_id, Notify.TYPE_COMMENT, extra={"comment_id":cid})
        if '@' in content:
            from webapp.models.utils import mention_text
            ret = mention_text(content)
            # 'postions' [sic] — presumably matches the key mention_text
            # returns; verify against webapp.models.utils.
            for b, e, card_id, kind in ret['postions']:
                Notify.new(card_id, author_id, Notify.TYPE_MENTION, extra={"card_id":self.id, "comment_id":cid})
    @property
    def comments(self):
        # All comments on this card, oldest first.
        rs = store.execute("select id, user_id, author_id, content, rtime"
                " from me_comment where user_id=%s order by rtime", self.id)
        return [Comment(*r) for r in rs]
    @property
    def questions(self):
        return Question.gets_by_card(self.id)
    @property
    def answer_num(self):
        return Answer.num_by_card(self.id)
    @property
    def notify_num(self):
        # Count of unread notifications; None when the query yields no rows.
        r = store.execute("select count(1) from me_notify where user_id=%s"
                " and flag=%s", (self.id, Notify.FLAG_NEW))
        if r and r[0]:
            return r[0][0]
    @property
    def notifications(self):
        return Notify.gets(self.id)
    def photo_data(self, id, cate=Cate.LARGE):
        """Raw photo bytes from doubanfs; id 0/None means the current photo."""
        if not id:
            id = self.photo_id
        if id > 0:
            return doubanfs.get("/me/card/%s/photo/%s/%s" % (self.id, id, cate))
    @property
    def score(self):
        # Persisted ranking score (see calculate_score).
        r = store.execute("select score from me_card where user_id=%s", self.id)
        return r and r[0][0]
    @property
    def activities(self):
        # Persisted activity counter for this card.
        r = store.execute("select activities from me_card where user_id=%s", self.id)
        return r and r[0][0]
    @classmethod
    def max_score(cls):
        r = store.execute("select max(score) from me_card")
        return r and r[0][0]
    @classmethod
    def max_activities(cls):
        r = store.execute("select max(activities) from me_card")
        return r and r[0][0]
    @property
    def percent_activities(self):
        # Activities as an integer percentage of the site-wide maximum.
        MAX = self.max_activities() or 100
        return int(round(float(self.activities)/ MAX, 2)*100)
    @property
    def percent_score(self):
        # Score as an integer percentage of the site-wide maximum.
        MAX = self.max_score() or 100
        return int(round(float(self.score)/ MAX, 2)*100)
    @classmethod
    def calculate_score(cls, id):
        """Compute the ranking score for card `id`.

        Sums weighted contributions from profile completeness, tenure,
        likes, comments, tags (with hand-tuned bonuses per tag and sex),
        blog/photo activity and notification history.  Time-based
        contributions decay via get_value_by_time.
        """
        d = cls.get(id)
        c = 0
        if d.email:
            c = c + 2
        if d.skype:
            c = c + 2
        if d.alias:
            c = c + 2
        if d.photo_id > 0:
            c = c + 6
        p = d.profile
        sex = int(p.sex)
        if sex == 1:
            c = c + 2
        elif sex == 2:
            c = c + 4
        love = int(p.love)
        if 0 < love < 3:
            c = c + 4
        elif 3 <= love < 5:
            c = c + 1
        m = int(p.marriage)
        if 0 < m < 4:
            c = c + 3*sex
        elif 4 <= m:
            c = c + sex
        if p.birthday:
            if sex == 2:
                now = datetime.now()
                old = now.year - p.birthday.year
                if old < 30:
                    c = c + (35 - old)*2
            else:
                c = c + 4
        if p.zodiac:
            c = c + 1
        if p.astro:
            c = c + 1
        if p.province:
            c = c + 1
        if p.hometown:
            c = c + 1
        if p.weibo:
            c = c + 1
        if p.instagram:
            c = c + 2
        if p.code:
            c = c + 1
        if p.github:
            c = c + 1
        if p.resume:
            c = c + 1
        if p.intro:
            c = c + 4
        #print 'profile score=', c
        now = datetime.now()
        if d.join_time and isinstance(d.join_time, datetime) and d.join_time < now:
            c = c + get_value_by_time(0, 100, d.join_time, 30, -0.2)
        #print 'ctime score=', c
        rs = store.execute("select rtime from me_like where user_id=%s", id)
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        #print 'like score=', c
        rs = store.execute("select rtime from me_comment where user_id=%s", id)
        for r in rs:
            c = c + get_value_by_time(3, 2, r[0])
        #print 'comment score=', c
        rs = store.execute("select rtime from me_user_tag where user_id=%s", id)
        for r in rs:
            c = c + get_value_by_time(3, 2, r[0])
        #print 'tag score=', c
        for tag in d.ptags:
            t = tag.name
            if sex == 1:
                if t in ['少年', '萌', '闷骚', '帅', '傲娇', '四大萌神之一', '老师', '少男杀手', '少女杀手',
                        '单身', '小王子', '正太', '娘', 'gay', '音乐人', '骚年', '很萌']:
                    ##print 'score add tag=', t, ' c=', c
                    c = c + 5
                elif t in ['已婚', '小孩党', '车党']:
                    c = c + 2
            elif sex == 2:
                if t in ['妹子', '萝莉', '萌', '90s', '闷骚', '女神', '美女', '萌妹子', '傲娇', '少女', '实在太漂亮了',
                        '单身', '软妹纸', '温柔如水', '美女不解释', '仙女', '音乐人', '妹纸', '大萝莉', '美少女',
                        '小清新美女', '姐姐']:
                    ##print 'score add tag=', t, ' c=', c
                    c = c + 6
                elif t in ['已婚', '小孩党']:
                    c = c + 3
        #print 'add tag score=', c
        rs = store.execute("select rtime from me_blog, me_blog_like"
                " where user_id=%s and id=blog_id", id)
        for r in rs:
            c = c + get_value_by_time(2, 3, r[0])
        rs = store.execute("select rtime from me_blog as b, me_blog_comment as c"
                " where user_id=%s and b.id=c.blog_id", id)
        for r in rs:
            c = c + get_value_by_time(3, 3, r[0])
        #print 'add blog score=', c
        #metion
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_MENTION))
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_BLOG_MENTION))
        for r in rs:
            c = c + get_value_by_time(3, 2, r[0])
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_BLOG_COMMENT_MENTION))
        for r in rs:
            c = c + get_value_by_time(3, 3, r[0])
        #print 'mention score=', c
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_AWARD_VOTED))
        for r in rs:
            c = c + get_value_by_time(5, 2, r[0])
        #print 'vote score=', c
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_CHANGE_PHOTO))
        for r in rs:
            c = c + get_value_by_time(2, 4, r[0])
        #print 'update photo score=', c
        rs = store.execute("select rtime from me_notify where user_id=%s"
                " and ntype=%s", (id, Notify.TYPE_REQUEST_PHOTO))
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        #print 'request photo score=', c
        rs = store.execute("select c.rtime from me_event_photo as p, me_photo_comment as c"
                " where p.author_id=%s and p.id=c.photo_id", id)
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        rs = store.execute("select c.rtime from me_event_photo as p, me_photo_like as c"
                " where p.author_id=%s and p.id=c.photo_id", id)
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        rs = store.execute("select rtime from me_photo_tag where user_id=%s", id)
        for r in rs:
            c = c + get_value_by_time(2, 2, r[0])
        return c
    @classmethod
    def gets_by_score(cls, limit=20):
        """Top cards by persisted score, blacklist filtered."""
        rs = store.execute("select user_id from me_card where score > 0 and flag=%s"
                " order by score desc limit %s", (cls.FLAG_NORMAL, limit))
        return [cls.get(str(r[0])) for r in rs if str(r[0]) not in CHART_BLACK_LIST_UIDS]
def get_value_by_time(base, value, time, days=2, factor=-0.1):
    """Decay *value* according to how old *time* is.

    The age is measured in whole days divided by *days*, then fed through
    get_value_by_day.  Anything that is not a datetime (including None)
    yields *value* unchanged.
    """
    if not time or not isinstance(time, datetime):
        return value
    age = datetime.now() - time
    elapsed_units = age.days / float(days)
    return get_value_by_day(base, value, elapsed_units, factor)
def get_value_by_day(base, value, day, factor=-0.1):
    """Exponential decay: base + value * e^(factor * day).

    With the default negative factor the contribution shrinks toward
    *base* as *day* grows.
    """
    decayed = value * math.exp(factor * day)
    return base + decayed
|
{
"content_hash": "8107b584fe9c8a575cbe45cb70aa15b8",
"timestamp": "",
"source": "github",
"line_count": 760,
"max_line_length": 217,
"avg_line_length": 36.64078947368421,
"alnum_prop": 0.5338456566236938,
"repo_name": "leonsim/me",
"id": "8cb6550c7d2d132a252b24efd1aadd6b3f1cb0cf",
"size": "28108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/models/card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120029"
},
{
"name": "JavaScript",
"bytes": "575915"
},
{
"name": "Python",
"bytes": "218405"
}
],
"symlink_target": ""
}
|
import simplegui
# Import module, which contains functions that involve
# randomness.
import random
# Import module that contains additional mathematical
# operations.
import math
#---------------------------------------------------------
# Define and initialize global constants.
# Initialize global constants that will hold the "width"
# and "height" of the "canvas" ("deck of cards" - grid of
# 16 "cards").
CANVAS_WIDTH = 800
CANVAS_HEIGHT = 140
# "Memory" game of 16 "cards" (as global constant).
CARDS_NUMBER = 16
# Compute the "width" of a single cell of this grid;
# "placeholder" for a single "card" (cells distributed
# evently).
CARD_PLACEHOLDER_WIDTH = (CANVAS_WIDTH // CARDS_NUMBER)
# Set general "draw" properties.
FONT_SIZE = 50
FONT_FACE = 'sans-serif'
FONT_COLOR = 'White'
MARGIN_Y = 19
# Compute the (global constant) "vertical" position to
# draw a "card", presenting a "textual number" and taking
# into consideration the height of the "deck of cards"
# plus a "margin".
CARD_VALUE_POINT_Y = (CANVAS_HEIGHT // 2) + MARGIN_Y
# More general "draw" properties.
CARD_PLACEHOLDER_LINE_COLOR = 'Black'
CARD_PLACEHOLDER_FILL_COLOR = 'Green'
CARD_PLACEHOLDER_LINE_WIDTH = 2
# Initialize a "dictionary" as global constant, mapping
# numbers from 0-7 (acting as "keys") to "urls" (acting
# as "values"). In practice, the business logic of the
# program models generally the "deck of cards" as a
# "shuffled" list consisting of 16 numbers with each
# number lying in the range [0,8) and appearing twice.
# The following "urls" (links to images)
# are just being used at the "presentation" layer,
# drawing the proper "image" instead of "number" (text).
# NOTE: keys 0-7 must cover every possible card value
# produced by new_game() (range(CARDS_NUMBER // 2)).
IMAGES = {}
IMAGES[0] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/riemann.jpg')
IMAGES[1] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/aristotle.jpg')
IMAGES[2] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/euler.jpg')
IMAGES[3] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/gauss.jpg')
IMAGES[4] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/newton.jpg')
IMAGES[5] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/einstein.jpg')
IMAGES[6] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/hilbert.jpg')
IMAGES[7] = simplegui.load_image('http://aristotelis-metsinis.github.io/img/lagrange.jpg')
#---------------------------------------------------------
# Define and initialize global variables.
# Boolean flag: play the game with "images" (True) or
# with "textual numbers" (False).
play_with_images = False
#---------------------------------------------------------
def new_game():
    """
    Helper function that starts and restarts the
    game, initializing global variables; reshuffle the
    "cards", reset the "turn" counter and restart the
    game. All "cards" should start the game hidden.
    """
    # Initialize global variable that will hold the "deck
    # of cards"; we model the "deck of cards" as a list
    # consisting of 16 numbers with each number lying in
    # the range [0,8) and appearing twice. The list is
    # created by concatenating two lists with range [0,8)
    # together. Although Player can play the game with
    # "textual numbers" or "images", the above mentioned
    # technique is being used modeling the game in both
    # game "modes".
    # NOTE: Python 2 semantics — range() returns a list
    # here, so '+' is list concatenation (would be a
    # TypeError on Python 3 iterators).
    global deck_of_cards
    deck_of_cards = range(CARDS_NUMBER // 2) + range(CARDS_NUMBER // 2)
    # Shuffle the "deck".
    random.shuffle(deck_of_cards)
    # Remove comment if in DEBUG mode.
    #print deck_of_cards
    # Initialize global variable that will hold the a list,
    # with size equal to the size of the "deck of cards"
    # consisting of boolean values. The boolean value
    # at a certain list index indicates whether the "card"
    # is "exposed" (True) or not (False). Particularly,
    # the ith entry should be "True" if the ith card is
    # face up and its value is visible or "False" if the
    # ith card is face down and it's value is hidden.
    global deck_of_cards_exposed
    deck_of_cards_exposed = [False] * CARDS_NUMBER
    # Initialize global variable that will hold the game
    # state (0,1 and 2), i.e. beginning of the game, single
    # "exposed" unpaired "card" and end of a "turn"
    # respectively (have a look at the comments of
    # "mouseclick()" for a detailed description
    # concerning this variable).
    global state
    state = 0
    # Initialize global variable that will hold the number
    # of "turns" playing the game.
    global turn
    turn = 0
    label.set_text("Turns = " + str(turn))
    # Initialize a "helper" list keeping the indexes of the
    # (up to two) cards exposed in a single "turn";
    # -1 acts as the "no card yet" sentinel.
    global index_of_cards_exposed_in_a_turn
    index_of_cards_exposed_in_a_turn = [-1, -1]
    return None
#---------------------------------------------------------
def mouseclick(pos):
    """
    Mouse-click event handler implementing the game's
    three-state logic. `pos` is the (x, y) click position.

    State 0: start of game - expose the clicked card, start a
             new turn, go to state 1.
    State 1: one unpaired card exposed - expose the clicked
             card, go to state 2.
    State 2: end of a turn - flip the previous pair back over
             if it did not match, expose the clicked card,
             start a new turn, go to state 1.
    """
    # Map the click's x coordinate onto a card index; the
    # cards tile the canvas edge to edge.
    card_index = int(math.floor(float(pos[0]) / CARD_PLACEHOLDER_WIDTH))
    # Clicks on an already face-up card are ignored.
    if deck_of_cards_exposed[card_index]:
        return None
    global turn
    global state
    # Whatever the state, the clicked card is now face up.
    deck_of_cards_exposed[card_index] = True
    if state == 1:
        # Second card of the turn: remember it and wait for
        # the next click to resolve the pair.
        index_of_cards_exposed_in_a_turn[1] = card_index
        state = 2
        return None
    if state == 2:
        # Resolve the previous turn's pair before starting a
        # new one: mismatched cards are flipped face down.
        first, second = index_of_cards_exposed_in_a_turn
        if deck_of_cards[first] != deck_of_cards[second]:
            deck_of_cards_exposed[first] = False
            deck_of_cards_exposed[second] = False
    # States 0 and 2 both begin a fresh turn with the clicked
    # card as its first exposed card.
    index_of_cards_exposed_in_a_turn[0] = card_index
    turn += 1
    label.set_text("Turns = " + str(turn))
    state = 1
    return None
#---------------------------------------------------------
def draw(canvas):
    """
    Event handler that is responsible for all drawing.
    It receives the "canvas" object and draws the "deck of
    cards" (grid) as a horizontal sequence of 16 evently
    distributed cells - "card" placeholders. It also draws
    the "exposed" cards (if any) taking into consideration
    the "mode" of the game, i.e either drawing "textual
    numbers" or "images" in the "cells" of the "exposed"
    cards (placeholders). "Cards" are logically 50 x 140
    pixels in size based on the configurations set for
    the purposes of this program.
    """
    # Iterate through the "Memory deck" and draw all 16
    # "card" placeholders.
    for index in range(CARDS_NUMBER):
        # Store the position of the left and right border
        # of this cell (card placeholder).
        card_placeholder_left_x = CARD_PLACEHOLDER_WIDTH * index
        card_placeholder_right_x = CARD_PLACEHOLDER_WIDTH * (index + 1)
        # Check if the "card" of this cell has an "exposed"
        # (already) status.
        if deck_of_cards_exposed[index]:
            # Compute the position at the middle of this
            # cell.
            card_placeholder_middle_x = (card_placeholder_right_x + card_placeholder_left_x) // 2
            # Play the game with "textual numbers" instead
            # of "images".
            if not play_with_images:
                # Use the "index" of this "cell" as the
                # "index" in the list of the "deck of
                # cards" extracting the "card value".
                # Get the width of the "card value" text
                # in pixels; useful in (later) computing
                # the position to draw the "card value"
                # text - centered justified in the "cell"
                # of each "card" (placeholder).
                card_value_textwidth_in_px = frame.get_canvas_textwidth(str(deck_of_cards[index]),
                                                                        FONT_SIZE, FONT_FACE)
                card_value_point_x = card_placeholder_middle_x - (card_value_textwidth_in_px // 2)
                # Draw the "textual number" associated
                # with each "card" on the "canvas".
                canvas.draw_text(str(deck_of_cards[index]), (card_value_point_x, CARD_VALUE_POINT_Y),
                                 FONT_SIZE, FONT_COLOR, FONT_FACE)
            # Play the game with "images" in place of
            # "textual numbers".
            else:
                # Use the "index" of this "cell" as the
                # "index" in the list of the "deck of
                # cards" extracting the "card value".
                # Later use this "card value" as the "key"
                # loading the corresponding "image".
                image = IMAGES[deck_of_cards[index]]
                # Draw the "image" associated with each
                # "card" on the "canvas", centered in the
                # cell at full source size (source center,
                # source size, destination center,
                # destination size).
                canvas.draw_image(image,
                                  (image.get_width() // 2,image.get_height() // 2),
                                  (image.get_width(), image.get_height()),
                                  (card_placeholder_middle_x, CANVAS_HEIGHT // 2),
                                  (image.get_width(), image.get_height()))
        # "Card" of this cell is not "exposed" (already);
        # simply draw a cell ("card" placeholder).
        else:
            card_placeholder_points = [[card_placeholder_left_x, 0],
                                       [card_placeholder_right_x, 0],
                                       [card_placeholder_right_x, CANVAS_HEIGHT],
                                       [card_placeholder_left_x, CANVAS_HEIGHT]]
            # Just draw a blank green rectangle.
            canvas.draw_polygon(card_placeholder_points,
                                CARD_PLACEHOLDER_LINE_WIDTH,
                                CARD_PLACEHOLDER_LINE_COLOR,
                                CARD_PLACEHOLDER_FILL_COLOR)
    return None
#---------------------------------------------------------
def switch_game_mode():
    """
    Button event handler toggling between the two game
    modes: play with "textual numbers" (False) or with
    "images" (True).  Each press flips the boolean flag,
    refreshes the button caption so it advertises the mode
    the *next* game will use, and resets the on-going game.
    """
    # The mode flag lives at module level; flip it in place.
    global play_with_images
    play_with_images = not play_with_images
    # The caption always names the OPPOSITE mode, i.e. what
    # another press of the button would switch to.
    caption = ("Reset and Play with numbers" if play_with_images
               else "Reset and Play with images")
    switch_game_mode_button.set_text(caption)
    # Changing mode mid-game makes no sense; start over.
    new_game()
    return None
#---------------------------------------------------------
# Create frame hosting the game's canvas and controls.
frame = simplegui.create_frame("Memory", CANVAS_WIDTH,
                               CANVAS_HEIGHT)
# Register event handlers for "control" elements: a "Reset"
# button restarting the game, a label tracking the number of
# turns played so far, and a button that switches the game
# mode between "textual numbers" and "images".  Once the
# game is over, hit "Reset" to restart.
frame.add_button("Reset", new_game)
frame.add_label("")
label = frame.add_label("Turns = 0")
frame.add_label("")
switch_game_mode_button = frame.add_button("Reset and Play with images",
                                           switch_game_mode, 200)
# Mouse clicks on the canvas expose ("flip") cards.
frame.set_mouseclick_handler(mouseclick)
# All drawing is delegated to the "draw" handler.
frame.set_draw_handler(draw)
# Call "new_game()" ensuring that all variables are
# always initialized when the program starts running.
new_game()
# Start frame (enters the simplegui event loop).
frame.start()
#---------------------------------------------------------
|
{
"content_hash": "16e9270ce45713eb28ac9d4c12ca5a4e",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 116,
"avg_line_length": 41.34564643799472,
"alnum_prop": 0.5960433950223357,
"repo_name": "aristotelis-metsinis/ArcadeGames",
"id": "f5e12e644fda9ba3afe7eb296b178dcfa3bc13a4",
"size": "16152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132391"
}
],
"symlink_target": ""
}
|
import os.path
import posixpath
import re
import urllib
from docutils import nodes
from sphinx import addnodes, util
from sphinx.locale import admonitionlabels
def _parents(node):
while node.parent:
node = node.parent
yield node
class BootstrapTranslator(nodes.NodeVisitor, object):
head_prefix = 'head_prefix'
head = 'head'
stylesheet = 'stylesheet'
body_prefix = 'body_prefix'
body_pre_docinfo = 'body_pre_docinfo'
docinfo = 'docinfo'
body_suffix = 'body_suffix'
subtitle = 'subtitle'
header = 'header'
footer = 'footer'
html_prolog = 'html_prolog'
html_head = 'html_head'
html_title = 'html_title'
html_subtitle = 'html_subtitle'
# <meta> tags
meta = [
'<meta http-equiv="X-UA-Compatible" content="IE=edge">',
'<meta name="viewport" content="width=device-width, initial-scale=1">'
]
    def __init__(self, builder, document):
        """Set up per-document rendering state for *builder*."""
        super(BootstrapTranslator, self).__init__(document)
        self.builder = builder
        # Output accumulators; ``fragment`` and ``html_body`` alias
        # ``body`` so Sphinx can fetch the rendered markup under any of
        # its usual attribute names.
        self.body = []
        self.fragment = self.body
        self.html_body = self.body
        # document title, collected by visit_title/depart_title
        self.title = []
        # Index into ``body`` where the document title starts; 0 means
        # "not currently collecting a title".
        self.start_document_title = 0
        self.first_title = False
        # Stack of closing strings pushed by visit_* handlers and popped
        # by the matching depart_* handlers (LIFO).
        self.context = []
        self.section_level = 0
        # Current highlight language; ``highlightlang_base`` remembers
        # the configured default so code-block overrides can be detected.
        self.highlightlang = self.highlightlang_base = self.builder.config.highlight_language
        self.highlightopts = getattr(builder.config, 'highlight_options', {})
        # State for rendering signature parameter lists — see
        # visit_desc_parameterlist / visit_desc_parameter.
        self.first_param = 1
        self.optional_param_level = 0
        self.required_params_left = 0
        self.param_separator = ','
def encode(self, text):
return unicode(text).translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
0xa0: u' '
})
    def starttag(self, node, tagname, **attributes):
        """Render the opening HTML tag for *node*.

        Merges ``data-`` attributes carried by the docutils node with the
        keyword *attributes*, folds the node's ``classes`` into the
        ``class`` attribute, and converts multiple ids into a single
        ``id`` plus empty ``<i id="...">`` anchors appended right after
        the opening tag.
        """
        tagname = unicode(tagname).lower()
        # extract generic attributes (keyword names lower-cased)
        attrs = {name.lower(): value for name, value in attributes.iteritems()}
        attrs.update(
            (name, value) for name, value in node.attributes.iteritems()
            if name.startswith('data-')
        )
        prefix = []
        postfix = []
        # handle possibly multiple ids: the first becomes this tag's id,
        # the remainder become invisible <i> anchors so every id stays
        # linkable
        assert 'id' not in attrs, "starttag can't be passed a single id attribute, use a list of ids"
        ids = node.get('ids', []) + attrs.pop('ids', [])
        if ids:
            _ids = iter(ids)
            attrs['id'] = next(_ids)
            postfix.extend(u'<i id="{}"></i>'.format(_id) for _id in _ids)
        # set CSS class: union of the node's classes and any 'class'
        # keyword argument (whitespace-separated)
        classes = set(node.get('classes', []) + attrs.pop('class', '').split())
        if classes:
            attrs['class'] = u' '.join(classes)
        return u'{prefix}<{tag} {attrs}>{postfix}'.format(
            prefix=u''.join(prefix),
            tag=tagname,
            attrs=u' '.join(u'{}="{}"'.format(name, self.attval(value))
                            for name, value in attrs.iteritems()),
            postfix=u''.join(postfix),
        )
    # only "space characters" SPACE, CHARACTER TABULATION, LINE FEED,
    # FORM FEED and CARRIAGE RETURN should be collapsed, not all of
    # Unicode White_Space — hence the explicit character class instead
    # of ``\s``
    def attval(self, value, whitespace=re.compile(u'[ \t\n\f\r]')):
        """Collapse ASCII whitespace in *value* to single spaces and
        pass the result through ``self.encode`` for use in an HTML
        attribute."""
        return self.encode(whitespace.sub(u' ', unicode(value)))
def astext(self):
return u''.join(self.body)
def unknown_visit(self, node):
print "unknown node", node.__class__.__name__
self.body.append(u'[UNKNOWN NODE {}]'.format(node.__class__.__name__))
raise nodes.SkipNode
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
def depart_highlightlang(self, node):
pass
def visit_document(self, node):
self.first_title = True
def depart_document(self, node):
pass
def visit_section(self, node):
# close "parent" or preceding section, unless this is the opening of
# the first section
if self.section_level:
self.body.append(u'</section>')
self.section_level += 1
self.body.append(self.starttag(node, 'section'))
def depart_section(self, node):
self.section_level -= 1
# close last section of document
if not self.section_level:
self.body.append(u'</section>')
    def is_compact_paragraph(self, node):
        """Return True when *node* can be emitted without a ``<p>`` wrapper.

        A paragraph is "compact" when it is the only visible, non-label
        child of its parent and carries no attributes (beyond the
        first/last marker classes) that would require a real element to
        hang on to.
        """
        parent = node.parent
        if isinstance(parent, (nodes.document, nodes.compound,
                               addnodes.desc_content,
                               addnodes.versionmodified)):
            # Never compact paragraphs in document or compound.
            return False
        for key, value in node.attlist():
            # we can ignore a few specific classes, all other non-default
            # attributes require that a <p> node remains
            if key != 'classes' or value not in ([], ['first'], ['last'], ['first', 'last']):
                return False
        # Skip an initial nodes.label sibling (e.g. footnote labels) when
        # scanning the parent's children.
        first = isinstance(node.parent[0], nodes.label)
        for child in parent.children[first:]:
            # only first paragraph can be compact: bail out if any other
            # visible node precedes this one
            if isinstance(child, nodes.Invisible):
                continue
            if child is node:
                break
            return False
        # Compact only when this paragraph is the parent's sole visible,
        # non-label child.
        parent_length = len([
            1 for n in parent
            if not isinstance(n, (nodes.Invisible, nodes.label))
        ])
        return parent_length == 1
def visit_paragraph(self, node):
if self.is_compact_paragraph(node):
self.context.append(u'')
return
self.body.append(self.starttag(node, 'p'))
self.context.append(u'</p>')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_literal_block(self, node):
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.body.append(self.starttag(node, 'pre'))
return
lang = self.highlightlang
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
linenos = node.get('linenos', False)
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.builder.highlighter.highlight_block(
node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
self.body.append(self.starttag(node, 'div', CLASS='highlight-%s' % lang))
self.body.append(highlighted)
self.body.append(u'</div>\n')
raise nodes.SkipNode
def depart_literal_block(self, node):
self.body.append(u'</pre>')
def visit_bullet_list(self, node):
self.body.append(self.starttag(node, 'ul'))
def depart_bullet_list(self, node):
self.body.append(u'</ul>')
def visit_enumerated_list(self, node):
self.body.append(self.starttag(node, 'ol'))
def depart_enumerated_list(self, node):
self.body.append(u'</ol>')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li'))
def depart_list_item(self, node):
self.body.append(u'</li>')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl'))
def depart_definition_list(self, node):
self.body.append(u'</dl>')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt'))
def depart_term(self, node):
self.body.append(u'</dt>')
def visit_termsep(self, node):
self.body.append(self.starttag(node, 'br'))
raise nodes.SkipNode
def visit_definition(self, node):
self.body.append(self.starttag(node, 'dd'))
def depart_definition(self, node):
self.body.append(u'</dd>')
def visit_admonition(self, node, type=None):
clss = {
# ???: 'alert-success',
'note': 'alert-info',
'hint': 'alert-info',
'tip': 'alert-info',
'seealso': 'alert-go_to',
'warning': 'alert-warning',
'attention': 'alert-warning',
'caution': 'alert-warning',
'important': 'alert-warning',
'danger': 'alert-danger',
'error': 'alert-danger',
'exercise': 'alert-exercise',
}
self.body.append(self.starttag(node, 'div', role='alert', CLASS='alert {}'.format(
clss.get(type, '')
)))
if 'alert-dismissible' in node.get('classes', []):
self.body.append(
u'<button type="button" class="close" data-dismiss="alert" aria-label="Close">'
u'<span aria-hidden="true">×</span>'
u'</button>')
if type:
node.insert(0, nodes.title(type, admonitionlabels[type]))
def depart_admonition(self, node):
self.body.append(u'</div>')
visit_note = lambda self, node: self.visit_admonition(node, 'note')
visit_warning = lambda self, node: self.visit_admonition(node, 'warning')
visit_attention = lambda self, node: self.visit_admonition(node, 'attention')
visit_caution = lambda self, node: self.visit_admonition(node, 'caution')
visit_danger = lambda self, node: self.visit_admonition(node, 'danger')
visit_error = lambda self, node: self.visit_admonition(node, 'error')
visit_hint = lambda self, node: self.visit_admonition(node, 'hint')
visit_important = lambda self, node: self.visit_admonition(node, 'important')
visit_tip = lambda self, node: self.visit_admonition(node, 'tip')
visit_exercise = lambda self, node: self.visit_admonition(node, 'exercise')
visit_seealso = lambda self, node: self.visit_admonition(node, 'seealso')
depart_note = depart_admonition
depart_warning = depart_admonition
depart_attention = depart_admonition
depart_caution = depart_admonition
depart_danger = depart_admonition
depart_error = depart_admonition
depart_hint = depart_admonition
depart_important = depart_admonition
depart_tip = depart_admonition
depart_exercise = depart_admonition
depart_seealso = depart_admonition
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append(u'</div>')
def visit_title(self, node):
parent = node.parent
closing = u'</p>'
if isinstance(parent, nodes.Admonition):
self.body.append(self.starttag(node, 'p', CLASS='alert-title'))
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1'))
closing = u'</h1>'
self.start_document_title = len(self.body)
else:
assert isinstance(parent, nodes.section), "expected a section node as parent to the title, found {}".format(parent)
if self.first_title:
self.first_title = False
raise nodes.SkipNode()
nodename = 'h{}'.format(self.section_level)
self.body.append(self.starttag(node, nodename))
closing = u'</{}>'.format(nodename)
self.context.append(closing)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.start_document_title:
self.title = self.body[self.start_document_title:-1]
self.start_document_title = 0
del self.body[:]
# the rubric should be a smaller heading than the current section, up to
# h6... maybe "h7" should be a ``p`` instead?
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'h{}'.format(min(self.section_level + 1, 6))))
def depart_rubric(self, node):
self.body.append(u'</h{}>'.format(min(self.section_level + 1, 6)))
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append(u'</blockquote>')
def visit_attribution(self, node):
self.body.append(self.starttag(node, 'footer'))
def depart_attribution(self, node):
self.body.append(u'</footer>')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_container(self, node):
self.body.append(u'</div>')
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_compound(self, node):
self.body.append(u'</div>')
def visit_image(self, node):
uri = node['uri']
if uri in self.builder.images:
uri = posixpath.join(self.builder.imgpath,
self.builder.images[uri])
attrs = {'src': uri, 'class': 'img-responsive'}
if 'alt' in node:
attrs['alt'] = node['alt']
if 'align' in node:
if node['align'] == 'center':
attrs['class'] += ' center-block'
else:
doc = None
if node.source:
doc = node.source
if node.line:
doc += ':%d' % node.line
self.builder.app.warn(
"Unsupported alignment value \"%s\"" % node['align'],
location=doc
)
# todo: explicit width/height/scale?
self.body.append(self.starttag(node, 'img', **attrs))
def depart_image(self, node): pass
def visit_figure(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_figure(self, node):
self.body.append(u'</div>')
def visit_caption(self, node):
# first paragraph of figure content
self.body.append(self.starttag(node, 'h4'))
def depart_caption(self, node):
self.body.append(u'</h4>')
def visit_legend(self, node): pass
def depart_legend(self, node): pass
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line'))
# ensure the line still takes the room it needs
if not len(node): self.body.append(u'<br />')
def depart_line(self, node):
self.body.append(u'</div>')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append(u'</div>')
def visit_table(self, node):
self.body.append(self.starttag(node, 'table', CLASS='table'))
def depart_table(self, node):
self.body.append(u'</table>')
def visit_tgroup(self, node): pass
def depart_tgroup(self, node): pass
def visit_colspec(self, node): raise nodes.SkipNode
def visit_thead(self, node):
self.body.append(self.starttag(node, 'thead'))
def depart_thead(self, node):
self.body.append(u'</thead>')
def visit_tbody(self, node):
self.body.append(self.starttag(node, 'tbody'))
def depart_tbody(self, node):
self.body.append(u'</tbody>')
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr'))
def depart_row(self, node):
self.body.append(u'</tr>')
def visit_entry(self, node):
if isinstance(node.parent.parent, nodes.thead):
tagname = 'th'
else:
tagname = 'td'
self.body.append(self.starttag(node, tagname))
self.context.append(tagname)
def depart_entry(self, node):
self.body.append(u'</{}>'.format(self.context.pop()))
def visit_Text(self, node):
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_literal(self, node):
self.body.append(self.starttag(node, 'code'))
def depart_literal(self, node):
self.body.append(u'</code>')
visit_literal_emphasis = visit_literal
depart_literal_emphasis = depart_literal
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_emphasis(self, node):
self.body.append(u'</em>')
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong'))
def depart_strong(self, node):
self.body.append(u'</strong>')
visit_literal_strong = visit_strong
depart_literal_strong = depart_strong
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span'))
def depart_inline(self, node):
self.body.append(u'</span>')
def visit_abbreviation(self, node):
attrs = {}
if 'explanation' in node:
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', **attrs))
def depart_abbreviation(self, node):
self.body.append(u'</abbr>')
def visit_reference(self, node):
attrs = {
'class': 'reference',
'href': node['refuri'] if 'refuri' in node else '#' + node['refid']
}
attrs['class'] += ' internal' if (node.get('internal') or 'refuri' not in node) else ' external'
if any(isinstance(ancestor, nodes.Admonition) for ancestor in _parents(node)):
attrs['class'] += ' alert-link'
if 'reftitle' in node:
attrs['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', **attrs))
def depart_reference(self, node):
self.body.append(u'</a>')
def visit_target(self, node): pass
def depart_target(self, node): pass
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'div', CLASS='footnote'))
self.footnote_backrefs(node)
def depart_footnote(self, node):
self.body.append(u'</div>')
def visit_footnote_reference(self, node):
self.body.append(self.starttag(
node, 'a', href='#' + node['refid'], CLASS="footnote-ref"))
def depart_footnote_reference(self, node):
self.body.append(u'</a>')
def visit_label(self, node):
self.body.append(self.starttag(node, 'span', CLASS='footnote-label'))
self.body.append(u'%s[' % self.context.pop())
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(u']%s</span> %s' % (self.context.pop(), self.context.pop()))
    def footnote_backrefs(self, node):
        """Push the three context strings visit_label/depart_label consume.

        The context stack is LIFO, so the entries are pushed in reverse of
        the order they will be popped:

        * outside (after) label
        * after label text
        * before label text
        """
        backrefs = node['backrefs']
        if not backrefs:
            # No referencing locations: nothing wraps or follows the label.
            self.context.extend(['', '', ''])
        elif len(backrefs) == 1:
            # Single backreference: the label text itself becomes the link.
            self.context.extend([
                '',
                '</a>',
                '<a class="footnote-backref" href="#%s">' % backrefs[0]
            ])
        else:
            # Several backreferences: enumerate them after the label as
            # numbered links, e.g. ``(1, 2, 3)``.
            backlinks = (
                '<a class="footnote-backref" href="#%s">%s</a>' % (backref, i)
                for i, backref in enumerate(backrefs, start=1)
            )
            self.context.extend([
                '<em class="footnote-backrefs">(%s)</em> ' % ', '.join(backlinks),
                '',
                ''
            ])
def visit_desc(self, node):
self.body.append(self.starttag(node, 'section', CLASS='code-' + node['objtype']))
def depart_desc(self, node):
self.body.append(u'</section>')
def visit_desc_signature(self, node):
self.body.append(self.starttag(node, 'h6'))
self.body.append(u'<code>')
def depart_desc_signature(self, node):
self.body.append(u'</code>')
self.body.append(u'</h6>')
def visit_desc_addname(self, node): pass
def depart_desc_addname(self, node): pass
def visit_desc_type(self, node): pass
def depart_desc_type(self, node): pass
def visit_desc_returns(self, node):
self.body.append(u' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node): pass
def depart_desc_name(self, node): pass
def visit_desc_parameterlist(self, node):
self.body.append(u'(')
self.first_param = True
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(u')')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if 'noemph' not in node: self.body.append(u'<em>')
def depart_desc_parameter(self, node):
if 'noemph' not in node: self.body.append(u'</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append(u'[')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append(u']')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_desc_annotation(self, node):
self.body.append(u'</em>')
def visit_desc_content(self, node): pass
def depart_desc_content(self, node): pass
def visit_field_list(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-fields'))
def depart_field_list(self, node):
self.body.append(u'</div>')
def visit_field(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field'))
def depart_field(self, node):
self.body.append(u'</div>')
def visit_field_name(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-name'))
def depart_field_name(self, node):
self.body.append(u'</div>')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-body'))
def depart_field_body(self, node):
self.body.append(u'</div>')
def visit_glossary(self, node): pass
def depart_glossary(self, node): pass
def visit_comment(self, node): raise nodes.SkipNode
def visit_toctree(self, node):
# div class=row {{ section_type }}
# h2 class=col-sm-12
# {{ section title }}
# div class=col-sm-6 col-md-3
# figure class=card
# a href=current_link style=background-image: document-image-attribute class=card-img
# figcaption
# {{ card title }}
env = self.builder.env
conf = self.builder.app.config
for title, ref in ((e[0], e[1]) for e in node['entries']):
# external URL, no toc, can't recurse into
if ref not in env.tocs:
continue
toc = env.tocs[ref].traverse(addnodes.toctree)
classes = env.metadata[ref].get('types', 'tutorials')
classes += ' toc-single-entry' if not toc else ' toc-section'
self.body.append(self.starttag(node, 'div', CLASS="row " + classes))
self.body.append(u'<h2 class="col-sm-12">')
self.body.append(title if title else util.nodes.clean_astext(env.titles[ref]))
self.body.append(u'</h2>')
entries = [(title, ref)] if not toc else ((e[0], e[1]) for e in toc[0]['entries'])
for subtitle, subref in entries:
baseuri = self.builder.get_target_uri(node['parent'])
if subref in env.metadata:
cover = env.metadata[subref].get('banner', conf.odoo_cover_default)
elif subref in conf.odoo_cover_external:
cover = conf.odoo_cover_external[subref]
else:
cover = conf.odoo_cover_default_external
if cover:
banner = '_static/' + cover
base, ext = os.path.splitext(banner)
small = "{}.small{}".format(base, ext)
if os.path.isfile(urllib.url2pathname(small)):
banner = small
style = u"background-image: url('{}')".format(
util.relative_uri(baseuri, banner) or '#')
else:
style = u''
self.body.append(u"""
<div class="col-sm-6 col-md-3">
<figure class="card">
<a href="{link}" class="card-img">
<span style="{style}"></span>
<figcaption>{title}</figcaption>
</a>
</figure>
</div>
""".format(
link=subref if util.url_re.match(subref) else util.relative_uri(
baseuri, self.builder.get_target_uri(subref)),
style=style,
title=subtitle if subtitle else util.nodes.clean_astext(env.titles[subref]),
))
self.body.append(u'</div>')
raise nodes.SkipNode
def visit_index(self, node): raise nodes.SkipNode
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = 'span' if isinstance(node.parent, nodes.TextElement) else 'div'
if node['classes']:
self.body.append(self.starttag(node, t))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
# internal node
def visit_substitution_definition(self, node): raise nodes.SkipNode
# without set_translator, add_node doesn't work correctly, so the
# serialization of html_domain nodes needs to be embedded here
def visit_div(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_div(self, node):
self.body.append(u'</div>\n')
def visit_address(self, node):
self.body.append(self.starttag(node, 'address'))
def depart_address(self, node):
self.body.append(u'</address>')
# TODO: inline elements
|
{
"content_hash": "30c8cca15007e56f9d1cd5af06c28f66",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 127,
"avg_line_length": 39.392753623188405,
"alnum_prop": 0.5776829402891726,
"repo_name": "vileopratama/vitech",
"id": "48c5e901f28195fc1f3d238135f9ed0bd2516abc",
"size": "27207",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "src/doc/_extensions/odoo/translator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# Distribution metadata for the django-rosetta package.
setup(
    name='django-rosetta',
    version="0.4.7",
    description='A Django application that eases the translation of Django projects',
    author='Marco Bonetti',
    author_email='mbonetti@gmail.com',
    url='https://github.com/mbi/django-rosetta',
    license='MIT',
    # Automatically pick up every package under the project root.
    packages=find_packages(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Localization',
        'Topic :: Software Development :: Internationalization',
        'Framework :: Django',
    ],
    # Ship non-Python files (templates, static assets) listed in
    # MANIFEST.in / package data.
    include_package_data=True,
    zip_safe=False
)
|
{
"content_hash": "f1142f89e9abf86bc36f6bc200f507c6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 35.208333333333336,
"alnum_prop": 0.6378698224852071,
"repo_name": "grangier/django-rosetta",
"id": "4e0c22d7582c93ff1610fb1f339c0ebd890a1b9e",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "66630"
}
],
"symlink_target": ""
}
|
"""The tests for the logbook component."""
# pylint: disable=protected-access
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components import sun
import homeassistant.core as ha
from homeassistant.const import (
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
ATTR_HIDDEN, STATE_NOT_HOME, STATE_ON, STATE_OFF)
import homeassistant.util.dt as dt_util
from homeassistant.components import logbook
from homeassistant.bootstrap import setup_component
from tests.common import mock_http_component, get_test_home_assistant
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_http_component(self.hass)
self.hass.config.components |= set(['frontend', 'recorder', 'api'])
with patch('homeassistant.components.logbook.'
'register_built_in_panel'):
assert setup_component(self.hass, logbook.DOMAIN,
self.EMPTY_CONFIG)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
@ha.callback
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {
logbook.ATTR_NAME: 'Alarm',
logbook.ATTR_MESSAGE: 'is triggered',
logbook.ATTR_DOMAIN: 'switch',
logbook.ATTR_ENTITY_ID: 'switch.test_switch'
}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
self.assertEqual(1, len(calls))
last_call = calls[-1]
self.assertEqual('Alarm', last_call.data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', last_call.data.get(
logbook.ATTR_MESSAGE))
self.assertEqual('switch', last_call.data.get(logbook.ATTR_DOMAIN))
self.assertEqual('switch.test_switch', last_call.data.get(
logbook.ATTR_ENTITY_ID))
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
@ha.callback
def event_listener(event):
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(logbook.DOMAIN, 'log', {}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
self.assertEqual(0, len(calls))
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(logbook.humanify((eventA, eventB, eventC)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], pointB, 'bla', domain='sensor', entity_id=entity_id)
self.assert_entry(
entries[1], pointC, 'bla', domain='sensor', entity_id=entity_id)
def test_filter_continuous_sensor_values(self):
"""Test remove continuous sensor events from logbook."""
entity_id = 'sensor.bla'
pointA = dt_util.utcnow()
attributes = {'unit_of_measurement': 'foo'}
eventA = self.create_state_changed_event(
pointA, entity_id, 10, attributes)
entries = list(logbook.humanify((eventA,)))
self.assertEqual(0, len(entries))
def test_exclude_new_entities(self):
"""Test if events are excluded on first update."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
eventA.data['old_state'] = None
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), self.EMPTY_CONFIG)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_exclude_removed_entities(self):
"""Test if events are excluded on last update."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
eventA.data['new_state'] = None
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), self.EMPTY_CONFIG)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
    def test_exclude_events_hidden(self):
        """Test if events are excluded if entity is hidden."""
        entity_id = 'sensor.bla'
        entity_id2 = 'sensor.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        # The hidden attribute on the first entity should keep it out of the
        # logbook even without any exclude configuration.
        eventA = self.create_state_changed_event(pointA, entity_id, 10,
                                                 {ATTR_HIDDEN: 'true'})
        eventB = self.create_state_changed_event(pointB, entity_id2, 20)
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
                                          eventA, eventB), self.EMPTY_CONFIG)
        entries = list(logbook.humanify(events))
        self.assertEqual(2, len(entries))
        self.assert_entry(
            entries[0], name='Home Assistant', message='stopped',
            domain=ha.DOMAIN)
        self.assert_entry(
            entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
    def test_exclude_events_entity(self):
        """Test if events are filtered if entity is excluded in config."""
        entity_id = 'sensor.bla'
        entity_id2 = 'sensor.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        eventB = self.create_state_changed_event(pointB, entity_id2, 20)
        # Exclude only the first entity through the logbook configuration.
        config = logbook.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
                logbook.CONF_ENTITIES: [entity_id, ]}}})
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
                                          eventA, eventB), config)
        entries = list(logbook.humanify(events))
        # Only the stop event and the non-excluded entity should remain.
        self.assertEqual(2, len(entries))
        self.assert_entry(
            entries[0], name='Home Assistant', message='stopped',
            domain=ha.DOMAIN)
        self.assert_entry(
            entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
    def test_exclude_events_domain(self):
        """Test if events are filtered if domain is excluded in config."""
        entity_id = 'switch.bla'
        entity_id2 = 'sensor.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        eventB = self.create_state_changed_event(pointB, entity_id2, 20)
        # Exclude the entire 'switch' domain through the configuration.
        config = logbook.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
                logbook.CONF_DOMAINS: ['switch', ]}}})
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
                                          eventA, eventB), config)
        entries = list(logbook.humanify(events))
        # Only the start event and the sensor update should remain.
        self.assertEqual(2, len(entries))
        self.assert_entry(entries[0], name='Home Assistant', message='started',
                          domain=ha.DOMAIN)
        self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
                          entity_id=entity_id2)
def test_exclude_automation_events(self):
"""Test if automation entries can be excluded by entity_id."""
name = 'My Automation Rule'
message = 'has been triggered'
domain = 'automation'
entity_id = 'automation.my_automation_rule'
entity_id2 = 'automation.my_automation_rule_2'
entity_id2 = 'sensor.blu'
eventA = ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_DOMAIN: domain,
logbook.ATTR_ENTITY_ID: entity_id,
})
eventB = ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_DOMAIN: domain,
logbook.ATTR_ENTITY_ID: entity_id2,
})
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_ENTITIES: [entity_id, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], name=name, domain=domain, entity_id=entity_id2)
    def test_include_events_entity(self):
        """Test if events are filtered if entity is included in config."""
        entity_id = 'sensor.bla'
        entity_id2 = 'sensor.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        eventB = self.create_state_changed_event(pointB, entity_id2, 20)
        # An include list whitelists only the second entity; everything else
        # (other than core events) should be dropped.
        config = logbook.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            logbook.DOMAIN: {logbook.CONF_INCLUDE: {
                logbook.CONF_ENTITIES: [entity_id2, ]}}})
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
                                          eventA, eventB), config)
        entries = list(logbook.humanify(events))
        self.assertEqual(2, len(entries))
        self.assert_entry(
            entries[0], name='Home Assistant', message='stopped',
            domain=ha.DOMAIN)
        self.assert_entry(
            entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
    def test_include_events_domain(self):
        """Test if events are filtered if domain is included in config."""
        entity_id = 'switch.bla'
        entity_id2 = 'sensor.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        eventB = self.create_state_changed_event(pointB, entity_id2, 20)
        # An include list whitelists only the 'sensor' domain, so the switch
        # update should be dropped.
        config = logbook.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            logbook.DOMAIN: {logbook.CONF_INCLUDE: {
                logbook.CONF_DOMAINS: ['sensor', ]}}})
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
                                          eventA, eventB), config)
        entries = list(logbook.humanify(events))
        self.assertEqual(2, len(entries))
        self.assert_entry(entries[0], name='Home Assistant', message='started',
                          domain=ha.DOMAIN)
        self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
                          entity_id=entity_id2)
    def test_include_exclude_events(self):
        """Test if events are filtered if include and exclude is configured."""
        entity_id = 'switch.bla'
        entity_id2 = 'sensor.blu'
        entity_id3 = 'sensor.bli'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
        eventA1 = self.create_state_changed_event(pointA, entity_id, 10)
        eventA2 = self.create_state_changed_event(pointA, entity_id2, 10)
        eventA3 = self.create_state_changed_event(pointA, entity_id3, 10)
        eventB1 = self.create_state_changed_event(pointB, entity_id, 20)
        eventB2 = self.create_state_changed_event(pointB, entity_id2, 20)
        # switch.bla is both included (by entity) and excluded (by domain);
        # sensor.bli is included (by domain) but excluded (by entity).  Per
        # the assertions below, the exclude rules win in both cases.
        config = logbook.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            logbook.DOMAIN: {
                logbook.CONF_INCLUDE: {
                    logbook.CONF_DOMAINS: ['sensor', ],
                    logbook.CONF_ENTITIES: ['switch.bla', ]},
                logbook.CONF_EXCLUDE: {
                    logbook.CONF_DOMAINS: ['switch', ],
                    logbook.CONF_ENTITIES: ['sensor.bli', ]}}})
        events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
                                          eventA1, eventA2, eventA3,
                                          eventB1, eventB2), config)
        entries = list(logbook.humanify(events))
        # Only sensor.blu survives the combined filters (plus the start event).
        self.assertEqual(3, len(entries))
        self.assert_entry(entries[0], name='Home Assistant', message='started',
                          domain=ha.DOMAIN)
        self.assert_entry(entries[1], pointA, 'blu', domain='sensor',
                          entity_id=entity_id2)
        self.assert_entry(entries[2], pointB, 'blu', domain='sensor',
                          entity_id=entity_id2)
    def test_exclude_auto_groups(self):
        """Test if events of automatically generated groups are filtered."""
        entity_id = 'switch.bla'
        entity_id2 = 'group.switches'
        pointA = dt_util.utcnow()
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        # The 'auto' attribute marks an automatically generated group, whose
        # state changes are expected to be suppressed.
        eventB = self.create_state_changed_event(pointA, entity_id2, 20,
                                                 {'auto': True})
        entries = list(logbook.humanify((eventA, eventB)))
        self.assertEqual(1, len(entries))
        self.assert_entry(entries[0], pointA, 'bla', domain='switch',
                          entity_id=entity_id)
    def test_exclude_attribute_changes(self):
        """Test if events of attribute changes are filtered."""
        entity_id = 'switch.bla'
        entity_id2 = 'switch.blu'
        pointA = dt_util.utcnow()
        pointB = pointA + timedelta(minutes=1)
        eventA = self.create_state_changed_event(pointA, entity_id, 10)
        # last_updated newer than last_changed means only attributes changed
        # (state stayed the same); such events should be suppressed.
        eventB = self.create_state_changed_event(
            pointA, entity_id2, 20, last_changed=pointA, last_updated=pointB)
        entries = list(logbook.humanify((eventA, eventB)))
        self.assertEqual(1, len(entries))
        self.assert_entry(entries[0], pointA, 'bla', domain='switch',
                          entity_id=entity_id)
    def test_entry_to_dict(self):
        """Test conversion of entry to dict."""
        entry = logbook.Entry(
            dt_util.utcnow(), 'Alarm', 'is triggered', 'switch', 'test_switch'
        )
        # as_dict() should expose each constructor argument under the
        # corresponding ATTR_* key.
        data = entry.as_dict()
        self.assertEqual('Alarm', data.get(logbook.ATTR_NAME))
        self.assertEqual('is triggered', data.get(logbook.ATTR_MESSAGE))
        self.assertEqual('switch', data.get(logbook.ATTR_DOMAIN))
        self.assertEqual('test_switch', data.get(logbook.ATTR_ENTITY_ID))
    def test_home_assistant_start_stop_grouped(self):
        """Test if HA start and stop events are grouped.

        Events that are occuring in the same minute.
        """
        entries = list(logbook.humanify((
            ha.Event(EVENT_HOMEASSISTANT_STOP),
            ha.Event(EVENT_HOMEASSISTANT_START),
        )))
        # Stop immediately followed by start collapses into one 'restarted'
        # entry instead of two separate ones.
        self.assertEqual(1, len(entries))
        self.assert_entry(
            entries[0], name='Home Assistant', message='restarted',
            domain=ha.DOMAIN)
    def test_home_assistant_start(self):
        """Test if HA start is not filtered or converted into a restart."""
        entity_id = 'switch.bla'
        pointA = dt_util.utcnow()
        # A start event without a preceding stop must stay a plain 'started'
        # entry and must not suppress the following state change.
        entries = list(logbook.humanify((
            ha.Event(EVENT_HOMEASSISTANT_START),
            self.create_state_changed_event(pointA, entity_id, 10)
        )))
        self.assertEqual(2, len(entries))
        self.assert_entry(
            entries[0], name='Home Assistant', message='started',
            domain=ha.DOMAIN)
        self.assert_entry(entries[1], pointA, 'bla', domain='switch',
                          entity_id=entity_id)
    def test_entry_message_from_state_device(self):
        """Test if logbook message is correctly created for switches.

        Especially test if the special handling for turn on/off events is done.
        """
        pointA = dt_util.utcnow()
        # message for a device state change (generic numeric state)
        eventA = self.create_state_changed_event(pointA, 'switch.bla', 10)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('changed to 10', message)
        # message for a switch turned on
        eventA = self.create_state_changed_event(pointA, 'switch.bla',
                                                 STATE_ON)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('turned on', message)
        # message for a switch turned off
        eventA = self.create_state_changed_event(pointA, 'switch.bla',
                                                 STATE_OFF)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('turned off', message)
    def test_entry_message_from_state_device_tracker(self):
        """Test if logbook message is correctly created for device tracker."""
        pointA = dt_util.utcnow()
        # message for a device tracker "not home" state
        eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
                                                 STATE_NOT_HOME)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('is away', message)
        # message for a device tracker zone state (any other state name is
        # treated as a location)
        eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
                                                 'work')
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('is at work', message)
    def test_entry_message_from_state_sun(self):
        """Test if logbook message is correctly created for sun."""
        pointA = dt_util.utcnow()
        # message for a sun rise (above-horizon state)
        eventA = self.create_state_changed_event(pointA, 'sun.sun',
                                                 sun.STATE_ABOVE_HORIZON)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('has risen', message)
        # message for a sun set (below-horizon state)
        eventA = self.create_state_changed_event(pointA, 'sun.sun',
                                                 sun.STATE_BELOW_HORIZON)
        to_state = ha.State.from_dict(eventA.data.get('new_state'))
        message = logbook._entry_message_from_state(to_state.domain, to_state)
        self.assertEqual('has set', message)
    def test_process_custom_logbook_entries(self):
        """Test if custom log book entries get added as an entry."""
        name = 'Nice name'
        message = 'has a custom entry'
        entity_id = 'sun.sun'
        # No explicit domain is passed; per the assertion below, the domain
        # is derived from the entity_id.
        entries = list(logbook.humanify((
            ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
                logbook.ATTR_NAME: name,
                logbook.ATTR_MESSAGE: message,
                logbook.ATTR_ENTITY_ID: entity_id,
            }),
        )))
        self.assertEqual(1, len(entries))
        self.assert_entry(
            entries[0], name=name, message=message,
            domain='sun', entity_id=entity_id)
def assert_entry(self, entry, when=None, name=None, message=None,
domain=None, entity_id=None):
"""Assert an entry is what is expected."""
if when:
self.assertEqual(when, entry.when)
if name:
self.assertEqual(name, entry.name)
if message:
self.assertEqual(message, entry.message)
if domain:
self.assertEqual(domain, entry.domain)
if entity_id:
self.assertEqual(entity_id, entry.entity_id)
    def create_state_changed_event(self, event_time_fired, entity_id, state,
                                   attributes=None, last_changed=None,
                                   last_updated=None):
        """Create state changed event."""
        # Logbook only cares about state change events that
        # contain an old state but will not actually act on it.
        # NOTE: the same state dict is reused as both old_state and
        # new_state, since only its presence matters to the logbook.
        state = ha.State(entity_id, state, attributes, last_changed,
                         last_updated).as_dict()
        return ha.Event(EVENT_STATE_CHANGED, {
            'entity_id': entity_id,
            'old_state': state,
            'new_state': state,
        }, time_fired=event_time_fired)
|
{
"content_hash": "8e7a687324bc417a5b37a87a5d380b4e",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 79,
"avg_line_length": 42.108058608058606,
"alnum_prop": 0.591013874994563,
"repo_name": "keerts/home-assistant",
"id": "b6583ba35364cfb99650bb28cdf265d6edabd104",
"size": "22991",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/test_logbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1546272"
},
{
"name": "Python",
"bytes": "5270263"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
from ceph_deploy.exc import ExecutableNotFound
from ceph_deploy.util import system, versions
from ceph_deploy.lib import remoto
class Ceph(object):
    """
    Determine different aspects of the Ceph package, like ``version`` and path
    ``executable``. Although mostly provide a version object that helps for
    parsing and comparing.
    """

    def __init__(self, conn, _check=None):
        self.conn = conn
        # _check is injectable for testing; defaults to remoto's checker.
        self._check = _check or remoto.process.check

    @property
    def installed(self):
        """
        If the ``ceph`` executable exists, then Ceph is installed. Should
        probably be revisited if different components do not have the ``ceph``
        executable (this is currently provided by ``ceph-common``).
        """
        return bool(self.executable)

    @property
    def executable(self):
        """Absolute path to the ``ceph`` executable, or ``None`` if absent."""
        try:
            return system.executable_path(self.conn, 'ceph')
        except ExecutableNotFound:
            return None

    def _get_version_output(self):
        """
        Ignoring errors, call `ceph --version` and return only the version
        portion of the output. For example, output like::

            ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg)

        Would return::

            9.0.1-1234kjd
        """
        if not self.executable:
            return ''
        command = [self.executable, '--version']
        out, _, _ = self._check(self.conn, command)
        try:
            # 'ceph version X (...)' -> the third token is the version string.
            return out.split()[2]
        except IndexError:
            return ''

    @property
    def version(self):
        """
        Return a version object (see
        :mod:``ceph_deploy.util.versions.NormalizedVersion``)
        """
        # BUG FIX: the method must be *called* here; previously the bound
        # method object itself was passed to parse_version, which expects
        # the version string produced by _get_version_output().
        return versions.parse_version(self._get_version_output())
# callback helpers
def ceph_is_installed(module):
    """
    A helper callback to be executed after the connection is made to ensure
    that Ceph is installed.

    :param module: remote module wrapper exposing a ``conn`` attribute
    :raises RuntimeError: when the ``ceph`` executable is not found remotely
    """
    ceph_package = Ceph(module.conn)
    if not ceph_package.installed:
        host = module.conn.hostname
        raise RuntimeError(
            'ceph needs to be installed in remote host: %s' % host
        )
|
{
"content_hash": "882bb7d336f8aec69a210c82253abe19",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 28.824324324324323,
"alnum_prop": 0.6085325832161275,
"repo_name": "osynge/ceph-deploy",
"id": "4c638040ad828ac6e419e12f68fe67cd75c8e22b",
"size": "2133",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ceph_deploy/util/packages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "355952"
},
{
"name": "Shell",
"bytes": "7086"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
from zoo.util.utils import get_node_ip
# Assumption:
# 1. All hosts has oneCCL installed
# 2. The driver can ssh all hosts without password
# 3. All hosts have the same working directory.
# 4. All hosts have the same Python environment in the same location.
class MPIRunner:
    """Launch a Python script across hosts with mpiexec (Intel MPI/Hydra).

    See the module-level assumptions: oneCCL installed everywhere,
    passwordless ssh from the driver, identical working directories and
    Python environments on all hosts.
    """

    def __init__(self,
                 hosts=None,
                 processes_per_node=1,
                 env=None):
        # hosts: None -> single node (driver only); "all" -> discover every
        # Spark executor node; otherwise a user-provided list of host IPs.
        driver_ip = get_node_ip()
        if hosts is None:  # Single node
            self.hosts = [driver_ip]
        elif hosts == "all":  # All executor nodes in the cluster
            def get_ip(iter):
                yield get_node_ip()
            from bigdl.util.common import get_node_and_core_number
            from zoo.orca import OrcaContext
            sc = OrcaContext.get_spark_context()
            node_num, core_num = get_node_and_core_number()
            total_cores = node_num * core_num
            # Run one barrier task per core and collect the distinct node IPs.
            self.hosts = list(set(sc.range(0, total_cores, numSlices=total_cores).barrier()
                                  .mapPartitions(get_ip).collect()))
        else:  # User specified hosts, assumed to be non-duplicate
            assert isinstance(hosts, list)
            self.hosts = hosts
        # The first host acts as the MPI master (MASTER_ADDR for the workers).
        self.master = self.hosts[0]
        print("Master: ", self.master)
        self.remote_hosts = []
        for host in self.hosts:
            if host != driver_ip:
                self.remote_hosts.append(host)
        print("Remote hosts: ", self.remote_hosts)
        print("Hosts: ", self.hosts)
        self.processes_per_node = processes_per_node
        self.env = env if env else {}

    def run(self, file, **kwargs):
        """Copy *file* to all remote hosts and run it under mpiexec.

        Extra keyword arguments are forwarded as ``--key=value`` CLI flags.
        Blocks until the MPI job finishes.
        """
        file_path = os.path.abspath(file)
        assert os.path.exists(file_path)
        file_dir = "/".join(file_path.split("/")[:-1])
        # All hosts share the same directory layout, so scp to the same path.
        self.scp_file(file_path, file_dir)
        # cmd = ["mpiexec.openmpi"]
        cmd = ["mpiexec.hydra"]
        # -l would label the output with process rank. -l/-ppn not available for openmpi.
        # mpi_config = "-np {} ".format(
        mpi_config = "-np {} -ppn {} -l ".format(
            self.processes_per_node * len(self.hosts),
            self.processes_per_node)
        mpi_env = os.environ.copy()
        mpi_env.update(self.env)
        # Forward pinning/threading settings to every MPI rank via -genv.
        if "I_MPI_PIN_DOMAIN" in mpi_env:
            mpi_config += "-genv I_MPI_PIN_DOMAIN={} ".format(mpi_env["I_MPI_PIN_DOMAIN"])
        if "OMP_NUM_THREADS" in mpi_env:
            mpi_config += "-genv OMP_NUM_THREADS={} ".format(mpi_env["OMP_NUM_THREADS"])
        if len(self.remote_hosts) > 0:
            mpi_config += "-hosts {}".format(",".join(self.hosts))
        cmd.extend(mpi_config.split())
        # cmd.append("ls")
        cmd.append(sys.executable)
        cmd.append("-u")  # This can print as the program runs
        cmd.append(file_path)
        for k, v in kwargs.items():
            cmd.append("--{}={}".format(str(k), str(v)))
        print(cmd)
        if len(self.remote_hosts) > 0:
            mpi_env["MASTER_ADDR"] = str(self.master)
        else:  # Single node
            mpi_env["MASTER_ADDR"] = "127.0.0.1"
        # print(mpi_env)
        process = subprocess.Popen(cmd, env=mpi_env)
        process.wait()

    def scp_file(self, file, remote_dir):
        """Copy *file* into *remote_dir* on every remote host (as root)."""
        for host in self.remote_hosts:
            p = subprocess.Popen(["scp", file,
                                  "root@{}:{}/".format(host, remote_dir)])
            os.waitpid(p.pid, 0)

    def launch_plasma(self, object_store_memory="2g"):
        """Start a plasma object store on every host; returns its socket path.

        Registers shutdown_plasma via atexit so the stores are cleaned up
        when the driver process exits.
        """
        import atexit
        atexit.register(self.shutdown_plasma)
        # TODO: Or can use spark to launch plasma
        from zoo.ray.utils import resource_to_bytes
        # plasma_store ships alongside the Python interpreter's bin directory.
        self.plasma_path = "/".join(sys.executable.split("/")[:-1] + ["plasma_store"])
        self.object_store_memory = resource_to_bytes(object_store_memory)
        self.object_store_address = "/tmp/analytics_zoo_plasma"
        command = "{} -m {} -s {}".format(
            self.plasma_path, self.object_store_memory, self.object_store_address)
        for host in self.hosts:
            if host != get_node_ip():
                p = subprocess.Popen(["ssh", "root@{}".format(host), command])
            else:
                p = subprocess.Popen(command.split())
            print("Plasma launched on {}".format(host))
        return self.object_store_address

    def shutdown_plasma(self):
        """Kill the plasma store process on every host (local and remote)."""
        for host in self.hosts:
            if host != get_node_ip():
                p = subprocess.Popen(["ssh", "root@{}".format(host), "pkill plasma"])
            else:
                p = subprocess.Popen(["pkill", "plasma"])
            os.waitpid(p.pid, 0)
|
{
"content_hash": "cb796f4a2f58053b30a23bd5d1738946",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 91,
"avg_line_length": 40.81578947368421,
"alnum_prop": 0.5559853857726198,
"repo_name": "intel-analytics/analytics-zoo",
"id": "da9f565af0de244151c6fb1b40c55dc464220856",
"size": "5244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/orca/learn/mpi/mpi_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
from django.conf import settings
class ChoiceSetMeta(type):
    """
    Metaclass for ChoiceSet
    """
    def __new__(mcs, name, bases, attrs):
        # Extend static choices with any configured choices
        if key := attrs.get('key'):
            assert type(attrs['CHOICES']) is list, f"{name} has a key defined but CHOICES is not a list"
            app = attrs['__module__'].split('.', 1)[0]
            # Config lookup keys: '<app>.<key>' replaces the stock choices,
            # '<app>.<key>+' appends to them instead.
            replace_key = f'{app}.{key}'
            extend_key = f'{replace_key}+' if replace_key else None
            if replace_key and replace_key in settings.FIELD_CHOICES:
                # Replace the stock choices
                attrs['CHOICES'] = settings.FIELD_CHOICES[replace_key]
            elif extend_key and extend_key in settings.FIELD_CHOICES:
                # Extend the stock choices
                attrs['CHOICES'].extend(settings.FIELD_CHOICES[extend_key])
        # Define choice tuples and color maps
        # Each CHOICES item is (value, label[, color]) or, for optgroups,
        # (group_label, [(value, label[, color]), ...]).
        attrs['_choices'] = []
        attrs['colors'] = {}
        for choice in attrs['CHOICES']:
            if isinstance(choice[1], (list, tuple)):
                grouped_choices = []
                for c in choice[1]:
                    grouped_choices.append((c[0], c[1]))
                    if len(c) == 3:
                        # Third element (if present) is the display color.
                        attrs['colors'][c[0]] = c[2]
                attrs['_choices'].append((choice[0], grouped_choices))
            else:
                attrs['_choices'].append((choice[0], choice[1]))
                if len(choice) == 3:
                    attrs['colors'][choice[0]] = choice[2]
        return super().__new__(mcs, name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        # django-filters will check if a 'choices' value is callable, and if so assume that it returns an iterable
        return getattr(cls, '_choices', ())

    def __iter__(cls):
        # Allow iterating the class itself to walk its choice tuples.
        return iter(getattr(cls, '_choices', ()))
class ChoiceSet(metaclass=ChoiceSetMeta):
    """
    Holds an iterable of choice tuples suitable for passing to a Django model or form field. Choices can be defined
    statically within the class as CHOICES and/or gleaned from the FIELD_CHOICES configuration parameter.
    """
    # Overridden by subclasses; consumed by ChoiceSetMeta to build _choices.
    CHOICES = list()

    @classmethod
    def values(cls):
        """Return a flat list of the choice values (optgroups unpacked)."""
        return [c[0] for c in unpack_grouped_choices(cls._choices)]
def unpack_grouped_choices(choices):
    """
    Unpack a grouped choices hierarchy into a flat list of two-tuples. For example:

        choices = (
            ('Foo', (
                (1, 'A'),
                (2, 'B')
            )),
            ('Bar', (
                (3, 'C'),
                (4, 'D')
            ))
        )

    becomes:

        choices = (
            (1, 'A'),
            (2, 'B'),
            (3, 'C'),
            (4, 'D')
        )
    """
    flat = []
    for key, value in choices:
        if isinstance(value, (list, tuple)):
            # An optgroup: hoist each (value, label) pair out of the group.
            flat.extend((inner_key, inner_label) for inner_key, inner_label in value)
        else:
            # Already a plain (value, label) pair.
            flat.append((key, value))
    return flat
#
# Generic color choices
#
class ColorChoices(ChoiceSet):
    """Standard color palette; values are six-digit RGB hex strings."""
    COLOR_DARK_RED = 'aa1409'
    COLOR_RED = 'f44336'
    COLOR_PINK = 'e91e63'
    COLOR_ROSE = 'ffe4e1'
    COLOR_FUCHSIA = 'ff66ff'
    COLOR_PURPLE = '9c27b0'
    COLOR_DARK_PURPLE = '673ab7'
    COLOR_INDIGO = '3f51b5'
    COLOR_BLUE = '2196f3'
    COLOR_LIGHT_BLUE = '03a9f4'
    COLOR_CYAN = '00bcd4'
    COLOR_TEAL = '009688'
    COLOR_AQUA = '00ffff'
    COLOR_DARK_GREEN = '2f6a31'
    COLOR_GREEN = '4caf50'
    COLOR_LIGHT_GREEN = '8bc34a'
    COLOR_LIME = 'cddc39'
    COLOR_YELLOW = 'ffeb3b'
    COLOR_AMBER = 'ffc107'
    COLOR_ORANGE = 'ff9800'
    COLOR_DARK_ORANGE = 'ff5722'
    COLOR_BROWN = '795548'
    COLOR_LIGHT_GREY = 'c0c0c0'
    COLOR_GREY = '9e9e9e'
    COLOR_DARK_GREY = '607d8b'
    COLOR_BLACK = '111111'
    COLOR_WHITE = 'ffffff'
    # (value, human-readable label) pairs consumed by ChoiceSetMeta.
    CHOICES = (
        (COLOR_DARK_RED, 'Dark Red'),
        (COLOR_RED, 'Red'),
        (COLOR_PINK, 'Pink'),
        (COLOR_ROSE, 'Rose'),
        (COLOR_FUCHSIA, 'Fuchsia'),
        (COLOR_PURPLE, 'Purple'),
        (COLOR_DARK_PURPLE, 'Dark Purple'),
        (COLOR_INDIGO, 'Indigo'),
        (COLOR_BLUE, 'Blue'),
        (COLOR_LIGHT_BLUE, 'Light Blue'),
        (COLOR_CYAN, 'Cyan'),
        (COLOR_TEAL, 'Teal'),
        (COLOR_AQUA, 'Aqua'),
        (COLOR_DARK_GREEN, 'Dark Green'),
        (COLOR_GREEN, 'Green'),
        (COLOR_LIGHT_GREEN, 'Light Green'),
        (COLOR_LIME, 'Lime'),
        (COLOR_YELLOW, 'Yellow'),
        (COLOR_AMBER, 'Amber'),
        (COLOR_ORANGE, 'Orange'),
        (COLOR_DARK_ORANGE, 'Dark Orange'),
        (COLOR_BROWN, 'Brown'),
        (COLOR_LIGHT_GREY, 'Light Grey'),
        (COLOR_GREY, 'Grey'),
        (COLOR_DARK_GREY, 'Dark Grey'),
        (COLOR_BLACK, 'Black'),
        (COLOR_WHITE, 'White'),
    )
#
# Button color choices
#
class ButtonColorChoices(ChoiceSet):
    """
    Map standard button color choices to Bootstrap 3 button classes
    """
    DEFAULT = 'outline-dark'
    BLUE = 'blue'
    INDIGO = 'indigo'
    PURPLE = 'purple'
    PINK = 'pink'
    RED = 'red'
    ORANGE = 'orange'
    YELLOW = 'yellow'
    GREEN = 'green'
    TEAL = 'teal'
    CYAN = 'cyan'
    GRAY = 'gray'
    # Alias sharing GRAY's value, hence no separate CHOICES entry below.
    GREY = 'gray'  # Backward compatability for <3.2
    BLACK = 'black'
    WHITE = 'white'
    # (value, human-readable label) pairs consumed by ChoiceSetMeta.
    CHOICES = (
        (DEFAULT, 'Default'),
        (BLUE, 'Blue'),
        (INDIGO, 'Indigo'),
        (PURPLE, 'Purple'),
        (PINK, 'Pink'),
        (RED, 'Red'),
        (ORANGE, 'Orange'),
        (YELLOW, 'Yellow'),
        (GREEN, 'Green'),
        (TEAL, 'Teal'),
        (CYAN, 'Cyan'),
        (GRAY, 'Gray'),
        (BLACK, 'Black'),
        (WHITE, 'White'),
    )
|
{
"content_hash": "0f667850073c3a7ccc61659f66bfbe83",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 115,
"avg_line_length": 28.77386934673367,
"alnum_prop": 0.5316101990918617,
"repo_name": "digitalocean/netbox",
"id": "c5b5bafb9e5d76cffda1186bd090b3160e702814",
"size": "5726",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/utilities/choices.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
"""
Locking functionality when launching things from the command line.
Uses a pidfile.
This prevents multiple identical workflows to be launched simultaneously.
"""
from __future__ import print_function
import errno
import hashlib
import os
import sys
from subprocess import Popen, PIPE
from luigi import six
def getpcmd(pid):
    """
    Returns command of process.

    :param pid: process id to look up.
    :return: the command line string of the process, or a
        ``[PROCESS_WITH_PID=...]`` placeholder if it cannot be determined.
    """
    if os.name == "nt":
        # Use wmic command instead of ps on Windows.
        cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
        with os.popen(cmd, 'r') as p:
            lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
            if lines:
                # NOTE(review): this assumes wmic emits exactly two non-blank
                # lines (header + value); more would raise ValueError — confirm.
                _, val = lines
                return val
    elif sys.platform == "darwin":
        # Use pgrep instead of /proc on macOS.
        # A temporary pidfile is written because pgrep -F reads pids from a file.
        pidfile = ".%d.pid" % (pid, )
        with open(pidfile, 'w') as f:
            f.write(str(pid))
        try:
            p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
            stdout, _ = p.communicate()
            line = stdout.decode('utf8').strip()
            if line:
                # pgrep -l output is '<pid> <command>'; keep only the command.
                _, scmd = line.split(' ', 1)
                return scmd
        finally:
            os.unlink(pidfile)
    else:
        # Use the /proc filesystem
        # At least on android there have been some issues with not all
        # process infos being readable. In these cases using the `ps` command
        # worked. See the pull request at
        # https://github.com/spotify/luigi/pull/1876
        try:
            with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
                if six.PY3:
                    # cmdline separates argv entries with NUL bytes.
                    return fh.read().replace('\0', ' ').rstrip()
                else:
                    return fh.read().replace('\0', ' ').decode('utf8').rstrip()
        except IOError:
            # the system may not allow reading the command line
            # of a process owned by another user
            pass
    # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
    return '[PROCESS_WITH_PID={}]'.format(pid)
def get_info(pid_dir, my_pid=None):
    """Return (pid, command line, pidfile path) for a process.

    The pidfile name is the md5 digest of the command line, so identical
    invocations map to the same file under *pid_dir*.
    """
    # Default to the current process when no pid is supplied.
    pid = os.getpid() if my_pid is None else my_pid
    cmd = getpcmd(pid)
    digest = hashlib.md5(cmd.encode('utf8')).hexdigest()
    return pid, cmd, os.path.join(pid_dir, digest) + '.pid'
def acquire_for(pid_dir, num_available=1, kill_signal=None):
    """
    Makes sure the process is only run once at the same time with the same name.

    Notice that we since we check the process name, different parameters to the same
    command can spawn multiple processes at the same time, i.e. running
    "/usr/bin/my_process" does not prevent anyone from launching
    "/usr/bin/my_process --foo bar".

    :param pid_dir: directory where the shared pidfile lives.
    :param num_available: how many concurrent identical commands are allowed.
    :param kill_signal: if given, send this signal to the other pids first.
    :return: True if the lock was acquired, False otherwise.
    """
    my_pid, my_cmd, pid_file = get_info(pid_dir)
    # Create a pid file if it does not exist
    try:
        os.mkdir(pid_dir)
        os.chmod(pid_dir, 0o777)
    except OSError as exc:
        # An already-existing directory is fine; anything else is fatal.
        if exc.errno != errno.EEXIST:
            raise
        pass
    # Let variable "pids" be all pids who exist in the .pid-file who are still
    # about running the same command.
    pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd}
    if kill_signal is not None:
        for pid in pids:
            os.kill(pid, kill_signal)
        print('Sent kill signal to Pids: {}'.format(pids))
        # We allow for the killer to progress, yet we don't want these to stack
        # up! So we only allow it once.
        num_available += 1
    if len(pids) >= num_available:
        # We are already running under a different pid
        print('Pid(s) {} already running'.format(pids))
        if kill_signal is not None:
            print('Note: There have (probably) been 1 other "--take-lock"'
                  ' process which continued to run! Probably no need to run'
                  ' this one as well.')
        return False
    # Record our own pid alongside the live ones so later invocations see us.
    _write_pids_file(pid_file, pids | {my_pid})
    return True
def _read_pids_file(pid_file):
# First setup a python 2 vs 3 compatibility
# http://stackoverflow.com/a/21368622/621449
try:
FileNotFoundError
except NameError:
# Should only happen on python 2
FileNotFoundError = IOError
# If the file happen to not exist, simply return
# an empty set()
try:
with open(pid_file, 'r') as f:
return {int(pid_str.strip()) for pid_str in f if pid_str.strip()}
except FileNotFoundError:
return set()
def _write_pids_file(pid_file, pids_set):
with open(pid_file, 'w') as f:
f.writelines('{}\n'.format(pid) for pid in pids_set)
# Make the .pid-file writable by all (when the os allows for it)
if os.name != 'nt':
s = os.stat(pid_file)
if os.getuid() == s.st_uid:
os.chmod(pid_file, s.st_mode | 0o777)
|
{
"content_hash": "9c0d081b912c83b88516b7aa16177df5",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 99,
"avg_line_length": 33.053333333333335,
"alnum_prop": 0.5865268253327954,
"repo_name": "foursquare/luigi",
"id": "e1a604f540a2561d8720de341efe49b0ff137361",
"size": "5561",
"binary": false,
"copies": "4",
"ref": "refs/heads/fs-2.7.8",
"path": "luigi/lock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5051"
},
{
"name": "Dockerfile",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "41976"
},
{
"name": "JavaScript",
"bytes": "171184"
},
{
"name": "Python",
"bytes": "1929137"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
"""
hyper
~~~~~~
A module for providing an abstraction layer over the differences between
HTTP/1.1 and HTTP/2.
"""
__version__ = '0.4.0'
from .common.connection import HTTPConnection
from .http20.connection import HTTP20Connection
from .http20.response import HTTP20Response, HTTP20Push
from .http11.connection import HTTP11Connection
from .http11.response import HTTP11Response
# Throw import errors on Python <2.7 and 3.0-3.2.
import sys as _sys
if _sys.version_info < (2,7) or (3,0) <= _sys.version_info < (3,3):
raise ImportError("hyper only supports Python 2.7 and Python 3.3 or higher.")
__all__ = [
HTTPConnection,
HTTP20Response,
HTTP20Push,
HTTP20Connection,
HTTP11Connection,
HTTP11Response,
]
# Set default logging handler.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
{
"content_hash": "ae64f8ebc0b30da9290b281128693bf5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 26.40625,
"alnum_prop": 0.7325443786982249,
"repo_name": "masaori335/hyper",
"id": "379843f87ee1fc378ce9513525afb80a731deea9",
"size": "869",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "hyper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Hy",
"bytes": "1249"
},
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "340769"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
}
|
import mraa
import time
from rgb import rgb
# This just wraps the rgb class for a simple demo
class tadpole(rgb):
    """Thin demo wrapper that binds the rgb driver to fixed pins."""
    def __init__(self):
        # Pins 82/83/84 with three True flags; presumably pin numbers for the
        # R/G/B channels plus per-channel enables -- TODO confirm against rgb.
        rgb.__init__(self, 82, 83, 84, True, True, True)
|
{
"content_hash": "6ebc7f2fb742febfadc9519619e2e609",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 24.5,
"alnum_prop": 0.6581632653061225,
"repo_name": "steelee/minnow_max_maker",
"id": "6e3052b888b389463fb9e5272d995f88b4052bb7",
"size": "196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyDrivers/tadpole.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2006"
},
{
"name": "Makefile",
"bytes": "336"
},
{
"name": "Python",
"bytes": "203960"
},
{
"name": "Shell",
"bytes": "3417"
}
],
"symlink_target": ""
}
|
"""
Frequency counting with Tektronix RSA3408A Spectrum Analyzer
"""
##### Imports
import sys
import time
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
# Own modules
sys.path.append("../../")
sys.path.append("../../drivers/")
import rsa3408a
import lablib.logfile as logfile
import lablib.utils as utils
##### End of imports
print "Spectrum Analyzer carrier frequency:"
try:
configfile = sys.argv[1]
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
except:
print "Cannot find configuration file."
sys.exit(1)
# Setup instrument
rsagpib = config.getint('Setup', 'rsa_gpib')
rsa = rsa3408a.RSA(rsagpib)
try:
nums = int(raw_input("How many points (press enter for infinite logging): "))
except:
nums = None
## setup logging
log = logfile.setupLog("rsa_log")
# Save configuration info
f = open(configfile)
for line in f:
log("# %s" %line.strip())
f.close()
log("# Unixtime, Carrier_frequency (Hz)")
rsa.write(":INIT:CONT OFF;")
count = 0
for i in xrange(100):
try:
cfreq = float(rsa.ask(":READ:SPECtrum:CFR?"))
now = time.time()
log("%.3f,%.3f" %(now, cfreq))
count += 1
print "Center freq %3d: %.3f Hz" %(count,cfreq)
if nums and (count >= nums):
break
except (KeyboardInterrupt):
break
print("Finished.")
|
{
"content_hash": "2cd54914ea11c98588aad358db447698",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 23.639344262295083,
"alnum_prop": 0.6255201109570042,
"repo_name": "imrehg/labhardware",
"id": "82d49ad592bfa137302cef9215c6314657baf1a1",
"size": "1442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "projects/allan_cont/rsalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "535761"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
}
|
import ctypes
import os
import time
import argparse
import subprocess
import scipy.sparse as sp
import mxnet as mx
import numpy as np
import numpy.random as rnd
from mxnet.test_utils import rand_ndarray, set_default_device, assert_almost_equal, get_bz2_data
from mxnet.base import check_call, _LIB
from util import estimate_density

# Command-line options for the sparse-operator benchmark driver.
PARSER = argparse.ArgumentParser(description="Benchmark sparse operators",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
PARSER.add_argument('--num-omp-threads', type=int,
                    default=1, help='number of omp threads to set in MXNet')
PARSER.add_argument('--gpu', action='store_true',
                    help="to be run on gpu")
# TODO: Use logging later
PARSER.add_argument('--verbose', action='store_true',
                    help="Verbose output")
ARGS = PARSER.parse_args()

# some data information
# Real LibSVM datasets: 'feature_dim' is the input dimension k,
# 'm' the list of output dimensions to sweep, 'batch_size' the list of
# batch sizes to sweep, and 'default_index' the index into those lists
# used while the other dimension is being varied.
KDDA = {
    'data_mini': 'kdda.t.mini',
    'data_name': 'kdda.t',
    'data_origin_name': 'kdda.t.bz2',
    'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/kdda.t.bz2",
    'feature_dim': 20216830,
    'm': [1, 8, 32],
    'batch_size': [64],
    'default_index': {'batch_size': 0,
                      'output_dim': 2},
    'num_batches': 10
}

AVAZU = {
    'data_mini': 'avazu-app.t.mini',
    'data_name': 'avazu-app.t',
    'data_origin_name': 'avazu-app.t.bz2',
    'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.t.bz2",
    'feature_dim': 1000000,
    'm': [1, 1000, 2000],
    'batch_size': [128, 256],
    'default_index': {'batch_size': 0,
                      'output_dim': 1},
    'num_batches': 10
}

CRITEO = {
    'data_mini': 'criteo.t.mini',
    'data_name': 'criteo.t',
    'data_origin_name': 'criteo.t.bz2',
    'url' : "https://s3-us-west-2.amazonaws.com/sparse-dataset/criteo.t.bz2",
    'feature_dim': 8388621,
    'm': [1, 8, 16, 32, 64],
    'batch_size': [64, 128],
    'default_index': {'batch_size': 1,
                      'output_dim': 3},
    'num_batches': 10
}

# Synthetic configurations: same structure as above, but data is generated
# with rand_ndarray over the listed 'density' values instead of downloaded.
SYNTHETIC1 = {
    'feature_dim': [1000000],
    'm': [256, 1000],
    'density': [0.001, 0.005, 0.01, 0.02, 0.05,
                0.1, 0.2, 0.5, 0.65],
    'batch_size': [64, 128],
    'default_index': {'batch_size': 1,
                      'density': 2,
                      'output_dim': 1,
                      'feature_dim': 0},
    'num_repeat': 10
}

SYNTHETIC2 = {
    'feature_dim': [8000000, 16000000],
    'm': [1, 32],
    'density': [0.001, 0.005, 0.01, 0.02, 0.05,
                0.1, 0.2, 0.5, 0.65],
    'batch_size': [64, 128],
    'default_index': {'batch_size': 1,
                      'density': 2,
                      'output_dim': 1,
                      'feature_dim': 0},
    'num_repeat': 10
}
def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs):
    """Measure the average wall-clock time of calling ``func_name``.

    :param repeat: number of timed invocations to average over.
    :param scipy_trans_lhs: if True, transpose the first positional
        argument before the timed loop (used by the scipy benchmarks,
        where the transpose is part of the measured work).
    :param scipy_dns_lhs: True when the lhs is a dense numpy array
        (``np.transpose``), False for a scipy sparse matrix.
    :param func_name: the callable to benchmark.
    :return: average seconds per invocation.
    """
    mx.nd.waitall()
    # A mutable copy of args is needed because the lhs may be replaced
    # with its transpose below; list(args) replaces the manual append loop.
    args_list = list(args)
    start = time.time()
    if scipy_trans_lhs:
        args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0])
    for _ in range(repeat):
        func_name(*args_list, **kwargs)
    # Block until all asynchronous MXNet work is done so the timing is real.
    mx.nd.waitall()
    end = time.time()
    diff = end - start
    return diff / repeat
def _get_iter(path, data_shape, batch_size):
    """Build an iterator over the LibSVM file at ``path``."""
    source = mx.io.LibSVMIter(data_libsvm=path,
                              data_shape=data_shape,
                              batch_size=batch_size)
    return iter(source)
def _line_count(path):
return int(subprocess.check_output('wc -l {}'.format(path), shell=True).split()[0])
def _compare_sparse_dense(data_dir, file_name, mini_file_name, feature_dim,
                          output_dim, density, batch_size, num_batches=3, num_repeat=5, transpose=False,
                          rsp=False):
    """Benchmark mx.nd.sparse.dot against mx.nd.dot on one real dataset
    configuration and print a single result row (via print_result)."""

    def create_mini_path(mini_path, path, num_batches):
        """Samples batches of size: batch_size, total number: num_batches
        from the dataset files for running benchmarks"""
        if not os.path.exists(mini_path):
            # Pick a random contiguous slice of the full file so each
            # generated mini-file samples a different region.
            last = _line_count(path) - num_batches * batch_size
            last = last if last >= 1 else 1
            start = int(rnd.uniform(1, last))
            os.system("sed -n '%d,%dp' %r > %r"
                      %(start, start + num_batches * batch_size, path, mini_path))
            assert os.path.exists(mini_path)

    def run_benchmark(mini_path):
        """Run benchmarks
        """
        data_shape = (feature_dim, )
        train_iter = _get_iter(mini_path, data_shape, batch_size)
        # When measuring dot(X^T, W) the weight's row count must match
        # the batch size instead of the feature dimension.
        weight_row_dim = batch_size if transpose else feature_dim
        weight_shape = (weight_row_dim, output_dim)
        if not rsp:
            weight = mx.nd.random.uniform(low=0, high=1, shape=weight_shape)
        else:
            weight = rand_ndarray(weight_shape, "row_sparse", density=0.05, distribution="uniform")
        total_cost = {}
        average_cost = {}
        count = 0
        total_cost["sparse"] = 0.
        total_cost["dense"] = 0.
        # Time every batch for both storage types and average at the end.
        for _ in train_iter:
            csr_data = train_iter.getdata()
            dns_data = csr_data.tostype('default')
            cost_sparse = measure_cost(num_repeat, False, False, mx.nd.sparse.dot, csr_data, weight, transpose_a=transpose)
            cost_dense = measure_cost(num_repeat, False, False, mx.nd.dot, dns_data, weight, transpose_a=transpose)
            total_cost["sparse"] += cost_sparse
            total_cost["dense"] += cost_dense
            count = count + 1
        average_cost["sparse"] = total_cost["sparse"] / count
        average_cost["dense"] = total_cost["dense"] / count
        return (average_cost["sparse"], average_cost["dense"])

    def print_result(average_cost_sparse, average_cost_dense):
        """Print result of comparison between sparse and dense
        """
        ratio = average_cost_dense / average_cost_sparse
        fmt = '{:15.4f} {:10d} {:10d} {:10d} {:20.2f} {:15.2f} {:15.2f} {:10} {:10}'
        print(fmt.format(density * 100, batch_size, output_dim, feature_dim,
                         ratio, average_cost_dense*1000, average_cost_sparse*1000,
                         transpose, rsp))

    mini_path = os.path.join(data_dir, mini_file_name)
    path = os.path.join(data_dir, file_name)
    create_mini_path(mini_path, path, num_batches)
    average_cost_sparse, average_cost_dense = run_benchmark(mini_path)
    print_result(average_cost_sparse, average_cost_dense)
def test_dot_real(data_dict):
    """Dot operator benchmarking with real datasets.

    Downloads the dataset if needed, then sweeps output dimension and
    batch size (holding the other at its configured default) and prints
    one result row per configuration via _compare_sparse_dense.
    """
    data_dir = os.path.join(os.getcwd(), 'data')
    path = os.path.join(data_dir, data_dict['data_name'])
    if not os.path.exists(path):
        get_bz2_data(
            data_dir,
            data_dict['data_name'],
            data_dict['url'],
            data_dict['data_origin_name']
        )
        assert os.path.exists(path)

    k = data_dict['feature_dim']
    m = data_dict['m']
    batch_size_list = data_dict['batch_size']

    default_output_index = data_dict['default_index']['output_dim']
    default_batch_size_index = data_dict['default_index']['batch_size']
    density = estimate_density(path, data_dict['feature_dim'])
    num_batches = data_dict['num_batches']

    assert default_batch_size_index < len(batch_size_list)
    assert default_output_index < len(m)
    if ARGS.verbose:
        # BUG FIX: the '%' formatting must happen inside the print() call;
        # ``print(...) % x`` applies '%' to print's None return value and
        # raises TypeError at runtime.
        print("Running Benchmarking on %r data" % data_dict['data_mini'])
    print('{:>15} {:>10} {:>10} {:>10} {:>20} {:>15} {:>15} {:>10} {:>10}'.format('density(%)',
                                                                                  'n',
                                                                                  'm',
                                                                                  'k',
                                                                                  't_dense/t_sparse',
                                                                                  't_dense(ms)',
                                                                                  't_sparse(ms)',
                                                                                  'is_transpose',
                                                                                  'rhs_rsp'))

    # Sweep output dimension at the default batch size.
    for output_dim in m:
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches,
                              transpose=True)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches, rsp=True)

    # Sweep batch size at the default output dimension.
    for batch_size in batch_size_list:
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size, num_batches)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size, num_batches,
                              transpose=True)
        # BUG FIX: this call previously reused ``output_dim`` leaked from the
        # loop above and the *default* batch size, so every iteration of this
        # loop benchmarked the same configuration. It now varies batch_size
        # like its two siblings.
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size, num_batches,
                              rsp=True)
def test_dot_synthetic(data_dict):
    """benchmark sparse mxnet dot and scipy dot operator with matrices of given density.
    `t_sparse` is the runtime of the invoked sparse dot operator in ms, while `t_dense` is the
    runtime of dot(dns, dns), with the same matrices except that they are in default storage type.
    """
    # Benchmark MXNet and Scipys dot operator
    def bench_dot(lhs_shape, rhs_shape, lhs_stype, rhs_stype,
                  lhs_den, rhs_den, trans_lhs, ctx, num_repeat=10, fw="mxnet", distribution="uniform"):
        """Time one sparse/dense dot pair and print a single result row."""
        set_default_device(ctx)
        assert fw == "mxnet" or fw == "scipy"
        # Set funcs
        dot_func_sparse = mx.nd.sparse.dot if fw == "mxnet" else sp.spmatrix.dot
        dot_func_dense = mx.nd.dot if fw == "mxnet" else np.dot
        # Create matrix instances
        lhs_nd = rand_ndarray(lhs_shape, lhs_stype, density=lhs_den, distribution=distribution)
        # only uniform distribution supported for rhs
        if rhs_stype == 'csr':
            rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_den, distribution=distribution)
        else:
            rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_den, distribution="uniform")
        lhs_dns = None
        rhs_dns = None
        dense_cost = None
        sparse_cost = None

        if fw == "mxnet":
            lhs_dns = lhs_nd if lhs_stype == 'default' else lhs_nd.tostype('default')
            rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
            # One warm up run, verify correctness
            out = dot_func_sparse(lhs_nd, rhs_dns, trans_lhs)
            out_expected = dot_func_dense(lhs_dns, rhs_dns, trans_lhs)
            assert_almost_equal(out.asnumpy(), out_expected.asnumpy(), rtol=1e-1, atol=1e-1)
            sparse_cost = measure_cost(num_repeat, False, False, dot_func_sparse, lhs_nd, rhs_nd, trans_lhs)
            dense_cost = measure_cost(num_repeat, False, False, dot_func_dense, lhs_dns, rhs_dns, trans_lhs)
        else:
            lhs_dns = lhs_nd.asnumpy()
            rhs_dns = rhs_nd.asnumpy()
            lhs_nd = sp.csr_matrix(lhs_nd.asnumpy())
            rhs_nd = rhs_nd.asnumpy()
            # One warm up run, verify correctness
            lhs_nd_copy = sp.spmatrix.transpose(lhs_nd) if trans_lhs else lhs_nd
            out = dot_func_sparse(lhs_nd_copy, rhs_dns)
            sparse_cost = measure_cost(num_repeat, trans_lhs, False, dot_func_sparse, lhs_nd, rhs_nd)
            dense_cost = measure_cost(num_repeat, trans_lhs, True, dot_func_dense, lhs_dns, rhs_dns)

        speedup = dense_cost / sparse_cost
        # Print results
        m = lhs_shape[0]
        k = lhs_shape[1]
        n = rhs_shape[1]
        result_pattern = '{:15.1f} {:15.1f} {:>10} {:8d} {:8d} {:8d} {:13.2f} {:13.2f} {:8.2f}'
        results = result_pattern.format(lhs_den*100,
                                        rhs_den*100,
                                        str(ctx),
                                        m,
                                        k,
                                        n,
                                        sparse_cost*1000,
                                        dense_cost*1000,
                                        speedup)
        print(results)

    def print_benchmark_info(lhs, rhs, lhs_trans, fw):
        """Print the banner and column headers for one benchmark setup."""
        trans_str = "^T" if lhs_trans else ""
        print("========================================================")
        # BUG FIX: the '%' formatting must be inside the print() call;
        # ``print(...) % args`` applies '%' to print's None return value
        # and raises TypeError at runtime.
        print(" %s sparse dot benchmark: dot(%s, %s) = %s " % (fw, lhs, rhs, rhs))
        print(" (matrix multiplication: (m x k)%s * (k x n) = m x n) " % (trans_str))
        print("========================================================")
        headline_pattern = '{:>15} {:>15} {:>10} {:>8} {:>8} {:>8} {:>13} {:>13} {:>8}'
        headline = headline_pattern.format('lhs_density(%)',
                                           'rhs_density(%)',
                                           'context',
                                           'm', 'k', 'n',
                                           't_sparse(ms)',
                                           't_dense(ms)',
                                           'speedup')
        print(headline)

    def run_benchmark(ctx=None, lhs="csr", lhs_trans=False, rhs="dns", fw="mxnet", rhs_density=1,
                      distribution="uniform"):
        """Sweep output dim, feature dim, batch size and density for one
        (lhs storage, rhs storage, framework) combination."""
        if rhs_density > 1 or rhs_density < 0:
            raise ValueError("rhs_density has to be between 0 and 1")

        print_benchmark_info(lhs, rhs, lhs_trans, fw)

        if rhs == "csr":
            lhs_stype = "default"
            rhs_stype = "csr"
            assert (lhs_stype == 'default'), "Only dot(default, csr) supported"
            # Arrange dimensions according to use case. For below csr will have num_rows << num_cols
            feature_dim_list = data_dict['batch_size']
            batch_size_list = data_dict['m']
            output_dim_list = data_dict['feature_dim']
            density_list = data_dict['density']
            default_output_index = data_dict['default_index']['feature_dim']
            default_density_index = data_dict['default_index']['density']
            default_feature_index = data_dict['default_index']['batch_size']
            default_batch_size_index = data_dict['default_index']['output_dim']
            num_repeat = data_dict['num_repeat']
        else:
            lhs_stype = "csr"
            rhs_stype = "row_sparse" if rhs == "rsp" else "default"

            feature_dim_list = data_dict['feature_dim']
            output_dim_list = data_dict['m']
            batch_size_list = data_dict['batch_size']
            density_list = data_dict['density']

            default_output_index = data_dict['default_index']['output_dim']
            default_batch_size_index = data_dict['default_index']['batch_size']
            default_feature_index = data_dict['default_index']['feature_dim']
            default_density_index = data_dict['default_index']['density']
            num_repeat = data_dict['num_repeat']

        for output_dim in output_dim_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size_list[default_batch_size_index],
                       feature_dim_list[default_feature_index]),
                      (output_row_dim, output_dim),
                      lhs_stype, rhs_stype,
                      density_list[default_density_index], rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

        for feature_dim in feature_dim_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim
            bench_dot((batch_size_list[default_batch_size_index], feature_dim),
                      (output_row_dim, output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype, density_list[default_density_index], rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat, fw=fw, distribution=distribution)

        for batch_size in batch_size_list:
            if lhs_trans:
                output_row_dim = batch_size
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size, feature_dim_list[default_feature_index]),
                      (output_row_dim,
                       output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype, density_list[default_density_index],
                      rhs_density, lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

        for density in density_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size_list[default_batch_size_index],
                       feature_dim_list[default_feature_index]),
                      (output_row_dim,
                       output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype, density, density, lhs_trans, ctx,
                      num_repeat=num_repeat, fw=fw, distribution=distribution)

    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(ARGS.num_omp_threads)))
    context = mx.gpu() if ARGS.gpu else mx.cpu()
    # TODO(anirudh): make the data dicts to config which can be passed at runtime
    distributions = ["uniform", "powerlaw"]
    for distribution in distributions:
        run_benchmark(context, lhs="csr",
                      rhs="default", lhs_trans=False,
                      fw="mxnet", rhs_density=1,
                      distribution=distribution)
        run_benchmark(context, lhs="csr",
                      rhs="default", lhs_trans=True,
                      fw="mxnet", rhs_density=1,
                      distribution=distribution)
        run_benchmark(context, lhs="csr",
                      rhs="rsp", lhs_trans=False,
                      fw="mxnet", rhs_density=0.05,
                      distribution=distribution)
        run_benchmark(context, lhs="default",
                      rhs="csr", lhs_trans=False,
                      fw="mxnet", rhs_density=0.001,
                      distribution=distribution)
        # scipy路径 only makes sense on CPU.
        if not ARGS.gpu:
            run_benchmark(context, lhs="csr",
                          rhs="default", lhs_trans=False,
                          fw="scipy", rhs_density=1,
                          distribution=distribution)
            run_benchmark(context, lhs="csr",
                          rhs="default", lhs_trans=True,
                          fw="scipy", rhs_density=1,
                          distribution=distribution)
if __name__ == "__main__":
begin_time = time.time()
test_dot_real(KDDA)
test_dot_real(AVAZU)
test_dot_real(CRITEO)
test_dot_synthetic(SYNTHETIC1)
test_dot_synthetic(SYNTHETIC2)
total_time = time.time() - begin_time
print("total time is %f") % total_time
|
{
"content_hash": "ffeea5d64a75fab5ce8cc6a4d074d9c1",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 123,
"avg_line_length": 44.766146993318486,
"alnum_prop": 0.5244278606965174,
"repo_name": "DickJC123/mxnet",
"id": "a2dfd03a6bd343a1ce5c38b97795235bd6ce70ac",
"size": "20886",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "benchmark/python/sparse/dot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151356"
},
{
"name": "C++",
"bytes": "12029257"
},
{
"name": "CMake",
"bytes": "213440"
},
{
"name": "Cuda",
"bytes": "1528224"
},
{
"name": "Cython",
"bytes": "26285"
},
{
"name": "Dockerfile",
"bytes": "54893"
},
{
"name": "Groovy",
"bytes": "132682"
},
{
"name": "Jupyter Notebook",
"bytes": "1889643"
},
{
"name": "Makefile",
"bytes": "8991"
},
{
"name": "PowerShell",
"bytes": "6699"
},
{
"name": "Python",
"bytes": "8615578"
},
{
"name": "Shell",
"bytes": "172547"
}
],
"symlink_target": ""
}
|
from couchdbkit import ResourceNotFound
from django.conf import settings
from corehq.util.quickcache import quickcache
from dimagi.utils.couch.database import get_db
@quickcache([], timeout=60)
def get_indicator_config():
    """Return the ``namespaces`` mapping from the couch
    INDICATOR_CONFIGURATION document, or {} when the doc is missing.

    Results are cached for 60 seconds via quickcache.
    """
    try:
        config_doc = get_db().open_doc('INDICATOR_CONFIGURATION')
    except ResourceNotFound:
        return {}
    return config_doc.get('namespaces', {})
def get_namespaces(domain, as_choices=False):
    """Return the namespaces configured for ``domain``.

    With ``as_choices=True`` the raw (value, label) pairs are returned
    (empty tuple if the domain is unknown); otherwise just the namespace
    values as a list.
    """
    configured = get_indicator_config()
    if as_choices:
        return configured.get(domain, ())
    return [choice[0] for choice in configured.get(domain, [])]
def get_namespace_name(domain, namespace):
    """Look up the display label of ``namespace`` within ``domain``,
    or None when it is not configured."""
    choices = dict(get_namespaces(domain, as_choices=True))
    return choices.get(namespace)
def get_indicator_domains():
    """Return the domains that have indicator configuration."""
    config = get_indicator_config()
    return config.keys()
def get_mvp_domains():
    """Return the configured MVP domains, falling back to the
    hard-coded MVP.DOMAINS list (always used under unit tests)."""
    from mvp.models import MVP
    if settings.UNIT_TESTING:
        return MVP.DOMAINS
    configured = get_indicator_domains()
    return configured or MVP.DOMAINS
|
{
"content_hash": "d8c533c4049a532c2ee09dc8f7950a91",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 26.794871794871796,
"alnum_prop": 0.6995215311004784,
"repo_name": "puttarajubr/commcare-hq",
"id": "6ea001c57566f73d1c5a026e56e656fb4bf0312a",
"size": "1045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/apps/indicators/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
import twitter
from django.apps import AppConfig
from django.db.models.signals import post_save
def truncate_headline(headline, n_char):
    """Shorten ``headline`` by at least ``n_char`` characters plus room
    for a trailing "...", preferring to cut at the last separator
    character (space or punctuation) before the cut point.
    """
    cut = -n_char - 3
    probe = headline[cut]
    headline = headline[:cut]
    pos = len(headline)
    # Walk backwards until a separator is found or the start is reached.
    while probe not in " ,.;:" and pos:
        pos -= 1
        probe = headline[pos]
    if pos != -1:
        headline = headline[:pos]
    return headline + "..."
# Compiled once at import time and reused by post_update_on_twitter.
# NOTE(review): neither ``Template`` nor ``settings`` is imported in
# this module as shown — confirm the imports were not lost in a refactor.
_twitter_template = Template(settings.MICROBLOG_TWITTER_MESSAGE_TEMPLATE)
def post_update_on_twitter(sender, instance, created, **kwargs):
    """post_save handler: tweet a newly published PostContent.

    NOTE(review): this function uses Python 2-only syntax
    (``except Exception, e``, print statements) and references several
    names (settings, Context, Spam, mail, log, import_module) that are
    not imported in the visible module — confirm the import block.
    """
    # Skip languages not configured for Twitter announcements.
    if settings.MICROBLOG_TWITTER_LANGUAGES is not None and instance.language not in settings.MICROBLOG_TWITTER_LANGUAGES:
        return
    post = instance.post
    if not post.is_published():
        return
    # Resolve the post URL, either via a callable or a dotted-path string
    # naming one; failures are mailed to the admins and abort the tweet.
    try:
        if not isinstance(settings.MICROBLOG_TWITTER_POST_URL_MANGLER, str):
            url = settings.MICROBLOG_TWITTER_POST_URL_MANGLER(instance)
        else:
            module, attr = settings.MICROBLOG_TWITTER_POST_URL_MANGLER.rsplit('.', 1)
            mod = import_module(module)
            url = getattr(mod, attr)(instance)
    except Exception, e:
        message = 'Post: "%s"\n\nCannot retrieve the url: "%s"' % (instance.headline, str(e))
        mail.mail_admins('[blog] error preparing the tweet', message)
        return
    # Spam rows record where this post was already announced; skip if the
    # configured account has already tweeted it.
    existent = set(( x.value for x in Spam.objects.filter(post=post, method='t') ))
    recipients = set((settings.MICROBLOG_TWITTER_USERNAME,)) - existent
    if not recipients:
        return
    context = Context({
        'content': instance,
        'headline': instance.headline,
        'url': url,
    })
    status = _twitter_template.render(context)
    # If the rendered status exceeds 140 chars, re-render with the
    # headline truncated by exactly the overflow amount.
    diff_len = len(status) - 140
    if diff_len > 0:
        context = Context({
            'content': instance,
            'headline': truncate_headline(instance.headline, diff_len),
            'url': url,
        })
        status = _twitter_template.render(context)
    # Debug mode only prints the would-be tweet and stops.
    if settings.MICROBLOG_TWITTER_DEBUG:
        print 'Tweet for', instance.headline.encode('utf-8')
        print status
        print '--------------------------------------------'
        return
    log.info('"%s" tweet on "%s"', instance.headline.encode('utf-8'), settings.MICROBLOG_TWITTER_USERNAME)
    # Post the update and record it in Spam so it is never re-sent;
    # failures are mailed to the admins.
    try:
        api = twitter.Api(settings.MICROBLOG_TWITTER_USERNAME, settings.MICROBLOG_TWITTER_PASSWORD)
        api.PostUpdate(status)
        s = Spam(post=post, method='t', value=settings.MICROBLOG_TWITTER_USERNAME)
        s.save()
    except Exception, e:
        message = 'Post: "%s"\n\nCannot post status update: "%s"' % (instance.headline, str(e))
        mail.mail_admins('[blog] error tweeting the new status', message)
        return
def post_update_on_email(sender, instance, created, **kwargs):
    """post_save handler: email a newly published PostContent to the
    configured recipients, once per recipient (tracked via Spam rows).

    NOTE(review): uses Python 2-only ``except Exception, e`` syntax and
    reads the module-global ``_email_templates`` which is only assigned
    inside MicroblogConfig.ready() — verify it is actually set globally.
    """
    # Skip languages not configured for email announcements.
    if settings.MICROBLOG_EMAIL_LANGUAGES is not None and instance.language not in settings.MICROBLOG_EMAIL_LANGUAGES:
        return
    post = instance.post
    if not post.is_published():
        return
    # Only mail recipients that have not already received this post.
    existent = set(( x.value for x in Spam.objects.filter(post=post, method='e') ))
    recipients = set(settings.MICROBLOG_EMAIL_RECIPIENTS) - existent
    if not recipients:
        return
    ctx = Context({
        'content': instance,
    })
    from django.utils.html import strip_tags
    from lxml import html
    from lxml.html.clean import clean_html
    subject = strip_tags(_email_templates['subject'].render(ctx))
    try:
        hdoc = html.fromstring(_email_templates['body'].render(ctx))
    except Exception, e:
        message = 'Post: "%s"\n\nCannot parse as html: "%s"' % (subject, str(e))
        mail.mail_admins('[blog] error while sending mail', message)
        return
    # From the lxml docs:
    # The module lxml.html.clean provides a Cleaner class for cleaning up
    # HTML pages. It supports removing embedded or script content, special
    # tags, CSS style annotations and much more. Say, you have an evil web
    # page from an untrusted source that contains lots of content that
    # upsets browsers and tries to run evil code on the client side:
    #
    # We don't need to protect ourselves from malicious code here, but
    # given the state of email clients we can strip embeds, javascript
    # and iframes — content that is almost never rendered properly anyway.
    hdoc = clean_html(hdoc)
    # Make all links absolute so they also work inside a mail client.
    hdoc.make_links_absolute(dsettings.DEFAULT_URL_PREFIX)
    body_html = html.tostring(hdoc)
    # Plain-text alternative for mail clients that don't support HTML.
    import html2text
    h = html2text.HTML2Text()
    h.ignore_images = True
    body_text = h.handle(body_html)
    for r in recipients:
        log.info('"%s" email to "%s"', instance.headline.encode('utf-8'), r)
        email = mail.EmailMultiAlternatives(subject, body_text, dsettings.DEFAULT_FROM_EMAIL, [r])
        email.attach_alternative(body_html, 'text/html')
        email.send()
        # Record delivery so this recipient is skipped next time.
        s = Spam(post=post, method='e', value=r)
        s.save()
class MicroblogConfig(AppConfig):
    """Django app config that wires the post-save notification handlers
    (email and/or Twitter) for PostContent, per settings flags."""
    name = 'microblog'
    verbose_name = "Microblog"

    def ready(self):
        # Imported for its side effect of registering moderation hooks.
        import moderation

        if settings.MICROBLOG_EMAIL_INTEGRATION:
            # BUG FIX: post_update_on_email reads ``_email_templates`` as a
            # module-level global; without the ``global`` declaration this
            # assignment only created an unused local, leaving the signal
            # handler to fail with NameError at send time.
            global _email_templates
            _email_templates = {
                'subject': Template(settings.MICROBLOG_EMAIL_SUBJECT_TEMPLATE),
                'body': Template(settings.MICROBLOG_EMAIL_BODY_TEMPLATE),
            }
            post_save.connect(post_update_on_email, sender=PostContent)
        if settings.MICROBLOG_TWITTER_INTEGRATION:
            post_save.connect(post_update_on_twitter, sender=PostContent)
|
{
"content_hash": "e240c38314352f3cb1ced20fa2cabece",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 122,
"avg_line_length": 37.939189189189186,
"alnum_prop": 0.642386464826358,
"repo_name": "barrachri/epcon",
"id": "4339b8089473e5c97b8746dfaba46e517fed69a2",
"size": "5615",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "microblog/apps.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "1490"
},
{
"name": "CSS",
"bytes": "4751434"
},
{
"name": "HTML",
"bytes": "2177936"
},
{
"name": "JavaScript",
"bytes": "3465605"
},
{
"name": "Makefile",
"bytes": "3338"
},
{
"name": "PHP",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "1255065"
},
{
"name": "Ruby",
"bytes": "1870"
},
{
"name": "Shell",
"bytes": "1679"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import mock
from rally import consts
from rally import exceptions
from rally.plugins.openstack.wrappers import network
from tests.unit import test
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions
SVC = "rally.plugins.openstack.wrappers.network."
class NovaNetworkWrapperTestCase(test.TestCase):
    """Unit tests for network.NovaNetworkWrapper using a fully mocked
    nova client (no real OpenStack calls are made)."""

    class Net(object):
        # Lightweight stand-in for a nova network object; mirrors the
        # client behavior of exposing tenant_id as project_id.
        def __init__(self, **kwargs):
            if "tenant_id" in kwargs:
                kwargs["project_id"] = kwargs.pop("tenant_id")
            self.__dict__.update(kwargs)

    def get_wrapper(self, *skip_cidrs, **kwargs):
        """Build a wrapper whose mocked client already has networks with
        the given CIDRs (these become the wrapper's skip list)."""
        mock_clients = mock.Mock()
        mock_clients.nova.return_value.networks.list.return_value = [
            self.Net(cidr=cidr) for cidr in skip_cidrs]
        return network.NovaNetworkWrapper(mock_clients, kwargs)

    def test__init__(self):
        """Existing network CIDRs are collected into skip_cidrs."""
        skip_cidrs = ["foo_cidr", "bar_cidr"]
        service = self.get_wrapper(*skip_cidrs)
        self.assertEqual(service.skip_cidrs, skip_cidrs)
        service.client.networks.list.assert_called_once_with()

    @mock.patch("rally.plugins.openstack.wrappers.network.generate_cidr")
    def test__generate_cidr(self, mock_generate_cidr):
        """_generate_cidr keeps generating until it finds a CIDR not in
        the skip list."""
        skip_cidrs = [5, 7]
        cidrs = iter(range(7))
        mock_generate_cidr.side_effect = (
            lambda start_cidr: start_cidr + next(cidrs)
        )
        service = self.get_wrapper(*skip_cidrs, start_cidr=3)
        self.assertEqual(service._generate_cidr(), 3)
        self.assertEqual(service._generate_cidr(), 4)
        self.assertEqual(service._generate_cidr(), 6)  # 5 is skipped
        self.assertEqual(service._generate_cidr(), 8)  # 7 is skipped
        self.assertEqual(service._generate_cidr(), 9)
        self.assertEqual(mock_generate_cidr.mock_calls,
                         [mock.call(start_cidr=3)] * 7)

    @mock.patch("rally.common.utils.generate_random_name",
                return_value="foo_name")
    def test_create_network(self, mock_generate_random_name):
        """create_network returns the normalized dict and passes the
        generated name/cidr through to the client."""
        service = self.get_wrapper()
        service.client.networks.create.side_effect = (
            lambda **kwargs: self.Net(id="foo_id", **kwargs))
        service._generate_cidr = mock.Mock(return_value="foo_cidr")
        net = service.create_network("foo_tenant", bar="spam")
        self.assertEqual(net, {"id": "foo_id",
                               "name": "foo_name",
                               "cidr": "foo_cidr",
                               "status": "ACTIVE",
                               "external": False,
                               "tenant_id": "foo_tenant"})
        mock_generate_random_name.assert_called_once_with("rally_net_")
        service._generate_cidr.assert_called_once_with()
        service.client.networks.create.assert_called_once_with(
            project_id="foo_tenant", cidr="foo_cidr", label="foo_name")

    def test_delete_network(self):
        """delete_network proxies to the client by network id."""
        service = self.get_wrapper()
        service.client.networks.delete.return_value = "foo_deleted"
        self.assertEqual(service.delete_network({"id": "foo_id"}),
                         "foo_deleted")
        service.client.networks.delete.assert_called_once_with("foo_id")

    def test_list_networks(self):
        """list_networks normalizes each nova network into a dict."""
        service = self.get_wrapper()
        # reset_mock clears the call made by the wrapper's constructor.
        service.client.networks.list.reset_mock()
        service.client.networks.list.return_value = [
            self.Net(id="foo_id", project_id="foo_tenant", cidr="foo_cidr",
                     label="foo_label"),
            self.Net(id="bar_id", project_id="bar_tenant", cidr="bar_cidr",
                     label="bar_label")]
        expected = [
            {"id": "foo_id", "cidr": "foo_cidr", "name": "foo_label",
             "status": "ACTIVE", "external": False, "tenant_id": "foo_tenant"},
            {"id": "bar_id", "cidr": "bar_cidr", "name": "bar_label",
             "status": "ACTIVE", "external": False, "tenant_id": "bar_tenant"}]
        self.assertEqual(expected, service.list_networks())
        service.client.networks.list.assert_called_once_with()

    def test__get_floating_ip(self):
        """_get_floating_ip returns the id; NotFound yields None unless
        do_raise is set, in which case GetResourceNotFound is raised."""
        wrap = self.get_wrapper()
        wrap.client.floating_ips.get.return_value = mock.Mock(id="foo_id",
                                                              ip="foo_ip")
        fip = wrap._get_floating_ip("fip_id")
        wrap.client.floating_ips.get.assert_called_once_with("fip_id")
        self.assertEqual(fip, "foo_id")
        wrap.client.floating_ips.get.side_effect = (
            nova_exceptions.NotFound(""))
        self.assertIsNone(wrap._get_floating_ip("fip_id"))
        self.assertRaises(exceptions.GetResourceNotFound,
                          wrap._get_floating_ip, "fip_id", do_raise=True)

    def test_create_floating_ip(self):
        """create_floating_ip uses the given pool, or falls back to the
        first pool reported by the client."""
        wrap = self.get_wrapper()
        wrap.client.floating_ips.create.return_value = mock.Mock(id="foo_id",
                                                                 ip="foo_ip")
        fip = wrap.create_floating_ip(ext_network="bar_net", bar="spam")
        self.assertEqual(fip, {"ip": "foo_ip", "id": "foo_id"})
        wrap.client.floating_ips.create.assert_called_once_with("bar_net")
        net = mock.Mock()
        net.name = "foo_net"
        wrap.client.floating_ip_pools.list.return_value = [net]
        fip = wrap.create_floating_ip()
        self.assertEqual(fip, {"ip": "foo_ip", "id": "foo_id"})
        wrap.client.floating_ips.create.assert_called_with("foo_net")

    def test_delete_floating_ip(self):
        """Without wait=True deletion does not poll; with wait=True it
        polls _get_floating_ip until GetResourceNotFound is raised."""
        wrap = self.get_wrapper()
        fip_found = iter(range(3))

        def get_fip(*args, **kwargs):
            # Report the fip as present three times, then gone.
            for i in fip_found:
                return "fip_id"
            raise exceptions.GetResourceNotFound(resource="")
        wrap._get_floating_ip = mock.Mock(side_effect=get_fip)
        wrap.delete_floating_ip("fip_id")
        wrap.client.floating_ips.delete.assert_called_once_with("fip_id")
        self.assertFalse(wrap._get_floating_ip.called)
        wrap.delete_floating_ip("fip_id", wait=True)
        self.assertEqual(
            [mock.call("fip_id", do_raise=True)] * 4,
            wrap._get_floating_ip.mock_calls)

    def test_supports_extension(self):
        """Only the security-group extension is supported by nova-network."""
        wrap = self.get_wrapper()
        self.assertFalse(wrap.supports_extension("extension")[0])
        self.assertTrue(wrap.supports_extension("security-group")[0])
class NeutronWrapperTestCase(test.TestCase):
    """Tests for network.NeutronWrapper.

    The wrapped neutron client is a mock.Mock(), so every test both
    stubs the client's return values and asserts the exact calls the
    wrapper is expected to issue.
    """

    def get_wrapper(self, *skip_cidrs, **kwargs):
        return network.NeutronWrapper(mock.Mock(), kwargs)

    def test_SUBNET_IP_VERSION(self):
        self.assertEqual(network.NeutronWrapper.SUBNET_IP_VERSION, 4)

    @mock.patch("rally.plugins.openstack.wrappers.network.generate_cidr")
    def test__generate_cidr(self, mock_generate_cidr):
        cidrs = iter(range(5))
        mock_generate_cidr.side_effect = (
            lambda start_cidr: start_cidr + next(cidrs)
        )
        service = self.get_wrapper(start_cidr=3)
        self.assertEqual(service._generate_cidr(), 3)
        self.assertEqual(service._generate_cidr(), 4)
        self.assertEqual(service._generate_cidr(), 5)
        self.assertEqual(service._generate_cidr(), 6)
        self.assertEqual(service._generate_cidr(), 7)
        self.assertEqual(mock_generate_cidr.mock_calls,
                         [mock.call(start_cidr=3)] * 5)

    def test_external_networks(self):
        wrap = self.get_wrapper()
        wrap.client.list_networks.return_value = {"networks": "foo_networks"}
        self.assertEqual(wrap.external_networks, "foo_networks")
        wrap.client.list_networks.assert_called_once_with(
            **{"router:external": True})

    def test_get_network(self):
        wrap = self.get_wrapper()
        neutron_net = {"id": "foo_id",
                       "name": "foo_name",
                       "tenant_id": "foo_tenant",
                       "status": "foo_status",
                       "router:external": "foo_external",
                       "subnets": "foo_subnets"}
        expected_net = {"id": "foo_id",
                        "name": "foo_name",
                        "tenant_id": "foo_tenant",
                        "status": "foo_status",
                        "external": "foo_external",
                        "router_id": None,
                        "subnets": "foo_subnets"}
        wrap.client.show_network.return_value = {"network": neutron_net}
        net = wrap.get_network(net_id="foo_id")
        self.assertEqual(net, expected_net)
        wrap.client.show_network.assert_called_once_with("foo_id")
        wrap.client.show_network.side_effect = (
            neutron_exceptions.NeutronClientException)
        self.assertRaises(network.NetworkWrapperException, wrap.get_network,
                          net_id="foo_id")
        wrap.client.list_networks.return_value = {"networks": [neutron_net]}
        net = wrap.get_network(name="foo_name")
        self.assertEqual(net, expected_net)
        wrap.client.list_networks.assert_called_once_with(name="foo_name")
        wrap.client.list_networks.return_value = {"networks": []}
        self.assertRaises(network.NetworkWrapperException, wrap.get_network,
                          name="foo_name")

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_v1_pool(self, mock_generate_random_name):
        mock_generate_random_name.return_value = "foo_name"
        subnet = "subnet_id"
        tenant = "foo_tenant"
        service = self.get_wrapper()
        expected_pool = {"pool": {
            "id": "pool_id",
            "name": "foo_name",
            "subnet_id": subnet,
            "tenant_id": tenant}}
        service.client.create_pool.return_value = expected_pool
        resultant_pool = service.create_v1_pool(tenant, subnet)
        service.client.create_pool.assert_called_once_with({
            "pool": {"lb_method": "ROUND_ROBIN",
                     "subnet_id": subnet,
                     "tenant_id": tenant,
                     "protocol": "HTTP",
                     "name": "foo_name"}})
        self.assertEqual(resultant_pool, expected_pool)

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_network(self, mock_generate_random_name):
        mock_generate_random_name.return_value = "foo_name"
        service = self.get_wrapper()
        service.client.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": "foo_name",
                        "status": "foo_status"}}
        net = service.create_network("foo_tenant")
        mock_generate_random_name.assert_called_once_with("rally_net_")
        service.client.create_network.assert_called_once_with({
            "network": {"tenant_id": "foo_tenant", "name": "foo_name"}})
        self.assertEqual(net, {"id": "foo_id",
                               "name": "foo_name",
                               "status": "foo_status",
                               "external": False,
                               "tenant_id": "foo_tenant",
                               "router_id": None,
                               "subnets": []})

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_network_with_subnets(self, mock_generate_random_name):
        subnets_num = 4
        mock_generate_random_name.return_value = "foo_name"
        service = self.get_wrapper()
        subnets_cidrs = iter(range(subnets_num))
        subnets_ids = iter(range(subnets_num))
        service._generate_cidr = mock.Mock(
            side_effect=lambda: "cidr-%d" % next(subnets_cidrs))
        service.client.create_subnet = mock.Mock(
            side_effect=lambda i: {
                "subnet": {"id": "subnet-%d" % next(subnets_ids)}})
        service.client.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": "foo_name",
                        "status": "foo_status"}}
        net = service.create_network("foo_tenant", subnets_num=subnets_num)
        service.client.create_network.assert_called_once_with({
            "network": {"tenant_id": "foo_tenant", "name": "foo_name"}})
        self.assertEqual(net, {"id": "foo_id",
                               "name": "foo_name",
                               "status": "foo_status",
                               "external": False,
                               "router_id": None,
                               "tenant_id": "foo_tenant",
                               "subnets": ["subnet-%d" % i
                                           for i in range(subnets_num)]})
        self.assertEqual(
            service.client.create_subnet.mock_calls,
            [mock.call({"subnet": {"name": "foo_name",
                                   "enable_dhcp": True,
                                   "network_id": "foo_id",
                                   "tenant_id": "foo_tenant",
                                   "ip_version": service.SUBNET_IP_VERSION,
                                   "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                                   "cidr": "cidr-%d" % i}})
             for i in range(subnets_num)])

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_network_with_router(self, mock_generate_random_name):
        mock_generate_random_name.return_value = "foo_name"
        service = self.get_wrapper()
        service.create_router = mock.Mock(return_value={"id": "foo_router"})
        service.client.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": "foo_name",
                        "status": "foo_status"}}
        net = service.create_network("foo_tenant", add_router=True)
        self.assertEqual(net, {"id": "foo_id",
                               "name": "foo_name",
                               "status": "foo_status",
                               "external": False,
                               "tenant_id": "foo_tenant",
                               "router_id": "foo_router",
                               "subnets": []})
        service.create_router.assert_called_once_with(external=True,
                                                      tenant_id="foo_tenant")

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_network_with_router_and_subnets(self,
                                                    mock_generate_random_name):
        subnets_num = 4
        mock_generate_random_name.return_value = "foo_name"
        service = self.get_wrapper()
        service._generate_cidr = mock.Mock(return_value="foo_cidr")
        service.create_router = mock.Mock(return_value={"id": "foo_router"})
        service.client.create_subnet = mock.Mock(
            return_value={"subnet": {"id": "foo_subnet"}})
        service.client.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": "foo_name",
                        "status": "foo_status"}}
        net = service.create_network("foo_tenant", add_router=True,
                                     subnets_num=subnets_num,
                                     dns_nameservers=["foo_nameservers"])
        self.assertEqual(net, {"id": "foo_id",
                               "name": "foo_name",
                               "status": "foo_status",
                               "external": False,
                               "tenant_id": "foo_tenant",
                               "router_id": "foo_router",
                               "subnets": ["foo_subnet"] * subnets_num})
        service.create_router.assert_called_once_with(external=True,
                                                      tenant_id="foo_tenant")
        self.assertEqual(
            service.client.create_subnet.mock_calls,
            [mock.call({"subnet": {"name": "foo_name",
                                   "enable_dhcp": True,
                                   "network_id": "foo_id",
                                   "tenant_id": "foo_tenant",
                                   "ip_version": service.SUBNET_IP_VERSION,
                                   "dns_nameservers": ["foo_nameservers"],
                                   "cidr": "foo_cidr"}})] * subnets_num)
        self.assertEqual(service.client.add_interface_router.mock_calls,
                         [mock.call("foo_router", {"subnet_id": "foo_subnet"})
                          for i in range(subnets_num)])

    @mock.patch("rally.plugins.openstack.wrappers.network.NeutronWrapper"
                ".supports_extension", return_value=(False, ""))
    def test_delete_network(self, mock_neutron_wrapper_supports_extension):
        service = self.get_wrapper()
        service.client.list_ports.return_value = {"ports": []}
        service.client.delete_network.return_value = "foo_deleted"
        result = service.delete_network({"id": "foo_id", "router_id": None,
                                         "subnets": []})
        self.assertEqual(result, "foo_deleted")
        self.assertEqual(service.client.remove_gateway_router.mock_calls, [])
        self.assertEqual(
            service.client.remove_interface_router.mock_calls, [])
        self.assertEqual(service.client.delete_router.mock_calls, [])
        self.assertEqual(service.client.delete_subnet.mock_calls, [])
        service.client.delete_network.assert_called_once_with("foo_id")

    def test_delete_v1_pool(self):
        service = self.get_wrapper()
        pool = {"pool": {"id": "pool-id"}}
        service.delete_v1_pool(pool["pool"]["id"])
        # NOTE: this previously used ``delete_pool.called_once_with(...)``,
        # which is not a mock assertion method -- it silently creates a new
        # child mock and verifies nothing.  Use the real assertion.
        service.client.delete_pool.assert_called_once_with("pool-id")

    @mock.patch("rally.plugins.openstack.wrappers.network.NeutronWrapper"
                ".supports_extension", return_value=(True, ""))
    def test_delete_network_with_dhcp_and_router_and_ports_and_subnets(
            self, mock_neutron_wrapper_supports_extension):
        service = self.get_wrapper()
        agents = ["foo_agent", "bar_agent"]
        subnets = ["foo_subnet", "bar_subnet"]
        ports = ["foo_port", "bar_port"]
        service.client.list_dhcp_agent_hosting_networks.return_value = (
            {"agents": [{"id": agent_id} for agent_id in agents]})
        service.client.list_ports.return_value = (
            {"ports": [{"id": port_id} for port_id in ports]})
        service.client.delete_network.return_value = "foo_deleted"
        result = service.delete_network(
            {"id": "foo_id", "router_id": "foo_router", "subnets": subnets,
             "lb_pools": []})
        self.assertEqual(result, "foo_deleted")
        self.assertEqual(
            service.client.remove_network_from_dhcp_agent.mock_calls,
            [mock.call(agent_id, "foo_id") for agent_id in agents])
        self.assertEqual(service.client.remove_gateway_router.mock_calls,
                         [mock.call("foo_router")])
        self.assertEqual(
            service.client.remove_interface_router.mock_calls,
            [mock.call("foo_router", {"subnet_id": subnet_id})
             for subnet_id in subnets])
        self.assertEqual(service.client.delete_router.mock_calls,
                         [mock.call("foo_router")])
        self.assertEqual(service.client.delete_port.mock_calls,
                         [mock.call(port_id) for port_id in ports])
        self.assertEqual(service.client.delete_subnet.mock_calls,
                         [mock.call(subnet_id) for subnet_id in subnets])
        service.client.delete_network.assert_called_once_with("foo_id")
        mock_neutron_wrapper_supports_extension.assert_called_once_with(
            "dhcp_agent_scheduler")

    def test_list_networks(self):
        service = self.get_wrapper()
        service.client.list_networks.return_value = {"networks": "foo_nets"}
        self.assertEqual(service.list_networks(), "foo_nets")
        service.client.list_networks.assert_called_once_with()

    @mock.patch(SVC + "NeutronWrapper.external_networks")
    def test_create_floating_ip(self, mock_neutron_wrapper_external_networks):
        wrap = self.get_wrapper()
        wrap.create_port = mock.Mock(return_value={"id": "port_id"})
        wrap.client.create_floatingip = mock.Mock(
            return_value={"floatingip": {"id": "fip_id",
                                         "floating_ip_address": "fip_ip"}})
        self.assertRaises(ValueError, wrap.create_floating_ip)
        mock_neutron_wrapper_external_networks.__get__ = lambda *args: []
        self.assertRaises(network.NetworkWrapperException,
                          wrap.create_floating_ip, tenant_id="foo_tenant")
        mock_neutron_wrapper_external_networks.__get__ = (
            lambda *args: [{"id": "ext_id"}]
        )
        fip = wrap.create_floating_ip(tenant_id="foo_tenant")
        self.assertEqual(fip, {"id": "fip_id", "ip": "fip_ip"})
        wrap.get_network = mock.Mock(
            return_value={"id": "foo_net", "external": True})
        wrap.create_floating_ip(tenant_id="foo_tenant", ext_network="ext_net")
        wrap.get_network = mock.Mock(
            return_value={"id": "foo_net", "external": False})
        wrap.create_floating_ip(tenant_id="foo_tenant")
        self.assertRaises(network.NetworkWrapperException,
                          wrap.create_floating_ip, tenant_id="foo_tenant",
                          ext_network="ext_net")

    def test_delete_floating_ip(self):
        wrap = self.get_wrapper()
        wrap.delete_floating_ip("fip_id")
        wrap.delete_floating_ip("fip_id", ignored_kwarg="bar")
        self.assertEqual(wrap.client.delete_floatingip.mock_calls,
                         [mock.call("fip_id")] * 2)

    @mock.patch(SVC + "NeutronWrapper.external_networks")
    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_router(self, mock_generate_random_name,
                           mock_neutron_wrapper_external_networks):
        wrap = self.get_wrapper()
        mock_generate_random_name.return_value = "random_name"
        wrap.client.create_router.return_value = {"router": "foo_router"}
        mock_neutron_wrapper_external_networks.__get__ = (
            lambda *args: [{"id": "ext_id"}]
        )
        router = wrap.create_router(name="foo_name")
        wrap.client.create_router.assert_called_once_with(
            {"router": {"name": "foo_name"}})
        self.assertEqual(router, "foo_router")
        router = wrap.create_router(external=True, foo="bar")
        wrap.client.create_router.assert_called_with(
            {"router": {"name": "random_name",
                        "external_gateway_info": {
                            "network_id": "ext_id",
                            "enable_snat": True},
                        "foo": "bar"}})

    @mock.patch("rally.common.utils.generate_random_name")
    def test_create_port(self, mock_generate_random_name):
        wrap = self.get_wrapper()
        mock_generate_random_name.return_value = "random_name"
        wrap.client.create_port.return_value = {"port": "foo_port"}
        port = wrap.create_port("foo_net", name="foo_name")
        wrap.client.create_port.assert_called_once_with(
            {"port": {"network_id": "foo_net", "name": "foo_name"}})
        self.assertEqual(port, "foo_port")
        port = wrap.create_port("foo_net", foo="bar")
        wrap.client.create_port.assert_called_with(
            {"port": {"network_id": "foo_net",
                      "name": "random_name", "foo": "bar"}})

    def test_supports_extension(self):
        wrap = self.get_wrapper()
        wrap.client.list_extensions.return_value = (
            {"extensions": [{"alias": "extension"}]})
        self.assertTrue(wrap.supports_extension("extension")[0])
        wrap.client.list_extensions.return_value = (
            {"extensions": [{"alias": "extension"}]})
        self.assertFalse(wrap.supports_extension("dummy-group")[0])
        wrap.client.list_extensions.return_value = {}
        self.assertFalse(wrap.supports_extension("extension")[0])
class FunctionsTestCase(test.TestCase):
    """Tests for the module-level helpers of the network wrapper."""

    def test_generate_cidr(self):
        incr_target = "rally.plugins.openstack.wrappers.network.cidr_incr"
        # Default start CIDR: the counter advances the third octet.
        with mock.patch(incr_target, iter(range(1, 4))):
            for octet in (1, 2, 3):
                self.assertEqual(network.generate_cidr(),
                                 "10.2.%d.0/24" % octet)
        # Explicit start CIDR: the counter advances by subnet-sized steps.
        with mock.patch(incr_target, iter(range(1, 4))):
            for offset in (64, 128, 192):
                self.assertEqual(network.generate_cidr("1.1.0.0/26"),
                                 "1.1.0.%d/26" % offset)

    def test_wrap(self):
        clients = mock.Mock()
        clients.nova().networks.list.return_value = []
        # A deployment exposing neutron gets the neutron wrapper...
        clients.services.return_value = {"foo": consts.Service.NEUTRON}
        self.assertIsInstance(network.wrap(clients, {}),
                              network.NeutronWrapper)
        # ...anything else falls back to nova-network.
        clients.services.return_value = {"foo": "bar"}
        self.assertIsInstance(network.wrap(clients, {}),
                              network.NovaNetworkWrapper)
|
{
"content_hash": "a8c24a25ddb22100bf4b660a5906e354",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 79,
"avg_line_length": 47.65478424015009,
"alnum_prop": 0.5543307086614173,
"repo_name": "aplanas/rally",
"id": "db29374f782362c74333bce7fdbabf915cc244b5",
"size": "26030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/wrappers/test_network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "48167"
},
{
"name": "Python",
"bytes": "2620059"
},
{
"name": "Shell",
"bytes": "43889"
}
],
"symlink_target": ""
}
|
"""
Web Interface Session Mapper
"""
import datetime
import uuid
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Text
from apiary.types import JSONEncodedValue
from apiary.types import IPAddress
from apiary.types import UUID
from apiary.mappers import Base
class Session(Base):
    """SQLAlchemy mapper carrying web-interface session data.

    Each row tracks one browser session: identity, authentication state,
    group membership, profile payload and request-tracking timestamps.
    """
    __tablename__ = 'sessions'

    # Random UUID primary key generated per session.
    id = Column(UUID, primary_key=True, nullable=False, default=uuid.uuid4)
    authenticated = Column(Boolean, default=False, nullable=False)
    groups = Column(JSONEncodedValue)
    # Pass the callable, NOT datetime.datetime.now(): calling it here would
    # evaluate once at import time and stamp every row with the same value.
    last_request_at = Column(DateTime, nullable=False,
                             default=datetime.datetime.now)
    last_request_uri = Column(Text, nullable=False)
    profile = Column(JSONEncodedValue)
    remote_ip = Column(IPAddress, nullable=False)
    started_at = Column(DateTime, nullable=False,
                        default=datetime.datetime.now)

    def __repr__(self):
        """Return the representation of the object

        :rtype: str

        """
        return '<Session %s (%s)>' % (self.id, self.remote_ip)
|
{
"content_hash": "ad6d05f526afb5c087f833300c5ddd06",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 28.21951219512195,
"alnum_prop": 0.6853932584269663,
"repo_name": "gmr/apiary",
"id": "6fc66ec723a6faf37ecfe8cfa52642a1fd3db072",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiary/mappers/session.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "207962"
},
{
"name": "Python",
"bytes": "79966"
}
],
"symlink_target": ""
}
|
def define_tables():
    """Populate module-level ODF element tables as a fallback.

    Defines ``inline_elements`` (elements whose content flows inline with
    surrounding text) and ``no_translate_content_elements`` (elements whose
    text content must not be offered for translation), then publishes both
    as module globals via ``globals()``.  Entries are (namespace URI, local
    name) tuples; commented-out entries are elements whose content IS
    translatable and therefore deliberately excluded.
    """
    # Copied from git commit 96b9f1419453d8079dd1631c329f04d6e005baae from
    # git://hforge.org/itools.git
    config_uri = 'urn:oasis:names:tc:opendocument:xmlns:config:1.0'
    dc_uri = 'http://purl.org/dc/elements/1.1/'
    form_uri = 'urn:oasis:names:tc:opendocument:xmlns:form:1.0'
    meta_uri = 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0'
    number_uri = 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0'
    office_uri = 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'
    presentation_uri = 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0'
    text_uri = 'urn:oasis:names:tc:opendocument:xmlns:text:1.0'
    svg_uri = 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0'
    inline_elements = [
        (text_uri, 'page-count'),
        (text_uri, 'page-number'),
        (text_uri, 'a'),
        (text_uri, 'line-break'),
        (text_uri, 'ruby-base'),
        (text_uri, 's'),
        (text_uri, 'span'),
        (text_uri, 'tab')]
    no_translate_content_elements = [
        # Config
        (config_uri, 'config-item'),
        # Dublin core
        (dc_uri, 'creator'),
        (dc_uri, 'date'),
        #(dc_uri, 'description'),
        (dc_uri, 'language'),
        #(dc_uri, 'subject'),
        #(dc_uri, 'title'),
        # Form
        (form_uri, 'item'),
        (form_uri, 'option'),
        # Meta
        (meta_uri, 'creation-date'),
        (meta_uri, 'date-string'),
        (meta_uri, 'editing-cycles'),
        (meta_uri, 'editing-duration'),
        (meta_uri, 'generator'),
        (meta_uri, 'initial-creator'),
        #(meta_uri, 'keyword'),
        (meta_uri, 'printed-by'),
        (meta_uri, 'print-date'),
        (meta_uri, 'user-defined'),
        # Number
        (number_uri, 'currency-symbol'),
        (number_uri, 'embedded-text'),
        (number_uri, 'text'),
        # Office
        (office_uri, 'binary-data'),
        # Presentation
        (presentation_uri, 'date-time-decl'),
        #(presentation_uri, 'footer-decl'),
        #(presentation_uri, 'header-decl'),
        # Text
        (text_uri, 'author-initials'),
        (text_uri, 'author-name'),
        # XXX (text_uri, 'bibliography-mark'),
        (text_uri, 'bookmark-ref'),
        #(text_uri, 'chapter'),
        (text_uri, 'character-count'),
        #(text_uri, 'conditional-text'),
        (text_uri, 'creation-date'),
        (text_uri, 'creation-time'),
        (text_uri, 'creator'),
        (text_uri, 'date'),
        (text_uri, 'dde-connection'),
        #(text_uri, 'description'),
        (text_uri, 'editing-cycles'),
        (text_uri, 'editing-duration'),
        (text_uri, 'expression'),
        (text_uri, 'file-name'),
        #(text_uri, 'hidden-paragraph'),
        #(text_uri, 'hidden-text'),
        (text_uri, 'image-count'),
        #(text_uri, 'index-entry-span'),
        (text_uri, 'index-title-template'),
        (text_uri, 'initial-creator'),
        #(text_uri, 'keywords'),
        (text_uri, 'linenumbering-separator'),
        (text_uri, 'measure'),
        (text_uri, 'modification-date'),
        (text_uri, 'modification-time'),
        #(text_uri, 'note-citation'),
        #(text_uri, 'note-continuation-notice-backward'),
        #(text_uri, 'note-continuation-notice-forward'),
        (text_uri, 'note-ref'),
        (text_uri, 'number'),
        (text_uri, 'object-count'),
        (text_uri, 'page-continuation'),
        (text_uri, 'page-count'),
        (text_uri, 'page-number'),
        (text_uri, 'page-variable-get'),
        (text_uri, 'page-variable-set'),
        (text_uri, 'paragraph-count'),
        #(text_uri, 'placeholder'),
        (text_uri, 'print-date'),
        (text_uri, 'print-time'),
        (text_uri, 'printed-by'),
        (text_uri, 'reference-ref'),
        #(text_uri, 'ruby-text'),
        (text_uri, 'script'),
        (text_uri, 'sender-city'),
        (text_uri, 'sender-company'),
        (text_uri, 'sender-country'),
        (text_uri, 'sender-email'),
        (text_uri, 'sender-fax'),
        (text_uri, 'sender-firstname'),
        (text_uri, 'sender-initials'),
        (text_uri, 'sender-lastname'),
        (text_uri, 'sender-phone-private'),
        (text_uri, 'sender-phone-work'),
        #(text_uri, 'sender-position'),
        (text_uri, 'sender-postal-code'),
        (text_uri, 'sender-state-or-province'),
        (text_uri, 'sender-street'),
        #(text_uri, 'sender-title'),
        (text_uri, 'sequence'),
        (text_uri, 'sequence-ref'),
        (text_uri, 'sheet-name'),
        #(text_uri, 'subject'),
        (text_uri, 'table-count'),
        (text_uri, 'table-formula'),
        (text_uri, 'template-name'),
        (text_uri, 'text-input'),
        (text_uri, 'time'),
        #(text_uri, 'title'),
        (text_uri, 'user-defined'),
        (text_uri, 'user-field-get'),
        (text_uri, 'user-field-input'),
        (text_uri, 'variable-get'),
        (text_uri, 'variable-input'),
        (text_uri, 'variable-set'),
        (text_uri, 'word-count'),
        # SVG
        #(svg_uri, 'title'),
        #(svg_uri, 'desc')
        # From translate
        (text_uri, 'tracked-changes'),
        ]
    # Publish the tables at module scope so callers can import them exactly
    # as they would from itools.odf.schema.
    globals()['inline_elements'] = inline_elements
    globals()['no_translate_content_elements'] = no_translate_content_elements
# Prefer the canonical element tables from itools when it is installed;
# fall back to the vendored copy defined above otherwise.
try:
    from itools.odf.schema import inline_elements
    from itools.odf.schema import no_translate_content_elements
except ImportError:
    # Only a missing itools should trigger the fallback; a bare "except:"
    # would also swallow unrelated errors (including KeyboardInterrupt).
    define_tables()
|
{
"content_hash": "f9ab9cb44c5b3c4fb6c94097f036d06f",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 33.3780487804878,
"alnum_prop": 0.5369017172086226,
"repo_name": "staranjeet/fjord",
"id": "b32139ef7a5069429cd79fbf7b15e3a675af26e9",
"size": "6238",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/storage/odf_shared.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158619"
},
{
"name": "HTML",
"bytes": "127302"
},
{
"name": "JavaScript",
"bytes": "296754"
},
{
"name": "Python",
"bytes": "853569"
},
{
"name": "Shell",
"bytes": "11673"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
}
|
from __future__ import division
# Tunable module-level constants.  Times below are in seconds.
softwareVersion = '0.4.4'
verbose = 1
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000 # This is obsolete with the change to protocol v3 but the singleCleaner thread still hasn't been updated so we need this a little longer.
lengthOfTimeToHoldOnToAllPubkeys = 2419200 # Equals 4 weeks. You could make this longer if you want but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfNodesThatIAdvertiseToOthers = 10800 # Equals three hours
useVeryEasyProofOfWorkForTesting = False # If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
# Libraries.
import collections
import ConfigParser
import os
import pickle
import Queue
import random
from i2p import socket
import sys
import stat
import threading
import time
import shutil # used for moving the data folder and copying keys.dat
import datetime
from os import path, environ
from struct import Struct
import traceback
# Project imports.
from addresses import *
import highlevelcrypto
import shared
#import helper_startup
from helper_sql import *
# Mutable module-level state shared across the client's threads.  Most of
# these are read/written by several threads; the paired *Lock objects below
# guard the structures their names refer to.
i2psession = 'i2pbm'
myDestination = ''  # our own I2P destination; filled in after session setup
config = ConfigParser.SafeConfigParser()
myECCryptorObjects = {}  # per-address EC decryption objects, keyed by RIPE hash
MyECSubscriptionCryptorObjects = {}
myAddressesByHash = {} #The key in this dictionary is the RIPE hash which is encoded in an address and value is the address itself.
myAddressesByTag = {} # The key in this dictionary is the tag generated from the address.
broadcastSendersForWhichImWatching = {}
workerQueue = Queue.Queue()
UISignalQueue = Queue.Queue()
addressGeneratorQueue = Queue.Queue()
knownNodesLock = threading.Lock()
knownNodes = {}
sendDataQueues = [] #each sendData thread puts its queue in this list.
inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
inventoryLock = threading.Lock() #Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
printLock = threading.Lock()
objectProcessorQueueSizeLock = threading.Lock()
objectProcessorQueueSize = 0 # in Bytes. We maintain this to prevent nodes from flooing us with objects which take up too much memory. If this gets too big we'll sleep before asking for further objects.
appdata = '' #holds the location of the application data storage directory
statusIconColor = 'red'
connectedHostsList = {} #List of hosts to which we are connected. Used to guarantee that the outgoingSynSender threads won't connect to the same remote node twice.
shutdown = 0 #Set to 1 by the doCleanShutdown function. Used to tell the proof of work worker threads to exit.
alreadyAttemptedConnectionsList = {
} # This is a list of nodes to which we have already attempted a connection
alreadyAttemptedConnectionsListLock = threading.Lock()
alreadyAttemptedConnectionsListResetTime = int(
    time.time()) # used to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
numberOfObjectsThatWeHaveYetToGetPerPeer = {}
neededPubkeys = {}
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
    '>Q', random.randrange(1, 18446744073709551615))
successfullyDecryptMessageTimings = [
] # A list of the amounts of time it took to successfully decrypt msg messages
apiAddressGeneratorReturnQueue = Queue.Queue(
) # The address generator thread uses this queue to get information back to the API thread.
ackdataForWhichImWatching = {}
clientHasReceivedIncomingConnections = False #used by API command clientStatus
numberOfMessagesProcessed = 0
numberOfBroadcastsProcessed = 0
numberOfPubkeysProcessed = 0
numberOfInventoryLookupsPerformed = 0
numberOfBytesReceived = 0 # Used for the 'network status' page
numberOfBytesSent = 0 # Used for the 'network status' page
numberOfBytesReceivedLastSecond = 0 # used for the bandwidth rate limit
numberOfBytesSentLastSecond = 0 # used for the bandwidth rate limit
lastTimeWeResetBytesReceived = 0 # used for the bandwidth rate limit
lastTimeWeResetBytesSent = 0 # used for the bandwidth rate limit
sendDataLock = threading.Lock() # used for the bandwidth rate limit
receiveDataLock = threading.Lock() # used for the bandwidth rate limit
daemon = False
inventorySets = {} # key = streamNumer, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple hours.
needToWriteKnownNodesToDisk = False # If True, the singleCleaner will write it to disk eventually.
maximumLengthOfTimeToBotherResendingMessages = 0
objectProcessorQueue = Queue.Queue(
) # receiveDataThreads dump objects they hear on the network into this queue to be processed.
streamsInWhichIAmParticipating = {}
#If changed, these values will cause particularly unexpected behavior: You won't be able to either send or receive messages because the proof of work you do (or demand) won't match that done or demanded by others. Don't change them!
networkDefaultProofOfWorkNonceTrialsPerByte = 1000 #The amount of work that should be performed (and demanded) per byte of the payload.
networkDefaultPayloadLengthExtraBytes = 1000 #To make sending short messages a little more difficult, this value is added to the payload length for use in calculating the proof of work target.
# Remember here the RPC port read from namecoin.conf so we can restore to
# it as default whenever the user changes the "method" selection for
# namecoin integration to "namecoind".
namecoinDefaultRpcPort = "8336"
# When using py2exe or py2app, the variable frozen is added to the sys
# namespace. This can be used to setup a different code path for
# binary distributions vs source distributions.
frozen = getattr(sys,'frozen', None)
# If the trustedpeer option is specified in keys.dat then this will
# contain a Peer which will be connected to instead of using the
# addresses advertised by other peers. The client will only connect to
# this peer and the timing attack mitigation will be disabled in order
# to download data faster. The expected use case is where the user has
# a fast connection to a trusted server where they run a BitMessage
# daemon permanently. If they then run a second instance of the client
# on a local machine periodically when they want to check for messages
# it will sync with the network a lot faster without compromising
# security.
trustedPeer = None
#Compiled struct for packing/unpacking headers
#New code should use CreatePacket instead of Header.pack
Header = Struct('!L12sL4s')
#Scrape peers from a seedless server and announce ourselves
import seedless_util
def do_scrapePeers():
print "Scrape/Announce on Seedless underway."
seedless_util.service = "pybitmsg-i2p"
try:
peersList = seedless_util.scrapePeers(myDestination)
except Exception as e:
print e
print "%d peers scraped from Seedless." % len(peersList)
for dest in peersList:
peer = shared.Peer( str(dest) )
knownNodes[1][peer] = int( time.time() )
#Create a packet
def CreatePacket(command, payload=''):
    """Assemble a wire packet: magic, command, length, sha512 checksum,
    then the payload itself (header layout comes from the module Header
    struct).
    """
    checksum = hashlib.sha512(payload).digest()[:4]
    packet = bytearray(Header.size + len(payload))
    Header.pack_into(packet, 0, 0xE9BEB4D9, command, len(payload), checksum)
    packet[Header.size:] = payload
    return bytes(packet)
def isInSqlInventory(hash):
    """Return True when *hash* already exists in the inventory table."""
    rows = sqlQuery('''select hash from inventory where hash=?''', hash)
    return rows != []
def assembleVersionMessage(remoteDest, myStreamNumber):
    """Build a protocol v3 'version' packet for the given remote I2P
    destination and stream number.  Field order matters; do not reorder.
    """
    payload = ''
    payload += pack('>L', 3) # protocol version.
    payload += pack('>q', 1) # bitflags of the services I offer.
    payload += pack('>q', int(time.time()))
    payload += pack(
        '>q', 1) # boolservices of remote connection; ignored by the remote host.
    payload += remoteDest
    payload += pack('>q', 1) # bitflags of the services I offer.
    # Placeholder "my address" field (a fixed I2P destination string);
    # ignored by the remote host, which uses the actual connected address.
    payload += 'VXVZRtml-XDgkwFcehckXBQ1qOx8whwjYlPZnKyIp3L5OFhwF6moUjkAoN~4J5TmdBLP5jxoOEwe5pC6TcgkKAvEXLqGvb607LPr9XhhWdgfHFyfcEG1zGhMziisOSHwmnUAjlvd5FT9H7ouv2on5JvLAHRiqe-vO0Ifz~dnkQyhd-IouWArdTlXQqhm7ArMS1-vHQKaslktY9BrFS8ZxKojbAMxcrBrt-9IND1f9-KpRBwtKp0Hup6jzIk3cNGbP4eadZ3F-Zic6oy-ktsH0iz5FBKmpMdc36SQDG8rReMjngKZntl4OhxjAZ7eYLllA6T3X5wdICkoqNJEobByGx9TEYXq6bVlyp7aoxGuB8~piqJWoCqbgfcIDUznP050YoCKp3Uk6u9DmROP4pckzg910FdKSF3TRlebKRRzB7KHWXV~CY3xZEp8CKblBljJEw3FNv0IZ5Guq0tNi9bjs6uXtY1IPviEN9cVfmT3EZ5WK8b~3JdvZrDGKoWAJkRAAAAA'
    # This will be ignored by the remote host. The actual remote connected IP will be used.
    random.seed()
    payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
    userAgent = '/PyBitmessage:' + shared.softwareVersion + '/'
    payload += encodeVarint(len(userAgent))
    payload += userAgent
    payload += encodeVarint(
        1) # The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
    payload += encodeVarint(myStreamNumber)
    return CreatePacket('version', payload)
def assembleErrorMessage(fatal=0, banTime=0, inventoryVector='', errorText=''):
    """Build a protocol 'error' packet from the given fields; variable
    length fields are preceded by their varint-encoded length.
    """
    parts = [
        encodeVarint(fatal),
        encodeVarint(banTime),
        encodeVarint(len(inventoryVector)),
        inventoryVector,
        encodeVarint(len(errorText)),
        errorText,
    ]
    return CreatePacket('error', ''.join(parts))
def lookupAppdataFolder():
    """Return the per-platform application data directory (with a trailing
    separator), honoring the BITMESSAGE_HOME override.  On Linux this also
    migrates a legacy ~/.PyBitmessage-I2P folder to the XDG location.
    """
    APPNAME = "PyBitmessage-I2P"
    if "BITMESSAGE_HOME" in environ:
        # Explicit override wins on every platform.
        dataFolder = environ["BITMESSAGE_HOME"]
        if dataFolder[-1] not in [os.path.sep, os.path.altsep]:
            dataFolder += os.path.sep
    elif sys.platform == 'darwin':
        if "HOME" in environ:
            dataFolder = path.join(os.environ["HOME"], "Library/Application Support/", APPNAME) + '/'
        else:
            stringToLog = 'Could not find home folder, please report this message and your OS X version to the BitMessage Github.'
            if 'logger' in globals():
                logger.critical(stringToLog)
            else:
                print stringToLog
            sys.exit()
    elif 'win32' in sys.platform or 'win64' in sys.platform:
        dataFolder = path.join(environ['APPDATA'].decode(sys.getfilesystemencoding(), 'ignore'), APPNAME) + path.sep
    else:
        from shutil import move
        try:
            dataFolder = path.join(environ["XDG_CONFIG_HOME"], APPNAME)
        except KeyError:
            dataFolder = path.join(environ["HOME"], ".config", APPNAME)
        # Migrate existing data to the proper location if this is an existing install
        try:
            move(path.join(environ["HOME"], ".%s" % APPNAME), dataFolder)
            stringToLog = "Moving data folder to %s" % (dataFolder)
            if 'logger' in globals():
                logger.info(stringToLog)
            else:
                print stringToLog
        except IOError:
            # Old directory may not exist.
            pass
        dataFolder = dataFolder + '/'
    return dataFolder
def isAddressInMyAddressBook(address):
    """Return True when *address* has an entry in the addressbook table."""
    matches = sqlQuery(
        '''select address from addressbook where address=?''',
        address)
    return matches != []
#At this point we should really just have a isAddressInMy(book, address)...
def isAddressInMySubscriptionsList(address):
    """Return True when *address* has an entry in the subscriptions table."""
    matches = sqlQuery(
        '''select * from subscriptions where address=?''',
        str(address))
    return matches != []
def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
    """Return True when *address* is in the address book, the enabled
    whitelist, or the enabled subscriptions list.

    Uses '!=' rather than the archaic Python-2-only '<>' operator.
    """
    if isAddressInMyAddressBook(address):
        return True
    queryreturn = sqlQuery('''SELECT address FROM whitelist where address=? and enabled = '1' ''', address)
    if queryreturn != []:
        return True
    queryreturn = sqlQuery(
        '''select address from subscriptions where address=? and enabled = '1' ''',
        address)
    if queryreturn != []:
        return True
    return False
def safeConfigGetBoolean(section, field):
    """Return the boolean config option at (section, field), or False when
    the section/option is missing or its value cannot be parsed as a boolean.

    Fix: uses the ``except Exception:`` form (valid in Python 2 and 3)
    instead of the Python-2-only ``except Exception, err`` and drops the
    unused exception binding.
    """
    try:
        return config.getboolean(section, field)
    except Exception:
        # Missing or malformed option: treat as disabled rather than crash.
        return False
def decodeWalletImportFormat(WIFstring):
    """Decode a Wallet Import Format (base58) private key string and return
    the raw private key bytes (without the leading 0x80 prefix byte).

    On checksum failure or a missing 0x80 prefix this logs a critical error
    and terminates the process with os._exit(0); the trailing ``return ""``
    lines are unreachable safety nets.
    """
    # changebase(.., 58, 256): base58 string -> raw byte string.
    fullString = arithmetic.changebase(WIFstring,58,256)
    # Last 4 bytes are the double-SHA256 checksum of everything before them.
    privkey = fullString[:-4]
    if fullString[-4:] != hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]:
        logger.critical('Major problem! When trying to decode one of your private keys, the checksum '
                        'failed. Here are the first 6 characters of the PRIVATE key: %s' % str(WIFstring)[:6])
        os._exit(0)
        return ""
    else:
        #checksum passed
        # WIF payload must start with the 0x80 version byte; strip it.
        if privkey[0] == '\x80':
            return privkey[1:]
        else:
            logger.critical('Major problem! When trying to decode one of your private keys, the '
                            'checksum passed but the key doesn\'t begin with hex 80. Here is the '
                            'PRIVATE key: %s' % str(WIFstring))
            os._exit(0)
            return ""
def reloadMyAddressHashes():
    """Rebuild the in-memory key tables (myECCryptorObjects,
    myAddressesByHash, myAddressesByTag) from the addresses in keys.dat,
    then repair the keyfile permissions if they were found insecure.

    Fixes: replaces the removed-in-Python-3 ``<>`` operator with ``!=`` and
    renames the local ``hash`` which shadowed the builtin.
    """
    logger.debug('reloading keys from keys.dat file')
    myECCryptorObjects.clear()
    myAddressesByHash.clear()
    myAddressesByTag.clear()
    #myPrivateKeys.clear()
    # Check permissions first; we fix them at the end once we know whether
    # any *enabled* keys were exposed while the file was insecure.
    keyfileSecure = checkSensitiveFilePermissions(appdata + 'keys.dat')
    configSections = config.sections()
    hasEnabledKeys = False
    for addressInKeysFile in configSections:
        if addressInKeysFile != 'bitmessagesettings':
            isEnabled = config.getboolean(addressInKeysFile, 'enabled')
            if isEnabled:
                hasEnabledKeys = True
                status, addressVersionNumber, streamNumber, addressHash = decodeAddress(addressInKeysFile)
                if addressVersionNumber == 2 or addressVersionNumber == 3 or addressVersionNumber == 4:
                    # Returns a simple 32 bytes of information encoded in 64 Hex characters,
                    # or null if there was an error.
                    privEncryptionKey = decodeWalletImportFormat(
                        config.get(addressInKeysFile, 'privencryptionkey')).encode('hex')
                    if len(privEncryptionKey) == 64:  # It is 32 bytes encoded as 64 hex characters
                        myECCryptorObjects[addressHash] = highlevelcrypto.makeCryptor(privEncryptionKey)
                        myAddressesByHash[addressHash] = addressInKeysFile
                        tag = hashlib.sha512(hashlib.sha512(encodeVarint(
                            addressVersionNumber) + encodeVarint(streamNumber) + addressHash).digest()).digest()[32:]
                        myAddressesByTag[tag] = addressInKeysFile
                else:
                    logger.error('Error in reloadMyAddressHashes: Can\'t handle address versions other than 2, 3, or 4.\n')
    if not keyfileSecure:
        fixSensitiveFilePermissions(appdata + 'keys.dat', hasEnabledKeys)
def reloadBroadcastSendersForWhichImWatching():
    """Rebuild the broadcast-subscription lookup tables from the enabled
    rows of the subscriptions table.

    For every subscribed address a Cryptor is created whose private key is
    deterministically derived from the address data, so that encrypted
    broadcasts from that sender can be decrypted.
    """
    broadcastSendersForWhichImWatching.clear()
    MyECSubscriptionCryptorObjects.clear()
    queryreturn = sqlQuery('SELECT address FROM subscriptions where enabled=1')
    logger.debug('reloading subscriptions...')
    for row in queryreturn:
        address, = row
        status,addressVersionNumber,streamNumber,hash = decodeAddress(address)
        if addressVersionNumber == 2:
            broadcastSendersForWhichImWatching[hash] = 0
        #Now, for all addresses, even version 2 addresses, we should create Cryptor objects in a dictionary which we will use to attempt to decrypt encrypted broadcast messages.
        if addressVersionNumber <= 3:
            # v2/v3: decryption key is the first 32 bytes of a single SHA-512
            # of the address data; keyed by the address hash itself.
            privEncryptionKey = hashlib.sha512(encodeVarint(addressVersionNumber)+encodeVarint(streamNumber)+hash).digest()[:32]
            MyECSubscriptionCryptorObjects[hash] = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))
        else:
            # v4+: double SHA-512; first half is the key, second half is the
            # broadcast tag used as the dictionary key.
            doubleHashOfAddressData = hashlib.sha512(hashlib.sha512(encodeVarint(
                addressVersionNumber) + encodeVarint(streamNumber) + hash).digest()).digest()
            tag = doubleHashOfAddressData[32:]
            privEncryptionKey = doubleHashOfAddressData[:32]
            MyECSubscriptionCryptorObjects[tag] = highlevelcrypto.makeCryptor(privEncryptionKey.encode('hex'))
def isProofOfWorkSufficient(data,
                            nonceTrialsPerByte=0,
                            payloadLengthExtraBytes=0):
    """Return True when the proof-of-work embedded in *data* meets the
    difficulty target.

    Difficulty parameters below the network-wide defaults are raised to the
    defaults. The TTL (derived from the expiry time at bytes 8..16) is
    floored at 300 seconds for the target computation.

    NOTE: this module runs under Python 2, where ``/`` on integers is floor
    division — the target formula relies on that; do not port naively.
    """
    if nonceTrialsPerByte < networkDefaultProofOfWorkNonceTrialsPerByte:
        nonceTrialsPerByte = networkDefaultProofOfWorkNonceTrialsPerByte
    if payloadLengthExtraBytes < networkDefaultPayloadLengthExtraBytes:
        payloadLengthExtraBytes = networkDefaultPayloadLengthExtraBytes
    endOfLifeTime, = unpack('>Q', data[8:16])
    TTL = endOfLifeTime - int(time.time())
    if TTL < 300:
        TTL = 300
    # POW value: first 8 bytes of double-SHA512(nonce + SHA512(rest)).
    POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[
        :8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
    return POW <= 2 ** 64 / (nonceTrialsPerByte*(len(data) + payloadLengthExtraBytes + ((TTL*(len(data)+payloadLengthExtraBytes))/(2 ** 16))))
def doCleanShutdown():
    """Perform an orderly shutdown: signal worker threads to exit, persist
    the knownNodes peer list and the in-memory inventory to disk, wait for
    the objectProcessorThread and sqlThread to finish, then (in daemon mode)
    terminate the process.

    Fix: knownNodesLock and the knownnodes.dat file handle are now released
    via try/finally, so a failed disk write can no longer leave the lock
    held or the file open.
    """
    global shutdown
    shutdown = 1 #Used to tell proof of work worker threads and the objectProcessorThread to exit.
    broadcastToSendDataQueues((0, 'shutdown', 'no data'))
    with shared.objectProcessorQueueSizeLock:
        data = 'no data'
        shared.objectProcessorQueueSize += len(data)
        objectProcessorQueue.put(('checkShutdownVariable',data))
    knownNodesLock.acquire()
    UISignalQueue.put(('updateStatusBar','Saving the knownNodes list of peers to disk...'))
    try:
        output = open(appdata + 'knownnodes.dat', 'wb')
        try:
            logger.info('finished opening knownnodes.dat. Now pickle.dump')
            pickle.dump(knownNodes, output)
            logger.info('Completed pickle.dump. Closing output...')
        finally:
            output.close()
    finally:
        knownNodesLock.release()
    logger.info('Finished closing knownnodes.dat output file.')
    UISignalQueue.put(('updateStatusBar','Done saving the knownNodes list of peers to disk.'))
    logger.info('Flushing inventory in memory out to disk...')
    UISignalQueue.put((
        'updateStatusBar',
        'Flushing inventory in memory out to disk. This should normally only take a second...'))
    flushInventory()
    # Verify that the objectProcessor has finished exiting. It should have incremented the
    # shutdown variable from 1 to 2. This must finish before we command the sqlThread to exit.
    while shutdown == 1:
        time.sleep(.1)
    # This one last useless query will guarantee that the previous flush committed and that the
    # objectProcessorThread committed before we close the program.
    sqlQuery('SELECT address FROM subscriptions')
    logger.info('Finished flushing inventory.')
    sqlStoredProcedure('exit')
    # Wait long enough to guarantee that any running proof of work worker threads will check the
    # shutdown variable and exit. If the main thread closes before they do then they won't stop.
    time.sleep(.25)
    if safeConfigGetBoolean('bitmessagesettings','daemon'):
        logger.info('Clean shutdown complete.')
        os._exit(0)
def broadcastToSendDataQueues(data):
    """Fan *data* out to every sendDataThread.

    To command all of the sendDataThreads (e.g. shutdown, or send some
    data), put the command into each thread's queue here. The
    sendDataThreads themselves are responsible for adding their queue to
    (and removing it from) the sendDataQueues list.
    """
    for sendDataThreadQueue in sendDataQueues:
        sendDataThreadQueue.put(data)
def flushInventory():
    """Write every entry of the in-memory inventory dict into the SQL
    inventory table, removing each entry from the dict as it goes."""
    #Note that the singleCleanerThread clears out the inventory dictionary from time to time, although it only clears things that have been in the dictionary for a long time. This clears the inventory dictionary Now.
    with SqlBulkExecute() as sql:
        # Python 2: .items() returns a list snapshot, so deleting from
        # 'inventory' during iteration is safe here (a Python 3 port would
        # need list(inventory.items())).
        for hash, storedValue in inventory.items():
            objectType, streamNumber, payload, expiresTime, tag = storedValue
            sql.execute('''INSERT INTO inventory VALUES (?,?,?,?,?,?)''',
                       hash,objectType,streamNumber,payload,expiresTime,tag)
            del inventory[hash]
def fixPotentiallyInvalidUTF8Data(text):
    """Return *text* unchanged if it decodes as UTF-8; otherwise return a
    placeholder message containing repr() of the raw bytes so the user can
    still inspect the corrupt message.

    Fix: catches only the decode failure instead of a bare ``except:`` that
    would also swallow unrelated bugs (KeyboardInterrupt, NameError, ...).
    """
    try:
        unicode(text, 'utf-8')
        return text
    except UnicodeDecodeError:
        output = 'Part of the message is corrupt. The message cannot be displayed the normal way.\n\n' + repr(text)
        return output
# Checks sensitive file permissions for inappropriate umask during keys.dat creation.
# (Or unwise subsequent chmod.)
#
# Returns true iff file appears to have appropriate permissions.
def checkSensitiveFilePermissions(filename):
    """Return True when *filename* has no group/other permission bits set
    (or when the check does not apply: Windows, or a fuseblk filesystem
    without POSIX permissions).

    Security fix: the filesystem-type probe now passes an argument list to
    subprocess with shell=False, so a hostile filename can no longer inject
    shell commands (the original interpolated *filename* into a
    shell=True command string).
    """
    if sys.platform == 'win32':
        # TODO: This might deserve extra checks by someone familiar with
        # Windows systems.
        return True
    elif sys.platform[:7] == 'freebsd':
        # FreeBSD file systems are the same as major Linux file systems
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
    else:
        try:
            # Skip known problems for non-Win32 filesystems without POSIX permissions.
            import subprocess
            fstype = subprocess.check_output(
                ['stat', '-f', '-c', '%T', filename],
                stderr=subprocess.STDOUT).decode('utf-8', 'ignore')
            if 'fuseblk' in fstype:
                logger.info('Skipping file permissions check for %s. Filesystem fuseblk detected.',
                            filename)
                return True
        except Exception:
            # Swallow exception here, but we might run into trouble later!
            logger.error('Could not determine filesystem type. %s', filename)
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        return present_permissions & disallowed_permissions == 0
# Fixes permissions on a sensitive file.
def fixSensitiveFilePermissions(filename, hasEnabledKeys):
    """Strip the group/other permission bits from *filename*, warning the
    user first (more sternly when enabled keys were exposed).

    Raises whatever os.stat/os.chmod raised if the fix fails, after logging
    the traceback.

    Fix: uses ``except Exception:`` (valid in Python 2 and 3) instead of
    the Python-2-only ``except Exception, e`` with an unused binding.
    """
    if hasEnabledKeys:
        logger.warning('Keyfile had insecure permissions, and there were enabled keys. '
                       'The truly paranoid should stop using them immediately.')
    else:
        logger.warning('Keyfile had insecure permissions, but there were no enabled keys.')
    try:
        present_permissions = os.stat(filename)[0]
        disallowed_permissions = stat.S_IRWXG | stat.S_IRWXO
        # All bits except the disallowed group/other ones.
        allowed_permissions = ((1<<32)-1) ^ disallowed_permissions
        new_permissions = (
            allowed_permissions & present_permissions)
        os.chmod(filename, new_permissions)
        logger.info('Keyfile permissions automatically fixed.')
    except Exception:
        logger.exception('Keyfile permissions could not be fixed.')
        raise
def isBitSetWithinBitfield(fourByteString, n):
    """Return True if bit *n* of the 4-byte bitfield is set, using MSB 0
    bit numbering (bit 0 is the most significant bit of the first byte)."""
    bitfield, = unpack('>L', fourByteString)
    mask = 1 << (31 - n)
    return bitfield & mask != 0
def decryptAndCheckPubkeyPayload(data, address):
    """
    Version 4 pubkeys are encrypted. This function is run when we already have the
    address to which we want to try to send a message. The 'data' may come either
    off of the wire or we might have had it already in our inventory when we tried
    to send a msg to this particular address.

    Returns 'successful' (and inserts the pubkey into the pubkeys table) or
    'failed' on any validation, decryption, or signature error.
    """
    try:
        status, addressVersion, streamNumber, ripe = decodeAddress(address)
        readPosition = 20  # bypass the nonce, time, and object type
        embeddedAddressVersion, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        embeddedStreamNumber, varintLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += varintLength
        storedData = data[20:readPosition] # We'll store the address version and stream number (and some more) in the pubkeys table.
        if addressVersion != embeddedAddressVersion:
            logger.info('Pubkey decryption was UNsuccessful due to address version mismatch.')
            return 'failed'
        if streamNumber != embeddedStreamNumber:
            logger.info('Pubkey decryption was UNsuccessful due to stream number mismatch.')
            return 'failed'
        tag = data[readPosition:readPosition + 32]
        readPosition += 32
        signedData = data[8:readPosition] # the time through the tag. More data is appended onto signedData below after the decryption.
        encryptedData = data[readPosition:]
        # Let us try to decrypt the pubkey
        toAddress, cryptorObject = shared.neededPubkeys[tag]
        if toAddress != address:
            logger.critical('decryptAndCheckPubkeyPayload failed due to toAddress mismatch. This is very peculiar. toAddress: %s, address %s' % (toAddress, address))
            # the only way I can think that this could happen is if someone encodes their address data two different ways.
            # That sort of address-malleability should have been caught by the UI or API and an error given to the user.
            return 'failed'
        try:
            decryptedData = cryptorObject.decrypt(encryptedData)
        except:
            # Someone must have encrypted some data with a different key
            # but tagged it with a tag for which we are watching.
            logger.info('Pubkey decryption was unsuccessful.')
            return 'failed'
        # Parse the decrypted plaintext: behaviors bitfield, the two raw
        # public keys, then the sender's difficulty parameters.
        readPosition = 0
        bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
        readPosition += 4
        publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
        readPosition += 64
        specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedNonceTrialsPerByteLength
        specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += specifiedPayloadLengthExtraBytesLength
        storedData += decryptedData[:readPosition]
        signedData += decryptedData[:readPosition]
        signatureLength, signatureLengthLength = decodeVarint(
            decryptedData[readPosition:readPosition + 10])
        readPosition += signatureLengthLength
        signature = decryptedData[readPosition:readPosition + signatureLength]
        if highlevelcrypto.verify(signedData, signature, publicSigningKey.encode('hex')):
            logger.info('ECDSA verify passed (within decryptAndCheckPubkeyPayload)')
        else:
            logger.info('ECDSA verify failed (within decryptAndCheckPubkeyPayload)')
            return 'failed'
        # The RIPE hash of the embedded keys must match the RIPE encoded in
        # the address we are sending to.
        sha = hashlib.new('sha512')
        sha.update(publicSigningKey + publicEncryptionKey)
        ripeHasher = hashlib.new('ripemd160')
        ripeHasher.update(sha.digest())
        embeddedRipe = ripeHasher.digest()
        if embeddedRipe != ripe:
            # Although this pubkey object had the tag we were looking for and was
            # encrypted with the correct encryption key, it doesn't contain the
            # correct pubkeys. Someone is either being malicious or using buggy software.
            logger.info('Pubkey decryption was UNsuccessful due to RIPE mismatch.')
            return 'failed'
        # Everything checked out. Insert it into the pubkeys table.
        logger.info('within decryptAndCheckPubkeyPayload, addressVersion: %s, streamNumber: %s \n\
                    ripe %s\n\
                    publicSigningKey in hex: %s\n\
                    publicEncryptionKey in hex: %s' % (addressVersion,
                                                       streamNumber,
                                                       ripe.encode('hex'),
                                                       publicSigningKey.encode('hex'),
                                                       publicEncryptionKey.encode('hex')
                                                       )
                    )
        t = (address, addressVersion, storedData, int(time.time()), 'yes')
        sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''', *t)
        return 'successful'
    except varintDecodeError as e:
        logger.info('Pubkey decryption was UNsuccessful due to a malformed varint.')
        return 'failed'
    except Exception as e:
        logger.critical('Pubkey decryption was UNsuccessful because of an unhandled exception! This is definitely a bug! \n%s' % traceback.format_exc())
        return 'failed'
# Lightweight peer record; 'dest' presumably holds the peer's destination
# identifier (I2P destination in this fork) -- TODO confirm against callers.
Peer = collections.namedtuple('Peer', ['dest'])
def checkAndShareObjectWithPeers(data):
    """
    This function is called after either receiving an object off of the wire
    or after receiving one as ackdata.
    Returns the length of time that we should reserve to process this message
    if we are receiving it off of the wire.

    Returns 0 when the object is invalid (too large, insufficient proof of
    work, or expiry time out of range) and the object is dropped.
    """
    if len(data) > 2 ** 18:
        logger.info('The payload length of this object is too large (%s bytes). Ignoring it.' % len(data))
        return 0
    # Let us check to make sure that the proof of work is sufficient.
    if not isProofOfWorkSufficient(data):
        logger.info('Proof of work is insufficient.')
        return 0
    # Expiry time lives in bytes 8..16 (big-endian uint64).
    endOfLifeTime, = unpack('>Q', data[8:16])
    if endOfLifeTime - int(time.time()) > 28 * 24 * 60 * 60 + 10800: # The TTL may not be larger than 28 days + 3 hours of wiggle room
        logger.info('This object\'s End of Life time is too far in the future. Ignoring it. Time is %s' % endOfLifeTime)
        return 0
    if endOfLifeTime - int(time.time()) < - 3600: # The EOL time was more than an hour ago. That's too much.
        logger.info('This object\'s End of Life time was more than an hour ago. Ignoring the object. Time is %s' % endOfLifeTime)
        return 0
    # Dispatch on the object type stored at bytes 16..20.
    intObjectType, = unpack('>I', data[16:20])
    try:
        if intObjectType == 0:
            _checkAndShareGetpubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 1:
            _checkAndSharePubkeyWithPeers(data)
            return 0.1
        elif intObjectType == 2:
            _checkAndShareMsgWithPeers(data)
            return 0.6
        elif intObjectType == 3:
            _checkAndShareBroadcastWithPeers(data)
            return 0.6
        else:
            _checkAndShareUndefinedObjectWithPeers(data)
            return 0.6
    except varintDecodeError as e:
        logger.debug("There was a problem with a varint while checking to see whether it was appropriate to share an object with peers. Some details: %s" % e)
    except Exception as e:
        logger.critical('There was a problem while checking to see whether it was appropriate to share an object with peers. This is definitely a bug! \n%s' % traceback.format_exc())
    return 0
def _checkAndShareUndefinedObjectWithPeers(data):
    """Record an object of unknown type in the inventory (deduplicating
    against both the in-memory dict and the SQL inventory) and advertise it
    to peers on its stream."""
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if not streamNumber in streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    inventoryHash = calculateInventoryHash(data)
    shared.numberOfInventoryLookupsPerformed += 1
    # Manual acquire/release: the lock is released on every branch below.
    inventoryLock.acquire()
    if inventoryHash in inventory:
        logger.debug('We have already received this undefined object. Ignoring.')
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        logger.debug('We have already received this undefined object (it is stored on disk in the SQL inventory). Ignoring it.')
        inventoryLock.release()
        return
    objectType, = unpack('>I', data[16:20])
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime,'')
    inventorySets[streamNumber].add(inventoryHash)
    inventoryLock.release()
    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
def _checkAndShareMsgWithPeers(data):
    """Record a msg object in the inventory (deduplicating against the
    in-memory dict and the SQL inventory), advertise it to peers on its
    stream, and queue it for local processing."""
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass nonce, time, and object type
    objectVersion, objectVersionLength = decodeVarint(
        data[readPosition:readPosition + 9])
    readPosition += objectVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 9])
    if not streamNumber in streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    readPosition += streamNumberLength
    inventoryHash = calculateInventoryHash(data)
    shared.numberOfInventoryLookupsPerformed += 1
    # Manual acquire/release: the lock is released on every branch below.
    inventoryLock.acquire()
    if inventoryHash in inventory:
        logger.debug('We have already received this msg message. Ignoring.')
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        logger.debug('We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it.')
        inventoryLock.release()
        return
    # This msg message is valid. Let's let our peers know about it.
    objectType = 2
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime,'')
    inventorySets[streamNumber].add(inventoryHash)
    inventoryLock.release()
    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's enqueue it to be processed ourselves.
    # If we already have too much data in the queue to be processed, just sleep for now.
    while shared.objectProcessorQueueSize > 120000000:
        time.sleep(2)
    with shared.objectProcessorQueueSizeLock:
        shared.objectProcessorQueueSize += len(data)
        objectProcessorQueue.put((objectType,data))
def _checkAndShareGetpubkeyWithPeers(data):
    """Validate a getpubkey object, record it in the inventory, advertise it
    to peers on its stream, and queue it for local processing.

    Bug fix: the oversize sanity check (len > 200) now actually ignores the
    object — the original logged 'Ignoring object.' but was missing the
    ``return`` and processed the object anyway.
    """
    if len(data) < 42:
        logger.info('getpubkey message doesn\'t contain enough data. Ignoring.')
        return
    if len(data) > 200:
        logger.info('getpubkey is abnormally long. Sanity check failed. Ignoring object.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    requestedAddressVersionNumber, addressVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += addressVersionLength
    streamNumber, streamNumberLength = decodeVarint(
        data[readPosition:readPosition + 10])
    if not streamNumber in streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    readPosition += streamNumberLength
    shared.numberOfInventoryLookupsPerformed += 1
    inventoryHash = calculateInventoryHash(data)
    # Manual acquire/release: the lock is released on every branch below.
    inventoryLock.acquire()
    if inventoryHash in inventory:
        logger.debug('We have already received this getpubkey request. Ignoring it.')
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        logger.debug('We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it.')
        inventoryLock.release()
        return
    objectType = 0
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime,'')
    inventorySets[streamNumber].add(inventoryHash)
    inventoryLock.release()
    # This getpubkey request is valid. Forward to peers.
    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    # If we already have too much data in the queue to be processed, just sleep for now.
    while shared.objectProcessorQueueSize > 120000000:
        time.sleep(2)
    with shared.objectProcessorQueueSizeLock:
        shared.objectProcessorQueueSize += len(data)
        objectProcessorQueue.put((objectType,data))
def _checkAndSharePubkeyWithPeers(data):
    """Validate a pubkey object, record it (with its tag, for v4+) in the
    inventory, advertise it to peers on its stream, and queue it for local
    processing."""
    if len(data) < 146 or len(data) > 440:  # sanity check
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    addressVersion, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    streamNumber, varintLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += varintLength
    if not streamNumber in streamsInWhichIAmParticipating:
        logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
        return
    if addressVersion >= 4:
        # v4+ pubkeys carry a 32-byte tag immediately after the varints.
        tag = data[readPosition:readPosition + 32]
        logger.debug('tag in received pubkey is: %s' % tag.encode('hex'))
    else:
        tag = ''
    shared.numberOfInventoryLookupsPerformed += 1
    inventoryHash = calculateInventoryHash(data)
    # Manual acquire/release: the lock is released on every branch below.
    inventoryLock.acquire()
    if inventoryHash in inventory:
        logger.debug('We have already received this pubkey. Ignoring it.')
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        logger.debug('We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it.')
        inventoryLock.release()
        return
    objectType = 1
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    inventorySets[streamNumber].add(inventoryHash)
    inventoryLock.release()
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    # If we already have too much data in the queue to be processed, just sleep for now.
    while shared.objectProcessorQueueSize > 120000000:
        time.sleep(2)
    with shared.objectProcessorQueueSizeLock:
        shared.objectProcessorQueueSize += len(data)
        objectProcessorQueue.put((objectType,data))
def _checkAndShareBroadcastWithPeers(data):
    """Validate a broadcast object, record it (with its tag, for broadcast
    version 3+) in the inventory, advertise it to peers on its stream, and
    queue it for local processing.

    NOTE(review): if broadcastVersion < 2, streamNumber and tag are never
    assigned and the inventory insert below would raise NameError (caught
    and logged by the caller's broad except) — confirm whether v1
    broadcasts can still arrive.
    """
    if len(data) < 180:
        logger.debug('The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.')
        return
    embeddedTime, = unpack('>Q', data[8:16])
    readPosition = 20  # bypass the nonce, time, and object type
    broadcastVersion, broadcastVersionLength = decodeVarint(
        data[readPosition:readPosition + 10])
    readPosition += broadcastVersionLength
    if broadcastVersion >= 2:
        streamNumber, streamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
        readPosition += streamNumberLength
        if not streamNumber in streamsInWhichIAmParticipating:
            logger.debug('The streamNumber %s isn\'t one we are interested in.' % streamNumber)
            return
    if broadcastVersion >= 3:
        # v3+ broadcasts carry a 32-byte tag used for subscription matching.
        tag = data[readPosition:readPosition+32]
    else:
        tag = ''
    shared.numberOfInventoryLookupsPerformed += 1
    # Manual acquire/release: the lock is released on every branch below.
    inventoryLock.acquire()
    inventoryHash = calculateInventoryHash(data)
    if inventoryHash in inventory:
        logger.debug('We have already received this broadcast object. Ignoring.')
        inventoryLock.release()
        return
    elif isInSqlInventory(inventoryHash):
        logger.debug('We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.')
        inventoryLock.release()
        return
    # It is valid. Let's let our peers know about it.
    objectType = 3
    inventory[inventoryHash] = (
        objectType, streamNumber, data, embeddedTime, tag)
    inventorySets[streamNumber].add(inventoryHash)
    inventoryLock.release()
    # This object is valid. Forward it to peers.
    logger.debug('advertising inv with hash: %s' % inventoryHash.encode('hex'))
    broadcastToSendDataQueues((streamNumber, 'advertiseobject', inventoryHash))
    # Now let's queue it to be processed ourselves.
    # If we already have too much data in the queue to be processed, just sleep for now.
    while shared.objectProcessorQueueSize > 120000000:
        time.sleep(2)
    with shared.objectProcessorQueueSizeLock:
        shared.objectProcessorQueueSize += len(data)
        objectProcessorQueue.put((objectType,data))
def openKeysFile():
    """Open keys.dat in an external viewer: xdg-open on Linux, the file
    association handler (os.startfile) elsewhere."""
    keysPath = shared.appdata + 'keys.dat'
    if 'linux' in sys.platform:
        subprocess.call(["xdg-open", keysPath])
    else:
        os.startfile(keysPath)
def writeKeysFile():
    """Write shared.config out to keys.dat, keeping a timestamped .bak copy
    during the write so a failed disk write cannot destroy the only copy.

    Fix: the backup step now catches only (IOError, OSError) — the errors
    shutil.copyfile raises when the source file is missing or unreadable —
    instead of a bare ``except:`` that would hide unrelated bugs.
    """
    fileName = shared.appdata + 'keys.dat'
    fileNameBak = fileName + "." + datetime.datetime.now().strftime("%Y%j%H%M%S%f") + '.bak'
    # create a backup copy to prevent the accidental loss due to the disk write failure
    try:
        shutil.copyfile(fileName, fileNameBak)
        # The backup succeeded.
        fileNameExisted = True
    except (IOError, OSError):
        # The backup failed. This can happen if the file didn't exist before.
        fileNameExisted = False
    # write the file
    with open(fileName, 'wb') as configfile:
        shared.config.write(configfile)
    # delete the backup
    if fileNameExisted:
        os.remove(fileNameBak)
from debug import logger
|
{
"content_hash": "20a3c4c837d62dc16b51683c6ef863e0",
"timestamp": "",
"source": "github",
"line_count": 879,
"max_line_length": 533,
"avg_line_length": 47.833902161547215,
"alnum_prop": 0.6933120867621176,
"repo_name": "metamarcdw/PyBitmessage-I2P",
"id": "74fb65f18fc04304a13bb9a0cd186bf79ab7ae86",
"size": "42046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shared.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2834"
},
{
"name": "Python",
"bytes": "1071960"
},
{
"name": "QMake",
"bytes": "11274"
},
{
"name": "Shell",
"bytes": "14814"
}
],
"symlink_target": ""
}
|
from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import contextlib
import datetime
import functools
import socket
import subprocess
import sys
import threading
import time
import types
from unittest import mock
import unittest
from tornado.escape import native_str
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError, PeriodicCallback
from tornado.log import app_log
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import skipIfNonUnix, skipOnTravis
import typing
if typing.TYPE_CHECKING:
from typing import List # noqa: F401
class TestIOLoop(AsyncTestCase):
    def test_add_callback_return_sequence(self):
        """A callback returning {} or [] shouldn't spin the CPU, see Issue #1803."""
        self.calls = 0
        loop = self.io_loop
        test = self
        old_add_callback = loop.add_callback
        # Wrap add_callback so every scheduled callback is counted; a busy
        # loop would show up as a large count within the 50ms window.
        def add_callback(self, callback, *args, **kwargs):
            test.calls += 1
            old_add_callback(callback, *args, **kwargs)
        loop.add_callback = types.MethodType(add_callback, loop)
        loop.add_callback(lambda: {})
        loop.add_callback(lambda: [])
        loop.add_timeout(datetime.timedelta(milliseconds=50), loop.stop)
        loop.start()
        self.assertLess(self.calls, 10)
    @skipOnTravis
    def test_add_callback_wakeup(self):
        # Make sure that add_callback from inside a running IOLoop
        # wakes up the IOLoop immediately instead of waiting for a timeout.
        def callback():
            self.called = True
            self.stop()
        def schedule_callback():
            self.called = False
            self.io_loop.add_callback(callback)
            # Store away the time so we can check if we woke up immediately
            self.start_time = time.time()
        self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
        self.wait()
        # places=2 tolerates ~10ms of scheduling jitter while still
        # distinguishing an immediate wakeup from a poll timeout.
        self.assertAlmostEqual(time.time(), self.start_time, places=2)
        self.assertTrue(self.called)
    @skipOnTravis
    def test_add_callback_wakeup_other_thread(self):
        """add_callback from another thread must wake the polling IOLoop promptly."""
        def target():
            # sleep a bit to let the ioloop go into its poll loop
            time.sleep(0.01)
            self.stop_time = time.time()
            self.io_loop.add_callback(self.stop)
        thread = threading.Thread(target=target)
        self.io_loop.add_callback(thread.start)
        self.wait()
        # The loop should stop well within 100ms of the cross-thread callback.
        delta = time.time() - self.stop_time
        self.assertLess(delta, 0.1)
        thread.join()
def test_add_timeout_timedelta(self):
self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(
sock.fileno(), lambda fd, events: None, IOLoop.READ
)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(
Exception,
self.io_loop.add_handler,
sock.fileno(),
lambda fd, events: None,
IOLoop.READ,
)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
# remove_handler should not throw an exception if called on an fd
# was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
    def test_add_callback_from_signal(self):
        # cheat a little bit and just run this normally, since we can't
        # easily simulate the races that happen with real signal handlers
        self.io_loop.add_callback_from_signal(self.stop)
        self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
    def test_add_callback_while_closing(self):
        # add_callback should not fail if it races with another thread
        # closing the IOLoop. The callbacks are dropped silently
        # without executing.
        closing = threading.Event()
        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        # Hammer add_callback while the other thread is in close(); none of
        # these may raise even though many land after the loop is closed.
        for i in range(1000):
            other_ioloop.add_callback(lambda: None)
    @skipIfNonUnix # just because socketpair is so convenient
    def test_read_while_writeable(self):
        # Ensure that write events don't come in while we're waiting for
        # a read and haven't asked for writeability. (the reverse is
        # difficult to test for)
        client, server = socket.socketpair()
        try:
            def handler(fd, events):
                # Only READ was registered, so only READ may be reported.
                self.assertEqual(events, IOLoop.READ)
                self.stop()
            self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
            self.io_loop.add_timeout(
                self.io_loop.time() + 0.01, functools.partial(server.send, b"asdf")
            )
            self.wait()
            self.io_loop.remove_handler(client.fileno())
        finally:
            client.close()
            server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
    def test_remove_timeout_cleanup(self):
        # Add and remove enough callbacks to trigger cleanup.
        # Not a very thorough test, but it ensures that the cleanup code
        # gets executed and doesn't blow up. This test is only really useful
        # on PollIOLoop subclasses, but it should run silently on any
        # implementation.
        for i in range(2000):
            timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600, lambda: None)
            self.io_loop.remove_timeout(timeout)
        # HACK: wait two IOLoop iterations for the GC to happen.
        self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
        self.wait()
    def test_remove_timeout_from_timeout(self):
        calls = [False, False]
        # Schedule several callbacks and wait for them all to come due at once.
        # t2 should be cancelled by t1, even though it is already scheduled to
        # be run before the ioloop even looks at it.
        now = self.io_loop.time()
        def t1():
            calls[0] = True
            self.io_loop.remove_timeout(t2_handle)
        self.io_loop.add_timeout(now + 0.01, t1)
        def t2():
            calls[1] = True
        t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
        self.io_loop.add_timeout(now + 0.03, self.stop)
        # Sleep past all three deadlines so they are simultaneously due when
        # the loop resumes in wait().
        time.sleep(0.03)
        self.wait()
        self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = [] # type: List[int]
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0), results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
# The asyncio event loop does not guarantee the order of these
# callbacks.
self.assertEqual(sorted(results), [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                # Flipped to True once close() is invoked on the wrapper.
                self.closed = False
            def fileno(self):
                return self.sockobj.fileno()
            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None, IOLoop.READ)
        # close(all_fds=True) must call the wrapper's close(), not merely
        # close the underlying descriptor number.
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)
    def test_handler_callback_file_object(self):
        """The handler callback receives the same fd object it passed in."""
        server_sock, port = bind_unused_port()
        fds = []
        def handle_connection(fd, events):
            # Record whatever "fd" value the IOLoop hands back to us.
            fds.append(fd)
            conn, addr = server_sock.accept()
            conn.close()
            self.stop()
        # First register using the socket object itself...
        self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(("127.0.0.1", port))
            self.wait()
        self.io_loop.remove_handler(server_sock)
        # ...then again using the raw file descriptor number.
        self.io_loop.add_handler(server_sock.fileno(), handle_connection, IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(("127.0.0.1", port))
            self.wait()
        # The callback must see exactly what was registered in each case.
        self.assertIs(fds[0], server_sock)
        self.assertEqual(fds[1], server_sock.fileno())
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
    def test_reentrant(self):
        """Calling start() twice should raise an error, not deadlock."""
        returned_from_start = [False]
        got_exception = [False]
        def callback():
            try:
                # Re-entering start() from inside a running callback must
                # raise rather than nest or deadlock.
                self.io_loop.start()
                returned_from_start[0] = True
            except Exception:
                got_exception[0] = True
            self.stop()
        self.io_loop.add_callback(callback)
        self.wait()
        self.assertTrue(got_exception[0])
        self.assertFalse(returned_from_start[0])
    def test_exception_logging(self):
        """Uncaught exceptions get logged by the IOLoop."""
        # The ZeroDivisionError must be caught and logged by the loop
        # rather than propagating out of wait().
        self.io_loop.add_callback(lambda: 1 / 0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
    def test_exception_logging_future(self):
        """The IOLoop examines exceptions from Futures and logs them."""
        @gen.coroutine
        def callback():
            # Schedule stop first so the test finishes even though the
            # coroutine then fails with ZeroDivisionError.
            self.io_loop.add_callback(self.stop)
            1 / 0
        self.io_loop.add_callback(callback)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
    def test_exception_logging_native_coro(self):
        """The IOLoop examines exceptions from awaitables and logs them."""
        async def callback():
            # Stop the IOLoop two iterations after raising an exception
            # to give the exception time to be logged.
            self.io_loop.add_callback(self.io_loop.add_callback, self.stop)
            1 / 0
        self.io_loop.add_callback(callback)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
    def test_spawn_callback(self):
        # Both add_callback and spawn_callback run directly on the IOLoop,
        # so their errors are logged without stopping the test.
        self.io_loop.add_callback(lambda: 1 / 0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
        # A spawned callback is run directly on the IOLoop, so it will be
        # logged without stopping the test.
        self.io_loop.spawn_callback(lambda: 1 / 0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()
    @skipIfNonUnix
    def test_remove_handler_from_handler(self):
        # Create two sockets with simultaneous read events.
        client, server = socket.socketpair()
        try:
            client.send(b"abc")
            server.send(b"abc")
            # After reading from one fd, remove the other from the IOLoop.
            chunks = []
            def handle_read(fd, events):
                chunks.append(fd.recv(1024))
                # Whichever side fires first unregisters its peer, so the
                # peer's pending read event must never be delivered.
                if fd is client:
                    self.io_loop.remove_handler(server)
                else:
                    self.io_loop.remove_handler(client)
            self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
            self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
            self.io_loop.call_later(0.1, self.stop)
            self.wait()
            # Only one fd was read; the other was cleanly removed.
            self.assertEqual(chunks, [b"abc"])
        finally:
            client.close()
            server.close()
    @gen_test
    def test_init_close_race(self):
        # Regression test for #2367
        #
        # Repeatedly construct and close IOLoops from two worker threads
        # concurrently; this must not interfere with the running loop.
        def f():
            for i in range(10):
                loop = IOLoop()
                loop.close()

        yield gen.multi([self.io_loop.run_in_executor(None, f) for i in range(2)])
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
    """Tests for the bookkeeping of IOLoop.current()."""

    def setUp(self):
        # Start each test with no current IOLoop.
        self.io_loop = None
        IOLoop.clear_current()

    def tearDown(self):
        if self.io_loop is not None:
            self.io_loop.close()

    def test_default_current(self):
        self.io_loop = IOLoop()
        # The first IOLoop with default arguments is made current.
        self.assertIs(self.io_loop, IOLoop.current())
        # A second IOLoop can be created but is not made current.
        io_loop2 = IOLoop()
        self.assertIs(self.io_loop, IOLoop.current())
        io_loop2.close()

    def test_non_current(self):
        self.io_loop = IOLoop(make_current=False)
        # The new IOLoop is not initially made current.
        self.assertIsNone(IOLoop.current(instance=False))
        # Starting the IOLoop makes it current, and stopping the loop
        # makes it non-current. This process is repeatable.
        for i in range(3):
            def f():
                self.current_io_loop = IOLoop.current()
                self.io_loop.stop()
            self.io_loop.add_callback(f)
            self.io_loop.start()
            self.assertIs(self.current_io_loop, self.io_loop)
            # Now that the loop is stopped, it is no longer current.
            self.assertIsNone(IOLoop.current(instance=False))

    def test_force_current(self):
        self.io_loop = IOLoop(make_current=True)
        self.assertIs(self.io_loop, IOLoop.current())
        with self.assertRaises(RuntimeError):
            # A second make_current=True construction cannot succeed.
            IOLoop(make_current=True)
        # current() was not affected by the failed construction.
        self.assertIs(self.io_loop, IOLoop.current())
class TestIOLoopCurrentAsync(AsyncTestCase):
    """current()/clear_current() behavior that needs a running loop."""

    @gen_test
    def test_clear_without_current(self):
        # If there is no current IOLoop, clear_current is a no-op (but
        # should not fail). Use a thread so we see the threading.Local
        # in a pristine state.
        with ThreadPoolExecutor(1) as e:
            yield e.submit(IOLoop.clear_current)
class TestIOLoopFutures(AsyncTestCase):
    """Interaction between the IOLoop and concurrent.futures executors."""

    def test_add_future_threads(self):
        # add_future must run the callback on the IOLoop once the
        # thread-pool future resolves.
        with futures.ThreadPoolExecutor(1) as pool:
            def dummy():
                pass
            self.io_loop.add_future(
                pool.submit(dummy), lambda future: self.stop(future)
            )
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None)

    @gen_test
    def test_run_in_executor_gen(self):
        event1 = threading.Event()
        event2 = threading.Event()

        def sync_func(self_event, other_event):
            self_event.set()
            other_event.wait()
            # Note that return value doesn't actually do anything,
            # it is just passed through to our final assertion to
            # make sure it is passed through properly.
            return self_event

        # Run two synchronous functions, which would deadlock if not
        # run in parallel.
        res = yield [
            IOLoop.current().run_in_executor(None, sync_func, event1, event2),
            IOLoop.current().run_in_executor(None, sync_func, event2, event1),
        ]
        self.assertEqual([event1, event2], res)

    @gen_test
    def test_run_in_executor_native(self):
        event1 = threading.Event()
        event2 = threading.Event()

        def sync_func(self_event, other_event):
            self_event.set()
            other_event.wait()
            return self_event

        # Go through an async wrapper to ensure that the result of
        # run_in_executor works with await and not just gen.coroutine
        # (simply passing the underlying concurrrent future would do that).
        async def async_wrapper(self_event, other_event):
            return await IOLoop.current().run_in_executor(
                None, sync_func, self_event, other_event
            )

        res = yield [async_wrapper(event1, event2), async_wrapper(event2, event1)]
        self.assertEqual([event1, event2], res)

    @gen_test
    def test_set_default_executor(self):
        count = [0]

        # Counting executor lets us verify run_in_executor(None, ...)
        # actually routed through the configured default.
        class MyExecutor(futures.ThreadPoolExecutor):
            def submit(self, func, *args):
                count[0] += 1
                return super(MyExecutor, self).submit(func, *args)

        event = threading.Event()

        def sync_func():
            event.set()

        executor = MyExecutor(1)
        loop = IOLoop.current()
        loop.set_default_executor(executor)
        yield loop.run_in_executor(None, sync_func)
        self.assertEqual(1, count[0])
        self.assertTrue(event.is_set())
class TestIOLoopRunSync(unittest.TestCase):
    """Tests for IOLoop.run_sync with plain, coroutine and native callables."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        # run_sync expects an awaitable; a bare value raises BadYieldError.
        with self.assertRaises(gen.BadYieldError):
            self.io_loop.run_sync(lambda: 42)

    def test_sync_exception(self):
        # Exceptions from a synchronous callable propagate to the caller.
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.moment
            raise gen.Return(42)
        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.moment
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        # Inside run_sync, this loop is the current one.
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)
        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.sleep(1)
        # A 10ms timeout must abort the 1s sleep with TimeoutError.
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)

    def test_native_coroutine(self):
        @gen.coroutine
        def f1():
            yield gen.moment

        async def f2():
            await f1()

        self.io_loop.run_sync(f2)
class TestPeriodicCallbackMath(unittest.TestCase):
    """Schedule math of PeriodicCallback, driven without a real IOLoop."""

    def simulate_calls(self, pc, durations):
        """Simulate a series of calls to the PeriodicCallback.

        Pass a list of call durations in seconds (negative values
        work to simulate clock adjustments during the call, or more or
        less equivalently, between calls). This method returns the
        times at which each call would be made.
        """
        calls = []
        # Fixed virtual start time; only _update_next is exercised.
        now = 1000
        pc._next_timeout = now
        for d in durations:
            pc._update_next(now)
            calls.append(pc._next_timeout)
            now = pc._next_timeout + d
        return calls

    def dummy(self):
        # Placeholder callback; never actually invoked in these tests.
        pass

    def test_basic(self):
        pc = PeriodicCallback(self.dummy, 10000)
        self.assertEqual(
            self.simulate_calls(pc, [0] * 5), [1010, 1020, 1030, 1040, 1050]
        )

    def test_overrun(self):
        # If a call runs for too long, we skip entire cycles to get
        # back on schedule.
        call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
        expected = [
            1010,
            1020,
            1030,  # first 3 calls on schedule
            1050,
            1070,  # next 2 delayed one cycle
            1100,
            1130,  # next 2 delayed 2 cycles
            1170,
            1210,  # next 2 delayed 3 cycles
            1220,
            1230,  # then back on schedule.
        ]
        pc = PeriodicCallback(self.dummy, 10000)
        self.assertEqual(self.simulate_calls(pc, call_durations), expected)

    def test_clock_backwards(self):
        pc = PeriodicCallback(self.dummy, 10000)
        # Backwards jumps are ignored, potentially resulting in a
        # slightly slow schedule (although we assume that when
        # time.time() and time.monotonic() are different, time.time()
        # is getting adjusted by NTP and is therefore more accurate)
        self.assertEqual(
            self.simulate_calls(pc, [-2, -1, -3, -2, 0]), [1010, 1020, 1030, 1040, 1050]
        )
        # For big jumps, we should perhaps alter the schedule, but we
        # don't currently. This trace shows that we run callbacks
        # every 10s of time.time(), but the first and second calls are
        # 110s of real time apart because the backwards jump is
        # ignored.
        self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]), [1010, 1020, 1030])

    def test_jitter(self):
        # With jitter=0.5 each interval is scaled by a factor drawn from
        # the patched random.random() sequence below.
        random_times = [0.5, 1, 0, 0.75]
        expected = [1010, 1022.5, 1030, 1041.25]
        call_durations = [0] * len(random_times)
        pc = PeriodicCallback(self.dummy, 10000, jitter=0.5)

        def mock_random():
            return random_times.pop(0)

        with mock.patch("random.random", mock_random):
            self.assertEqual(self.simulate_calls(pc, call_durations), expected)
class TestIOLoopConfiguration(unittest.TestCase):
    """Checks which IOLoop implementation is selected in a fresh process."""

    def run_python(self, *statements):
        """Run `statements` in a python subprocess and return its stripped output."""
        stmt_list = [
            "from tornado.ioloop import IOLoop",
            "classname = lambda x: x.__class__.__name__",
        ] + list(statements)
        args = [sys.executable, "-c", "; ".join(stmt_list)]
        return native_str(subprocess.check_output(args)).strip()

    def test_default(self):
        # When asyncio is available, it is used by default.
        cls = self.run_python("print(classname(IOLoop.current()))")
        self.assertEqual(cls, "AsyncIOMainLoop")
        cls = self.run_python("print(classname(IOLoop()))")
        self.assertEqual(cls, "AsyncIOLoop")

    def test_asyncio(self):
        cls = self.run_python(
            'IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")',
            "print(classname(IOLoop.current()))",
        )
        self.assertEqual(cls, "AsyncIOMainLoop")

    def test_asyncio_main(self):
        cls = self.run_python(
            "from tornado.platform.asyncio import AsyncIOMainLoop",
            "AsyncIOMainLoop().install()",
            "print(classname(IOLoop.current()))",
        )
        self.assertEqual(cls, "AsyncIOMainLoop")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "2c9f9166bc6e7593855277a2b6c5b591",
"timestamp": "",
"source": "github",
"line_count": 719,
"max_line_length": 88,
"avg_line_length": 35.24478442280946,
"alnum_prop": 0.6002525551477842,
"repo_name": "NoyaInRain/tornado",
"id": "293ea3562a2520e3b6cf66680a1de4cbb48e49da",
"size": "25341",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tornado/test/ioloop_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12417"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "1467142"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "5045"
}
],
"symlink_target": ""
}
|
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from nova import exception
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
class VolumeOps(object):
    """Management class for Volume-related tasks (attach, detach, etc)."""

    def __init__(self, session):
        self._session = session

    def attach_volume(self, connection_info, instance_name, mountpoint,
                      hotplug=True):
        """Attach volume to VM instance."""
        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
        return self._attach_volume(connection_info, vm_ref,
                                   instance_name, mountpoint, hotplug)

    def connect_volume(self, connection_info):
        """Attach volume to hypervisor, but not the VM."""
        return self._attach_volume(connection_info)

    def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
                       mountpoint=None, hotplug=False):
        """Connect the volume's SR/VDI and optionally plug it into a VM.

        The fourth parameter was previously named ``dev_number`` although
        callers pass the mountpoint string; it is only converted to a
        device number inside _attach_volume_to_vm, so it is renamed here
        to reflect what it actually carries.

        :returns: (sr_uuid, vdi_uuid) tuple for the connected volume.
        """
        self._check_is_supported_driver_type(connection_info)

        connection_data = connection_info['data']
        sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
                                                           instance_name)
        try:
            vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
                                                         connection_data)
            vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
            LOG.info('Connected volume (vdi_uuid): %s', vdi_uuid)

            if vm_ref:
                self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
                                          mountpoint, hotplug)

            return (sr_uuid, vdi_uuid)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(sirp): Forgetting the SR will have the effect of
                # cleaning up the VDI and VBD records, so no need to handle
                # that explicitly.
                volume_utils.forget_sr(self._session, sr_ref)

    def _check_is_supported_driver_type(self, connection_info):
        """Raise VolumeDriverNotFound unless the driver type is supported."""
        driver_type = connection_info['driver_volume_type']
        if driver_type not in ['iscsi', 'xensm']:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)

    def _connect_to_volume_provider(self, connection_data, instance_name):
        """Find the SR backing this volume, introducing it if absent.

        :returns: (sr_ref, sr_uuid) tuple.
        """
        sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
            connection_data, 'Disk-for:%s' % instance_name)
        sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
        if not sr_ref:
            # introduce SR because not already present
            sr_ref = volume_utils.introduce_sr(
                self._session, sr_uuid, sr_label, sr_params)
        return (sr_ref, sr_uuid)

    def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
        """Introduce the VDI for this volume on the hypervisor."""
        # connection_data can have credentials in it so make sure to scrub
        # those before logging.
        LOG.debug("Connect volume to hypervisor: %s",
                  strutils.mask_password(connection_data))

        if 'vdi_uuid' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                vdi_uuid=connection_data['vdi_uuid'])
        elif 'target_lun' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                target_lun=connection_data['target_lun'])
        else:
            # NOTE(sirp): This will introduce the first VDI in the SR
            vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)

        return vdi_ref

    def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, mountpoint,
                             hotplug):
        """Create (and optionally hot-plug) the VBD attaching the VDI."""
        LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
                  {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})

        dev_number = volume_utils.get_device_number(mountpoint)
        # osvol is added to the vbd so we can spot which vbds are volumes
        vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                      dev_number, bootable=False,
                                      osvol=True)
        if hotplug:
            # NOTE(johngarbutt) can only call VBD.plug on a running vm
            running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
            if running:
                LOG.debug("Plugging VBD: %s", vbd_ref)
                self._session.VBD.plug(vbd_ref, vm_ref)

        LOG.info('Dev %(dev_number)s attached to'
                 ' instance %(instance_name)s',
                 {'instance_name': instance_name, 'dev_number': dev_number})

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage to VM instance."""
        LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
                  {'instance_name': instance_name, 'mountpoint': mountpoint})

        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

        device_number = volume_utils.get_device_number(mountpoint)
        vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
                                                  device_number)

        if vbd_ref is None:
            # NOTE(sirp): If we don't find the VBD then it must have been
            # detached previously.
            LOG.warning('Skipping detach because VBD for %s was not found',
                        instance_name)
        else:
            self._detach_vbds_and_srs(vm_ref, [vbd_ref])
            LOG.info('Mountpoint %(mountpoint)s detached from instance'
                     ' %(instance_name)s',
                     {'instance_name': instance_name,
                      'mountpoint': mountpoint})

    def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
        """Unplug/destroy the given VBDs and purge their now-unused SRs."""
        is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)

        for vbd_ref in vbd_refs:
            # find sr before we destroy the vbd
            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)

            if not is_vm_shutdown:
                vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)

            vm_utils.destroy_vbd(self._session, vbd_ref)
            # Forget (i.e. disconnect) SR only if not in use
            volume_utils.purge_sr(self._session, sr_ref)

    def detach_all(self, vm_ref):
        """Detach all cinder volumes."""
        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
        if vbd_refs:
            self._detach_vbds_and_srs(vm_ref, vbd_refs)

    def _get_all_volume_vbd_refs(self, vm_ref):
        """Return VBD refs for all Nova/Cinder volumes."""
        vbd_refs = self._session.VM.get_VBDs(vm_ref)
        for vbd_ref in vbd_refs:
            other_config = self._session.VBD.get_other_config(vbd_ref)
            # Volume VBDs are marked with 'osvol' at creation time.
            if other_config.get('osvol'):
                yield vbd_ref

    def find_bad_volumes(self, vm_ref):
        """Find any volumes with their connection severed.

        Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
        work when a VBD is present that points to a non-working volume. To work
        around this, we scan for non-working volumes and detach them before
        retrying a failed operation.
        """
        bad_devices = []
        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
        for vbd_ref in vbd_refs:
            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)

            try:
                # TODO(sirp): bug1152401 This relies on a 120 sec timeout
                # within XenServer, update this to fail-fast when this is fixed
                # upstream
                self._session.SR.scan(sr_ref)
            except self._session.XenAPI.Failure as exc:
                if exc.details[0] == 'SR_BACKEND_FAILURE_40':
                    device = self._session.VBD.get_device(vbd_ref)
                    bad_devices.append('/dev/%s' % device)
                else:
                    raise

        return bad_devices

    def safe_cleanup_from_vdis(self, vdi_refs):
        # A helper method to detach volumes that are not associated with an
        # instance
        for vdi_ref in vdi_refs:
            try:
                sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
            except exception.StorageError as exc:
                LOG.debug(exc.format_message())
                continue
            try:
                # Forget (i.e. disconnect) SR only if not in use
                volume_utils.purge_sr(self._session, sr_ref)
            except Exception:
                LOG.debug('Ignoring error while purging sr: %s', sr_ref,
                          exc_info=True)
|
{
"content_hash": "c665928b3fd1a8c800cf0eb08a01ac17",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 79,
"avg_line_length": 42.08056872037915,
"alnum_prop": 0.56751886473702,
"repo_name": "jianghuaw/nova",
"id": "9195be10a73e97796da6ae34eeb80ed89a74a426",
"size": "9538",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/virt/xenapi/volumeops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Shared dimensions for the synthetic data and models built in these tests.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  # Hook names that get wrapped with a counting shim in __init__.
  _METHODS_TO_COUNT = (
      'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
      'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
      'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
      'on_test_begin', 'on_test_end', 'on_train_batch_begin',
      'on_train_batch_end', 'on_train_begin', 'on_train_end')

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    # Replace each hook with a wrapper that counts its invocations.
    for name in self._METHODS_TO_COUNT:
      setattr(self, name, self.wrap_with_counts(name, getattr(self, name)))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped so that every call bumps its counter."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns (keras Sequence of 5 all-ones batches of size 2, None)."""

  class MySequence(keras.utils.data_utils.Sequence):

    def __getitem__(self, _):
      # Every index yields the same all-ones (x, y) batch.
      return np.ones((2, 10)), np.ones((2, 1))

    def __len__(self):
      return 5

  return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how many times each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    """Builds and compiles a small two-layer binary classifier."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)

    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2 if not is_sequence else None,
        steps_per_epoch=5 if is_sequence else None,
        epochs=5,
        callbacks=[counter])

    # 5 epochs x 5 train batches, plus a 2-batch validation pass per epoch.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    x, y = data
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)

    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    # A single evaluation pass over 5 batches.
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)

    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    # A single prediction pass over 5 batches.
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])

    # Drive the CallbackList hooks directly, without a model.
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)

    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
  def _get_model(self, input_shape=None):
    """Builds a small softmax classifier compiled with a `my_acc` metric."""
    layers = [
        keras.layers.Dense(3, activation='relu'),
        keras.layers.Dense(2, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging(self):
    """Fit on a dataset prints loss and my_acc progress lines."""
    model = self._get_model(input_shape=(3,))

    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types(exclude_models='functional')
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_deferred_model_build(self):
    """Progbar logging also works when the model is built during fit."""
    model = self._get_model()
    # The model must not be built yet; fit() triggers the build.
    self.assertFalse(model.built)

    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_data(self):
    """Progbar output includes val_loss/val_my_acc when validation_data is set."""
    model = self._get_model(input_shape=(3,))

    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(training_dataset, epochs=2, validation_data=val_dataset)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_split(self):
    """Progbar shows 80/80 train samples and val metrics with a 0.2 split."""
    model = self._get_model(input_shape=(3,))

    x = np.ones((100, 3))
    y = np.zeros((100, 2))
    expected_log = (
        r'(?s).*1/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
        r'.*2/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_progbar_logging_training_validation(self):
    """Progbar logging works with generator-backed train + validation datasets."""
    model = self._get_model(input_shape=(2,))

    def generator():
      for _ in range(100):
        yield [1, 1], 1

    training = dataset_ops.Dataset \
        .from_generator(
            generator=generator,
            output_types=('float64', 'float64'),
            output_shapes=([2], [])) \
        .batch(2) \
        .repeat()
    validation = dataset_ops.Dataset \
        .from_generator(
            generator=generator,
            output_types=('float64', 'float64'),
            output_shapes=([2], [])) \
        .batch(2)
    expected_log = (
        r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
        r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(
          x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_progbar_logging_with_dataset_and_partial_batch(self):
    """Validation metrics are computed correctly with a trailing partial batch."""
    model = self._get_model(input_shape=(2,))

    def generator():
      # Have a partial batch at the end.
      for _ in range(9):
        yield np.random.random(2), 1

    training = dataset_ops.Dataset \
        .from_generator(
            generator=generator,
            output_types=('float64', 'float64'),
            output_shapes=([2], [])) \
        .batch(2)
    validation = dataset_ops.Dataset \
        .from_generator(
            generator=generator,
            output_types=('float64', 'float64'),
            output_shapes=([2], [])) \
        .batch(2)

    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(x=training, validation_data=validation)

      # Make sure the value of val_ metrics are not zeros.
      log_content = printed.contents()
      val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
      self.assertLen(val_loss, 1)
      self.assertGreater(float(val_loss[0]), 0.0)
  @keras_parameterized.run_with_all_model_types
  def test_ModelCheckpoint(self):
    """End-to-end coverage of `ModelCheckpoint` option combinations.

    Walks through monitor/mode/save_best_only permutations, a missing
    monitored metric, the deprecated `period` argument, 'epoch' and integer
    `save_freq` values, and invalid arguments, asserting on exactly which
    checkpoint files get written to disk.
    """
    if h5py is None:
      return  # Skip test if models cannot be saved.
    layers = [
        keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
        keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'checkpoint.h5')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1: save every epoch with mode='auto'.
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 2: explicit mode='min' on val_loss.
    mode = 'min'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 3: mode='max' on an accuracy-style metric.
    mode = 'max'
    monitor = 'val_acc'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # case 4: save_best_only=True still writes a file for the first epoch.
    save_best_only = True
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)
    # Case: metric not available.
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='unknown',
            save_best_only=True)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    # File won't be written.
    assert not os.path.exists(filepath)
    # case 5: deprecated `period` argument saves only every `period` epochs.
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            period=period)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=4,
        verbose=1)
    assert os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=4))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    # Invalid use: this will raise a warning but not an Exception.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode='unknown')
    # Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
    # Though `period` is deprecated, we're testing it for
    # backward-compatibility.
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
    ]
    assert not os.path.exists(filepath.format(epoch=0))
    assert not os.path.exists(filepath.format(epoch=5))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # With save_freq='epoch' and period=5, only epochs 5 and 10 are saved.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert not os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert os.path.exists(filepath.format(epoch=5))
    assert not os.path.exists(filepath.format(epoch=6))
    assert os.path.exists(filepath.format(epoch=10))
    os.remove(filepath.format(epoch=5))
    os.remove(filepath.format(epoch=10))
    # Case 7: `ModelCheckpoint` with an integer `save_freq`
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            save_freq=30,
            period=100)  # The period should be ignored (this test tests this).
    ]
    assert not os.path.exists(filepath.format(epoch=3))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # save_freq=30 batches with 10 batches/epoch -> saves at epochs 3, 6, 9.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=5))
    assert os.path.exists(filepath.format(epoch=6))
    assert not os.path.exists(filepath.format(epoch=7))
    assert not os.path.exists(filepath.format(epoch=8))
    assert os.path.exists(filepath.format(epoch=9))
    os.remove(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=6))
    os.remove(filepath.format(epoch=9))
    # Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
    with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode=mode,
          save_freq='invalid_save_freq')
    # The following should not raise ValueError.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq='epoch')
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq=3)
  def _get_dummy_resource_for_model_checkpoint_testing(self):
    """Builds a deterministic bias-only model plus checkpoint fixtures.

    Returns:
      Tuple of (model, train_dataset, ModelCheckpoint callback, filepath
      pattern containing an `{epoch:02d}` placeholder).
    """
    def get_input_datasets():
      # Simple training input.
      train_input = [[1]] * 16
      train_label = [[0]] * 16
      ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
      return ds.batch(8, drop_remainder=True)
    class Bias(base_layer.Layer):
      # Single scalar bias so training has no randomness from initialization.
      def build(self, input_shape):
        self.bias = self.add_variable('bias', (1,), initializer='zeros')
      def call(self, inputs):
        return inputs + self.bias
    # Very simple bias model to eliminate randomness.
    optimizer = gradient_descent.SGD(0.1)
    model = sequential.Sequential()
    model.add(Bias(input_shape=(1,)))
    model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
    train_ds = get_input_datasets()
    temp_dir = self.get_temp_dir()
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    # The filepath shouldn't exist at the beginning.
    self.assertFalse(os.path.exists(filepath))
    callback = keras.callbacks.ModelCheckpoint(
        filepath=filepath, save_weights_only=True)
    return model, train_ds, callback, filepath
  def _run_load_weights_on_restart_test_common_iterations(self):
    """Shared setup for the load_weights_on_restart tests.

    Fits for a few epochs with a checkpoint callback, then one more epoch
    without it, and verifies which checkpoint files exist.

    Returns:
      Tuple of (model, train_dataset, filepath pattern, weights after the
      extra no-callback epoch).
    """
    (model, train_ds, callback,
     filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
    initial_epochs = 3
    model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
    # The files should exist after fitting with callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
    self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
    self.assertEqual(
        callback._get_most_recently_modified_file_matching_pattern(filepath),
        filepath.format(epoch=initial_epochs))
    model.fit(train_ds, epochs=1)
    weights_after_one_more_epoch = model.get_weights()
    # The filepath should continue to exist after fitting without callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
    return model, train_ds, filepath, weights_after_one_more_epoch
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
    """Builds a test verifying `load_weights_on_restart=True` restores weights."""
    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()
      # Sleep for some short time period ensuring the files are created with
      # a different time (in MacOS OSS the granularity is only 1 second).
      time.sleep(2)
      callback = keras.callbacks.ModelCheckpoint(
          filepath=filepath,
          save_weights_only=save_weights_only,
          load_weights_on_restart=True)
      model.fit(train_ds, epochs=1, callbacks=[callback])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      self.assertEqual(
          callback._get_most_recently_modified_file_matching_pattern(filepath),
          filepath.format(epoch=1))
      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath,
                  save_weights_only=save_weights_only,
                  load_weights_on_restart=True)
          ])
      weights_with_one_final_extra_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are close, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
      self.assertAllClose(weights_after_one_more_epoch,
                          weights_after_model_restoring_and_one_more_epoch)
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_with_one_final_extra_epoch)
    return func
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
    """Builds a test verifying the default (no restore) checkpoint behavior."""
    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()
      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath, save_weights_only=save_weights_only)
          ])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are different, if a ModelCheckpoint with
      # load_weights_on_restart=False is given (so the model is not restored at
      # the beginning of training).
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_after_model_restoring_and_one_more_epoch)
    return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
  def test_EarlyStopping(self):
    """Smoke test: EarlyStopping runs under every mode/monitor combination."""
    with self.cached_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
      # Includes an invalid ('unknown', 'unknown') pair, which should only
      # warn rather than fail.
      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance must be reusable across `fit` calls."""
    with self.cached_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      weights = model.get_weights()
      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """With `baseline` set, training stops immediately if it isn't reached,
    but is allowed `patience` epochs when patience is given."""
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
      # No patience: the first epoch missing the baseline stops training.
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau keeps or reduces the LR depending on `min_delta`."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      def make_model():
        # Fixed seeds so both runs start from identical weights.
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1))
        return model
      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      # min_delta=0: any improvement counts, so the LR should stay at 0.1.
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
  def test_ReduceLROnPlateau_backwards_compatibility(self):
    """The deprecated `epsilon` kwarg maps onto `min_delta` with a warning."""
    with test.mock.patch.object(logging, 'warning') as mock_log:
      reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    # A deprecation warning must have been logged for `epsilon`.
    self.assertRegexpMatches(
        str(mock_log.call_args), '`epsilon` argument is deprecated')
    # The value is carried over to `min_delta`; `epsilon` itself is gone.
    self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
    self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
    self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger: custom separator, append mode, and callback reuse."""
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      def make_model():
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1),
            metrics=['accuracy'])
        return model
      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        # Sniff the written file to confirm the separator was honored.
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks
      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
          assert line.count(sep) == 4
        # 1 header row + 4 logged epochs across the three fits.
        assert len(list_lines) == 5
        output = ' '.join(list_lines)
        # Appending must not repeat the header line.
        assert len(re.findall('epoch', output)) == 1
      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')
      def data_generator():
        # Serves real batches until 3 full passes, then NaN batches so
        # TerminateOnNaN fires mid-training.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index
      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_TerminateOnNaN(self):
    """Training stops after the first epoch once the loss becomes inf/NaN."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      # Huge constant initializer guarantees the loss overflows immediately.
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')
      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      # Of the 20 requested epochs only the first (divergent) one should run.
      self.assertEqual(len(loss), 1)
      self.assertEqual(loss[0], np.inf)
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """A LambdaCallback's on_train_end hook runs when training finishes."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()
      def target():
        e.wait()
      t = threading.Thread(target=target)
      t.start()
      # The on_train_end hook releases the waiting thread via the event.
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())
      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      t.join()
      assert not t.is_alive()
  def test_RemoteMonitorWithJsonPayload(self):
    """RemoteMonitor(send_as_json=True) posts logs without raising."""
    if requests is None:
      self.skipTest('`requests` required to run this test')
    with self.cached_session():
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.np_utils.to_categorical(y_test)
      y_train = keras.utils.np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
      # Mock out the network call; only the callback plumbing is under test.
      with test.mock.patch.object(requests, 'post'):
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1)
def test_callback_params_samples(self):
x, y = np.ones((64, 3)), np.ones((64, 2))
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=2, input_dim=3)
model.compile('sgd', 'mse')
callback = keras.callbacks.Callback()
model.evaluate(x, y, callbacks=[callback])
self.assertEqual(callback.params['samples'], 64)
# A single summary observed while running a test.
#
# Fields:
#   logdir: str. Directory of the FileWriter that wrote the summary.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains a summary of unexpected kind.
  """
  result = _SummaryFile()
  for (dirpath, dirnames, filenames) in os.walk(logdir):
    del dirnames  # unused
    for filename in filenames:
      # Only TensorBoard event files are of interest.
      if not filename.startswith('events.out.'):
        continue
      path = os.path.join(dirpath, filename)
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # (e.g., it's a `graph_def` event)
          continue
        for value in event.summary.value:
          tag = value.tag
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          container = {
              'simple_value': result.scalars,
              'image': result.images,
              'histo': result.histograms,
              'tensor': result.tensors,
          }.get(kind)
          if container is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          elif kind == 'tensor' and tag != 'keras':
            # Check for V2 scalar summaries, which have a different PB
            # structure.
            if event.summary.value[
                0].metadata.plugin_data.plugin_name == 'scalars':
              container = result.scalars
          container.add(_ObservedSummary(logdir=dirpath, tag=tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  def setUp(self):
    """Creates per-test logdir paths for the train and validation runs."""
    super(TestTensorBoardV2, self).setUp()
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    # Run from a temp dir so the default './logs' logdir is isolated.
    os.chdir(self.get_temp_dir())
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(logdir='.')
    # Default logdir is ./logs with 'train' and 'validation' runs.
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def test_custom_summary(self):
if not testing_utils.should_run_tf_function():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = summary_pb2.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with summary_ops_v2.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return summary_ops_v2.write(
tag=tag,
tensor=math_ops.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard V2 tests that construct their models explicitly.

  Covers graph writing (the 'keras' tensor summary) and the profiler's
  auto-trace behavior, neither of which should vary by model type.
  """

  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_seq_model(self):
    """Build and compile a small Sequential conv net with a known input shape."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fit `model` with graph writing on and assert the 'keras' summary exists."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  # NOTE(review): 'TensoriBoard' is a typo in the test name; kept as-is to
  # avoid changing test IDs.
  def test_TensoriBoard_writeModel(self):
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_autoTrace(self):
    """profile_batch=1 records a trace tagged with that batch number."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )

  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    """The trace tag reflects the profiled batch index (here batch 2)."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )

  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """A profile_batch beyond the number of batches produces no trace."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
  """Tests for ModelCheckpoint._get_most_recently_modified_file_matching_pattern."""

  def test_get_most_recently_modified_file_matching_pattern(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation, so the
        # files have strictly increasing modification times.
        time.sleep(2)
        f.write('foo bar')
    # Ensure the files have been actually written.
    self.assertEqual(
        set([
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
        ]), set(file_paths))
    # The last file written has the newest mtime and must be returned.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])

  def test_some_file_not_matching_pattern(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    # 'f.baatch01epoch01.h5' deliberately does NOT match the pattern; even
    # though it is the newest file it must be ignored.
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-2])

  def test_get_same_file_if_file_name_equals_pattern(self):
    """A literal file path (no format fields) resolves to itself."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    with open(file_path, 'w') as f:
      f.write('foo bar')
    self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        file_path)

  def test_get_none_if_file_does_not_exist(self):
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    self.assertLen(os.listdir(test_dir), 0)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        None)

  def test_using_checkpoint_management_latest_checkpoint(self):
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
    ckpt_file_name = 'f.batchXepochY'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
    with open(ckpt_file_path, 'w') as f:
      f.write('dummy ckpt')
    # Register the dummy file as the "latest checkpoint" in TF's
    # checkpoint state so latest_checkpoint() will report it.
    checkpoint_management.update_checkpoint_state_internal(
        test_dir, ckpt_file_path)
    file_paths = [
        os.path.join(test_dir, file_name)
        for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        f.write('foo bar')
    # The result returned from checkpoint_management.latest_checkpoint takes
    # priority, so even if it was written earlier, we should still return that.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        ckpt_file_path)
if __name__ == '__main__':
  # Run all test cases in this module via the TensorFlow test runner.
  test.main()
|
{
"content_hash": "757ebf247f13c78122972e4cae4b4b12",
"timestamp": "",
"source": "github",
"line_count": 1965,
"max_line_length": 80,
"avg_line_length": 34.250381679389314,
"alnum_prop": 0.6131615702356542,
"repo_name": "arborh/tensorflow",
"id": "46c11a148386a9cacb8047ffa042b7d2c7fcbda1",
"size": "67991",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/callbacks_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76730781"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299305"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38757009"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import cPickle
import numpy as np
import warnings
# Silence all warnings globally (pandas/tushare emit deprecation noise).
warnings.filterwarnings("ignore")
# Module-level default; shadowed by same-named locals inside MyUi methods.
list1 = []
class MyUi(QMainWindow):
    """Main window of TuChart (Python 2 code: uses cPickle and str.decode).

    Loads/refreshes the tushare industry classification (cached in the
    pickle files 'class' and 'time' in the working directory), populates a
    category tree of stocks, and renders the selected queries as HTML graphs
    in an embedded web view.
    """

    def __init__(self):
        super(MyUi, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        cwd = os.getcwd()
        cwd = str(cwd)
        if os.path.isfile(cwd+"/time"):
            with open("time","r") as outfile:  # reads the timestamp of the cached pickle
                history = cPickle.load(outfile)
                if (datetime.now()-history).total_seconds()<43200:  # 43200 s == 12 hours
                    print("Less than 12 hours. Loading previously saved Pickle...")
                    #with open("time","w") as infile: #update time
                        #cPickle.dump(datetime.now(),infile)
                else:
                    # Cache is stale: re-download the classification and
                    # refresh both pickle files.
                    print("More than 12 hours. Updating Pickle...")
                    data = ts.get_industry_classified()
                    with open("class","w+") as outfile:
                        cPickle.dump(data,outfile)
                    now = datetime.now()
                    with open("time", "w+") as outfile:  # update cache timestamp
                        cPickle.dump(now, outfile)
        else:
            print("No Pickle found!")  # If this is first time using tuchart in this directory
            data = df()
            data = ts.get_industry_classified()
            with open('class', 'w+') as outfile:  # records pickle
                cPickle.dump(data, outfile)
            now = datetime.now()
            with open("time", "w+") as outfile:
                cPickle.dump(now,outfile)
        with open("class", "r") as infile:  # load the cached classification
            series = cPickle.load(infile)
        #series = pd.read_json(cwd + "\\class.json")
        #series = ts.get_industry_classified()
        series = pd.DataFrame(series)
        curdate = time.strftime("%Y/%m/%d")  # gets current time to put into dateedit
        curdateQ = QDate.fromString(curdate,"yyyy/MM/dd")
        dateobj = datetime.strptime(curdate, "%Y/%m/%d")  # converts to datetime object
        past = dateobj - timedelta(days = 7)  # minus a week to start date
        pasttime = datetime.strftime(past, "%Y/%m/%d")
        pastQ = QDate.fromString(pasttime,"yyyy/MM/dd")  # convert to qtime so that widget accepts the values
        pastL = dateobj - timedelta(days=30)  # minus a month to start date
        pasttimeL = datetime.strftime(pastL, "%Y/%m/%d")
        pastQL = QDate.fromString(pasttimeL, "yyyy/MM/dd")
        # Prepend the market-wide indexes as an extra pseudo-category.
        np_indexes = np.array([['sh', '上证指数', '大盘指数'],
                               ['sz', '深证成指', '大盘指数'],
                               ['hs300', '沪深300指数', '大盘指数'],
                               ['sz50', '上证50', '大盘指数'],
                               ['zxb', '中小板', '大盘指数'],
                               ['cyb', '创业板', '大盘指数']])
        indexes = df(data=np_indexes,
                     index=range(5000, 5006),
                     columns=["code", "name", "c_name"])
        series = indexes.append(series)
        list1_bfr = series["c_name"].tolist()  # Get industry categories. Filters out redundant ones
        # De-duplicate while preserving first-seen order.
        list1 = list(set(list1_bfr))
        list1.sort(key=list1_bfr.index)
        #w = database()
        #zsparent = QTreeWidgetItem(self.ui.treeWidget)
        #zsparent.setText(0,"股票指数")
        #zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
        self.init_treeWidget(list1,series)
        self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)
        #self.ui.webView.setGeometry(QtCore.QRect(0, 30,1550, 861))
        file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "render.html"))  # path to read html file
        local_url = QUrl.fromLocalFile(file_path)
        self.ui.webView.load(local_url)
        #self.ui.commandLinkButton.setFixedSize(50, 50)
        self.ui.search_btn.clicked.connect(lambda: self.search_comp(series))
        self.ui.init_code_btn.clicked.connect(lambda: self.code_sort_tree(series))
        self.ui.init_category_btn.clicked.connect(lambda: self.init_treeWidget(list1, series))
        self.ui.commandLinkButton.clicked.connect(self.classify)  # when the arrow button is clicked, trigger events
        #self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
        # QSizePolicy: keep hidden widgets occupying layout space
        # (setRetainSizeWhenHidden exists only in newer Qt bindings).
        try:
            retain_size = self.ui.dateEdit_2.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.dateEdit_2.setSizePolicy(retain_size)
            retain_size = self.ui.comboBox.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.comboBox.setSizePolicy(retain_size)
            retain_size = self.ui.label_2.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.label_2.setSizePolicy(retain_size)
        except AttributeError:
            print("No PYQT5 Binding! Widgets might be deformed")
        self.ui.dateEdit.setDate(pastQL)
        self.ui.dateEdit_2.setDate(curdateQ)  # populate widgets
        self.ui.dateEdit.setCalendarPopup(True)
        self.ui.dateEdit_2.setCalendarPopup(True)
        self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])
        self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)
        self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)
        #self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
        self.ui.combobox.currentIndexChanged.connect(lambda: self.modifycombo(pastQL,pastQ))

    def init_treeWidget(self, list1, series):
        """(Re)populate the left tree: one parent node per category with
        'name-code' children for each stock in that category."""
        self.ui.treeWidget.clear()
        for j in list1:
            parent = QTreeWidgetItem(self.ui.treeWidget)  # populate treewidget with names
            parent.setText(0,j)
            var = series.loc[series["c_name"] == j]
            list2 = var["code"].tolist()
            name = var["name"].tolist()
            #var = showcollection(i) #Display database items
            for idx,val in enumerate(list2):
                child = QTreeWidgetItem(parent)
                child.setText(0, name[idx]+"-"+str(val))
                #for i in Drag:
                    #grandson = QTreeWidgetItem(child) #Commented out because increases program response time
                    #grandson.setText(0, i)
        #self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items

    def code_sort_tree(self, companies):
        """Rebuild the left tree as a single flat list sorted by stock code."""
        self.ui.treeWidget.clear()
        sorted_comps = companies.sort_values(["code"])
        code_list = sorted_comps["code"].tolist()
        name_list = sorted_comps["name"].tolist()
        shares_parent = QTreeWidgetItem(self.ui.treeWidget)
        shares_parent.setText(0, "个股行情")
        for idx, val in enumerate(code_list):
            child = QTreeWidgetItem(shares_parent)
            child.setText(0, name_list[idx] + "-" + str(val))
        self.ui.treeWidget.expandToDepth(0)

    def search_comp(self, companies):
        """Filter the left tree to companies whose code OR name contains the
        search text (results of both filters are concatenated)."""
        self.ui.treeWidget.clear()
        text = self.ui.search_lineEdit.text()
        filtered_codes = companies[companies['code'].str.contains(text)]
        filtered_names = companies[companies['name'].str.contains(text)]
        filtered_comps = filtered_codes.append(filtered_names)
        code_list = filtered_comps["code"].tolist()
        name_list = filtered_comps["name"].tolist()
        parent = QTreeWidgetItem(self.ui.treeWidget)
        parent.setText(0, "搜索结果")
        for idx, val in enumerate(code_list):
            child = QTreeWidgetItem(parent)
            child.setText(0, name_list[idx] + "-" + str(val))
        self.ui.treeWidget.expandToDepth(0)

    def modifycombo(self,pastQL,pastQ):
        """Adjust the visible widgets and interval choices to match the
        selected query mode, clearing pending queries to avoid conflicts."""
        if self.ui.combobox.currentText()==u"复权":  # adjusted-price mode: clear all existing queries to avoid value conflict
            self.ui.label_2.show()
            self.ui.dateEdit_2.show()
            self.ui.dateEdit.setDate(pastQL)
            self.ui.interval_label.show()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["hfq", "qfq"])
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()==u"K线":  # candlestick mode
            self.ui.label_2.show()
            self.ui.dateEdit_2.show()
            self.ui.dateEdit.setDate(pastQL)
            self.ui.interval_label.show()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])  # same as above
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()==u"分笔数据":  # tick data mode
            self.ui.interval_label.hide()
            self.ui.comboBox.hide()
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.dateEdit.setDate(pastQ)
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()==u"历史分钟":  # historical minute bars mode
            self.ui.interval_label.hide()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["1min","5min","15min","30min","60min"])
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.dateEdit.setDate(pastQ)
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()==u"十大股东":  # top-10 shareholders mode
            self.ui.interval_label.hide()
            self.ui.comboBox.hide()
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.treeWidget_2.clear()

    def openMenu(self,position):
        """Context menu on the left tree: offer the graph types that make
        sense for the currently selected query mode."""
        indexes = self.ui.treeWidget.selectedIndexes()
        item = self.ui.treeWidget.itemAt(position)
        db_origin = ""
        #if item.parent():
            #db_origin = item.parent().text(0)
        collec = str(item.text(0).encode("utf-8"))
        if len(indexes) > 0:
            # Walk up to determine tree depth: 0 == category node (no menu).
            level = 0
            index = indexes[0]
            while index.parent().isValid():
                index = index.parent()
                level = level + 1
            menu = QMenu()
            #print((collec, db_origin))
            if level ==0:
                pass
            else:
                #keyarray = GetKeys(collec, db_origin)
                #if "Open" in keyarray:
                if self.ui.combobox.currentText()==u"K线":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))  # open up different menu with different kind of graphs
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    #menu.addAction(QAction("P_change", menu, checkable=True))
                    #menu.addAction(QAction("Turnover",menu,checkable=True))
                if self.ui.combobox.currentText()==u"复权":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    menu.addAction(QAction("Amount", menu, checkable=True))
                if self.ui.combobox.currentText()==u"分笔数据":
                    menu.addAction(QAction("分笔", menu, checkable=True))
                if self.ui.combobox.currentText()==u"历史分钟":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    menu.addAction(QAction("Amount", menu, checkable=True))
                if self.ui.combobox.currentText()==u"十大股东":
                    menu.addAction(QAction("季度饼图", menu, checkable=True))
                    #menu.addAction(QAction("持股比例", menu, checkable=True))
                #for g in keyarray:
                    #menu.addAction(QAction(g, menu, checkable=True))
                menu.triggered.connect(lambda action: self.methodSelected(action, collec))
                menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))

    def methodSelected(self, action, collec):
        """Append the chosen (stock, graph-type) pair to the right-hand query tree."""
        # print(action.text()) #Choice
        # if (self.ui.treewidget.count() == 5):
        #     self.ui.label.setText("Maximum number of queries")
        #     return
        # self.ui.label.setText("")
        Choice = action.text()
        Stock = collec
        # print(collec) #Stock Name
        # print(db_origin) #DataBase name
        # list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
        # self.ui.treewidget.addItems(list1)
        parent = QTreeWidgetItem(self.ui.treeWidget_2)
        parent.setText(0, Stock.decode("utf-8") + "-" + Choice)

    def openWidgetMenu(self,position):
        """Context menu on the right (query) tree: currently only Delete."""
        indexes = self.ui.treeWidget_2.selectedIndexes()
        item = self.ui.treeWidget_2.itemAt(position)
        if item == None:
            return
        #item = self.ui.listWidget.itemAt(position)
        if len(indexes) > 0:
            menu = QMenu()
            menu.addAction(QAction("Delete", menu,checkable = True))  # This function is perhaps useless
            #menu.triggered.connect(self.eraseItem)
            item = self.ui.treeWidget_2.itemAt(position)
            #collec = str(item.text())
            menu.triggered.connect(lambda action: self.ListMethodSelected(action, item))
            menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))

    def ListMethodSelected(self, action, item):
        """Handle the right-tree context menu choice (Delete/Combine)."""
        if action.text() == "Delete":
            self.eraseItem()
        if action.text() == "Combine":
            global CombineKeyword
            collec = str(item.text())
            CombineKeyword.append(collec)  # Useless function(maybe?)
            list1 = [self.tr(collec)]
            self.ui.listwidget.addItems(list1)
            self.eraseItem()

    def eraseItem(self):
        """Delete the selected query items (invoked from the right-click menu)."""
        for x in self.ui.treeWidget_2.selectedItems():
            #item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
            sip.delete(x)
            #item.delete

    def classify(self, folder):
        """Collect all queued queries and render them into the embedded web view."""
        startdate = self.ui.dateEdit.date()
        startdate = startdate.toPyDate()
        startdate = startdate.strftime("%Y/%m/%d")  # converts date from dateedit to tushare readable date
        enddate = self.ui.dateEdit_2.date()
        enddate = enddate.toPyDate()
        enddate = enddate.strftime("%Y/%m/%d")
        option = self.ui.comboBox.currentText()
        option = str(option)
        #if (self.ui.treewidget) == 0:
            #self.ui.label.setText("Need to select at least one query")
            #return
        root = self.ui.treeWidget_2.invisibleRootItem()  # This is for iterating child items
        child_count = root.childCount()
        texts = []
        if child_count==0:
            return
        for i in range(child_count):
            item = root.child(i)
            text = item.text(0)  # three '-'-separated parts: stock_name, code, action
            texts.append(text)
        labels = [k for k in texts]
        #items = ([x.encode("utf-8") for x in labels])
        width = self.ui.webView.width()  # give width and height of user's screen so that graphs can be generated with dynamic size
        height = self.ui.webView.height()
        graphpage(labels, startdate,enddate,option,width, height)  # labels: query strings (adjusted/candlestick/tick); option: hfq, qfq or 15, 30, D, etc
        self.ui.webView.reload()  # refreshes webengine
        self.ui.webView.repaint()
        self.ui.webView.update()

    def graphmerge(self, combineKeyword):
        """Join the given keywords into a single '&'-separated query string."""
        sth = ""
        for i in combineKeyword:
            if sth == "":
                sth = sth + i
            else:
                sth = sth + "\n" + "&"+ "-"+i
        list1 = sth
        return sth
        # NOTE(review): everything below is unreachable (follows the return);
        # kept as-is because the module-level CombineKeyword list is still
        # referenced by ListMethodSelected.
        global CombineKeyword
        CombineKeyword = []
        self.ui.listwidget.clear()  # combine stuff so that different graphs can be drawn together
# Script entry point: build the Qt application and run its event loop.
# NOTE(review): not guarded by `if __name__ == "__main__"`, so importing this
# module starts the GUI — presumably intentional for this script; confirm.
app = QApplication(sys.argv)
w = MyUi()
w.show()
sys.exit(app.exec_())
|
{
"content_hash": "9260765587bfe5d60494cd62d0863813",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 128,
"avg_line_length": 45.847184986595174,
"alnum_prop": 0.5872755979182503,
"repo_name": "Seedarchangel/TuChart",
"id": "30ed89e67168349855b84e451319ff4c7d616199",
"size": "17357",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/Tuchart/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133370"
}
],
"symlink_target": ""
}
|
from girder import events
from girder.models.model_base import ValidationException
from . import rest, constants
def validateSettings(event):
    """Validate the OAuth plugin settings on 'model.setting.validate'.

    Rejects an empty Google client ID or client secret by raising
    ``ValidationException``; for a non-empty value, marks the event as
    handled so default validation does not run.  Settings other than the
    two Google keys are ignored.
    """
    key = event.info['key']
    val = event.info['value']

    errors = {
        constants.PluginSettings.GOOGLE_CLIENT_ID:
            'Google client ID must not be empty.',
        constants.PluginSettings.GOOGLE_CLIENT_SECRET:
            'Google client secret must not be empty.',
    }
    if key not in errors:
        return
    if not val:
        raise ValidationException(errors[key], 'value')
    event.preventDefault().stopPropagation()
def load(info):
    """Plugin entry point: register the settings validator and mount the
    OAuth REST resource at apiRoot.oauth."""
    events.bind('model.setting.validate', 'oauth', validateSettings)
    info['apiRoot'].oauth = rest.OAuth()
|
{
"content_hash": "9be39e0f790950ef4967ad5133598172",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 34.56521739130435,
"alnum_prop": 0.6666666666666666,
"repo_name": "chrismattmann/girder",
"id": "7bfe27d598598c389ccb33b6de7eefbd48d82edd",
"size": "1584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/oauth/server/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "36635"
},
{
"name": "CSS",
"bytes": "156740"
},
{
"name": "HTML",
"bytes": "161646"
},
{
"name": "JavaScript",
"bytes": "1358011"
},
{
"name": "Mako",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "1202964"
},
{
"name": "Ruby",
"bytes": "9923"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
}
|
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after a failed/empty RPC response
MAX_NONCE = 1000000L  # nonces scanned per getwork (Python 2 long literal)

settings = {}  # global config dictionary, filled in by the startup code
pp = pprint.PrettyPrinter(indent=4)  # shared pretty-printer for debug output
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node.

    Python 2 code: uses `httplib` and print statements.  A single
    HTTPConnection is opened in the constructor and reused for all calls.
    """
    # Request id counter; note `self.OBJID += 1` below creates a per-instance
    # attribute shadowing this class attribute after the first call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Pre-compute the HTTP Basic auth header from "user:password".
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30-second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC request; return the result, the server's error
        object, or None on transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate to the low 32 bits (Python 2: the `L` suffix makes the mask a long).
    return x & 0xffffffffL
def bytereverse(x):
    """Return the 32-bit word *x* with its four bytes in reverse order.

    Equivalent to swapping the word between little- and big-endian
    representations; the result is truncated to 32 bits.
    """
    swapped = ((x << 24) |
               ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) |
               (x >> 24))
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Reverse the byte order within each aligned 4-byte word of *in_buf*,
    keeping the words themselves in their original order.

    Python 2 code: *in_buf* is a byte string (`str`) whose length is a
    multiple of 4; the `''.join` of `struct.pack` outputs would raise
    TypeError under Python 3.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Return *in_buf* with its 4-byte words in reverse order.

    The bytes inside each word are left untouched; a trailing chunk shorter
    than 4 bytes is kept intact and moved to the front like any other chunk.
    """
    chunks = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """One getwork mining worker (intended to run in its own process)."""

    def __init__(self, id):
        # `id` shadows the builtin; kept for interface compatibility.
        self.id = id
        # Upper bound on nonces scanned per getwork iteration.
        self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's fetch/scan/submit loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the simple key=value config file, ignoring '#' comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 11332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One worker *process* per configured "thread"; each runs its own loop.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until all workers exit (they normally run forever).
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
{
"content_hash": "b5029ac11a72fd58519e580a040d91af",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 84,
"avg_line_length": 25.416326530612245,
"alnum_prop": 0.6452545366950377,
"repo_name": "neo7530/wabcoin",
"id": "b5b039f9346c4f94c920bc2352e7a8076d42776e",
"size": "6435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/pyminer/pyminer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103378"
},
{
"name": "C++",
"bytes": "2534152"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "29384"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69717"
},
{
"name": "Shell",
"bytes": "9702"
},
{
"name": "TypeScript",
"bytes": "5232379"
}
],
"symlink_target": ""
}
|
"""
Expose public exceptions & warnings
"""
from pandas._libs.tslibs import OutOfBoundsDatetime
class PerformanceWarning(Warning):
    """
    Warning raised when there is a possible performance impact.

    As a ``Warning`` subclass it can be filtered or escalated with the
    standard :mod:`warnings` machinery.
    """
class UnsupportedFunctionCall(ValueError):
    """
    Exception raised when attempting to call a numpy function
    on a pandas object, but that function is not supported by
    the object e.g. ``np.cumsum(groupby_object)``.

    Subclass of ``ValueError``.
    """
class UnsortedIndexError(KeyError):
    """
    Error raised when attempting to get a slice of a MultiIndex,
    and the index has not been lexsorted. Subclass of ``KeyError``.

    .. versionadded:: 0.20.0
    """
class ParserError(ValueError):
    """
    Exception that is raised by an error encountered in parsing file contents.

    This is a generic error raised for errors encountered when functions like
    `read_csv` or `read_html` are parsing contents of a file. Subclass of
    ``ValueError``.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_html : Read HTML table into a DataFrame.
    """
class DtypeWarning(Warning):
    """
    Warning raised when reading different dtypes in a column from a file.

    Raised for a dtype incompatibility. This can happen whenever `read_csv`
    or `read_table` encounter non-uniform dtypes in a column(s) of a given
    CSV file.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_table : Read general delimited file into a DataFrame.

    Notes
    -----
    This warning is issued when dealing with larger files because the dtype
    checking happens per chunk read.

    Despite the warning, the CSV file is read with mixed types in a single
    column which will be an object type. See the examples below to better
    understand this issue.

    Examples
    --------
    This example creates and reads a large CSV file with a column that contains
    `int` and `str`.

    >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +
    ...                          ['1'] * 100000),
    ...                    'b': ['b'] * 300000})
    >>> df.to_csv('test.csv', index=False)
    >>> df2 = pd.read_csv('test.csv')
    ... # DtypeWarning: Columns (0) have mixed types

    Important to notice that ``df2`` will contain both `str` and `int` for the
    same input, '1'.

    >>> df2.iloc[262140, 0]
    '1'
    >>> type(df2.iloc[262140, 0])
    <class 'str'>
    >>> df2.iloc[262150, 0]
    1
    >>> type(df2.iloc[262150, 0])
    <class 'int'>

    One way to solve this issue is using the `dtype` parameter in the
    `read_csv` and `read_table` functions to explicit the conversion:

    >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str})

    No warning was issued.

    >>> import os
    >>> os.remove('test.csv')
    """
class EmptyDataError(ValueError):
    """
    Exception that is thrown in `pd.read_csv` (by both the C and
    Python engines) when empty data or header is encountered.

    Subclass of ``ValueError``.
    """
class ParserWarning(Warning):
    """
    Warning raised when reading a file that doesn't use the default 'c' parser.

    Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change
    parsers, generally from the default 'c' parser to 'python'.

    It happens due to a lack of support or functionality for parsing a
    particular attribute of a CSV file with the requested engine.

    Currently, 'c' unsupported options include the following parameters:

    1. `sep` other than a single character (e.g. regex separators)
    2. `skipfooter` higher than 0
    3. `sep=None` with `delim_whitespace=False`

    The warning can be avoided by adding `engine='python'` as a parameter in
    `pd.read_csv` and `pd.read_table` methods.

    See Also
    --------
    pd.read_csv : Read CSV (comma-separated) file into DataFrame.
    pd.read_table : Read general delimited file into DataFrame.

    Examples
    --------
    Using a `sep` in `pd.read_csv` other than a single character:

    >>> import io
    >>> csv = u'''a;b;c
    ...           1;1,8
    ...           1;2,1'''
    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]')  # doctest: +SKIP
    ... # ParserWarning: Falling back to the 'python' engine...

    Adding `engine='python'` to `pd.read_csv` removes the Warning:

    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
    """
class MergeError(ValueError):
    """
    Error raised when problems arise during merging due to problems
    with input data. Subclass of ``ValueError``.
    """
class NullFrequencyError(ValueError):
    """
    Error raised when a null `freq` attribute is used in an operation
    that needs a non-null frequency, particularly `DatetimeIndex.shift`,
    `TimedeltaIndex.shift`, `PeriodIndex.shift`.

    Subclass of ``ValueError``.
    """
class AccessorRegistrationWarning(Warning):
    """Warning for attribute conflicts in accessor registration.

    Emitted when registering an accessor would override an existing
    attribute of the target pandas object.
    """
class AbstractMethodError(NotImplementedError):
    """Raise this error instead of NotImplementedError for abstract methods
    while keeping compatibility with Python 2 and Python 3.

    Parameters
    ----------
    class_instance : object or type
        The object (or, for ``methodtype='classmethod'``, the class) the
        abstract method was invoked on; its name is used in the message.
    methodtype : str, default 'method'
        One of ``{'method', 'classmethod', 'staticmethod', 'property'}``.

    Raises
    ------
    ValueError
        If ``methodtype`` is not one of the allowed values.
    """

    def __init__(self, class_instance, methodtype='method'):
        types = {'method', 'classmethod', 'staticmethod', 'property'}
        if methodtype not in types:
            # BUG FIX: the format arguments were swapped, producing
            # "must be one of <got-value>, got <valid-set> instead."
            msg = 'methodtype must be one of {}, got {} instead.'.format(
                types, methodtype)
            raise ValueError(msg)
        self.methodtype = methodtype
        self.class_instance = class_instance

    def __str__(self):
        # classmethods receive the class itself, everything else an instance.
        if self.methodtype == 'classmethod':
            name = self.class_instance.__name__
        else:
            name = self.class_instance.__class__.__name__
        msg = "This {methodtype} must be defined in the concrete class {name}"
        return msg.format(methodtype=self.methodtype, name=name)
|
{
"content_hash": "77fb07977afe55ee3a691d57a549ae23",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 30.647368421052633,
"alnum_prop": 0.6409067490984028,
"repo_name": "MJuddBooth/pandas",
"id": "7d5a7f1a99e41ed3b5c8dd24a4ce8a18fae68bb9",
"size": "5839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/errors/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['index', 'divisor'])
def glVertexAttribDivisorNV(index, divisor):
    """Set the instancing divisor of a vertex attribute (NV_instanced_arrays).

    Stub body: the project's @params decorator presumably dispatches the
    call to the native cffi GLES2 binding -- the Python body is
    intentionally empty (pattern used throughout OpenGLCffi).
    """
    pass
|
{
"content_hash": "4b9183d3d387952d93703d47a4cfb283",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.7518248175182481,
"repo_name": "cydenix/OpenGLCffi",
"id": "f6fb81bbdcaddcca43fd34c9bb9b812230b0ff68",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GLES2/EXT/NV/instanced_arrays.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
}
|
from setuptools import setup
fn = 'lino/setup_info.py'

# Execute setup_info.py in this module's namespace to define SETUP_INFO.
# Equivalent to Python 2's execfile(fn), but also works in Python 3.
# Fix: open the file in a context manager so the handle is always closed
# (the original open(fn, "rb").read() leaked the file object).
with open(fn, "rb") as f:
    exec(compile(f.read(), fn, 'exec'))

if __name__ == '__main__':
    setup(**SETUP_INFO)
|
{
"content_hash": "9b85dfb1311c1751186a3b4c8ff8928e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 25.9,
"alnum_prop": 0.6563706563706564,
"repo_name": "khchine5/lino",
"id": "182070960d62b008df5c09c2f51b42c56fffcb96",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1372581"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "1146746"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1212734"
},
{
"name": "PHP",
"bytes": "56272"
},
{
"name": "Python",
"bytes": "2484371"
},
{
"name": "Shell",
"bytes": "5752"
}
],
"symlink_target": ""
}
|
import base
class Action(base.BaseV30):
    """v30 'action' API wrapper."""

    def write_memory(self, **kwargs):
        """Persist the running configuration to primary memory.

        Issues a POST to /write/memory/ with a fixed payload selecting
        the primary memory target; extra kwargs are forwarded to _post.
        """
        self._post(
            "/write/memory/",
            {"memory": {"primary": True}},
            **kwargs
        )
|
{
"content_hash": "57a9a1db805d76459def1298b239c53f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 19.75,
"alnum_prop": 0.47257383966244726,
"repo_name": "dkiser/acos-client",
"id": "727e224c23b5027495b9bc3ccd7f6bd77c9c5454",
"size": "860",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "acos_client/v30/action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191687"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflow_v2beta1
def sample_get_version():
    """Fetch a single agent version via the Dialogflow v2beta1 API and print it.

    Network call: requires valid Google Cloud credentials. "name_value" is a
    placeholder for a fully-qualified version resource name.
    """
    # Create a client
    client = dialogflow_v2beta1.VersionsClient()

    # Initialize request argument(s)
    request = dialogflow_v2beta1.GetVersionRequest(
        name="name_value",
    )

    # Make the request
    response = client.get_version(request=request)

    # Handle the response
    print(response)

# [END dialogflow_v2beta1_generated_Versions_GetVersion_sync]
|
{
"content_hash": "d742a4651d31e2eefd903fdedc84aba4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.7095343680709535,
"repo_name": "googleapis/python-dialogflow",
"id": "aa77130f379c8bcffeda7a357ecaf65f39fab93b",
"size": "1836",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2beta1_generated_versions_get_version_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
# HackerRank "Check Strict Superset": print True iff set A is a *strict*
# superset of every one of the following sets read from stdin.
set_a = set(
    map(int, input().split())
)
set_count = int(input())

result = True
for _ in range(set_count):
    current_set = set(
        map(int, input().split())
    )
    # BUG FIX: the original tested `set_a >= current_set`, which also
    # accepts equal sets. A strict superset must contain every element of
    # current_set AND at least one element not in it, i.e. `>`.
    if not set_a > current_set:
        result = False
        break

print(result)
|
{
"content_hash": "e45ee28e12d35e984302239c0f5130d4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 33,
"avg_line_length": 14.833333333333334,
"alnum_prop": 0.5318352059925093,
"repo_name": "avenet/hackerrank",
"id": "41e372c503f45bc8cb1c71e9223a8b360e4ddc42",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sets/check_strict_superset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "944"
},
{
"name": "Python",
"bytes": "118800"
}
],
"symlink_target": ""
}
|
import sys
from networking_mlnx.plugins.mlnx.agent import mlnx_eswitch_neutron_agent
from oslo_config import cfg
from neutron.i18n import _LE, _LI
from neutron.common import config as common_config
from neutron.common import utils
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.agent import config # noqa
LOG = logging.getLogger(__name__)
def main():
    """Entry point for the Mellanox eSwitch neutron agent.

    Parses CLI/config options, builds the physical-interface mapping,
    constructs the agent, and runs its main loop. Exits with status 1 on
    configuration or initialisation failure, 0 if run() ever returns.
    """
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    try:
        # "physnet:interface" pairs from the ESWITCH config section.
        interface_mappings = utils.parse_mappings(
            cfg.CONF.ESWITCH.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        agent = mlnx_eswitch_neutron_agent.MlnxEswitchNeutronAgent(
            interface_mappings)
    except Exception as e:
        LOG.error(_LE("Failed on Agent initialisation : %s. "
                      "Agent terminated!"), e)
        sys.exit(1)

    # Start everything.
    LOG.info(_LI("Agent initialised successfully, now running... "))
    agent.run()
    sys.exit(0)


if __name__ == '__main__':
    main()
|
{
"content_hash": "f035bed8f24668c0f85ce7a5e29d9f9a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 28.88372093023256,
"alnum_prop": 0.6537842190016103,
"repo_name": "cloudbase/neutron-virtualbox",
"id": "19c610640fe6d604c096d7be2928cefeecc6d769",
"size": "1835",
"binary": false,
"copies": "2",
"ref": "refs/heads/virtualbox_agent",
"path": "neutron/plugins/mlnx/agent/eswitch_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "8448838"
},
{
"name": "Shell",
"bytes": "12510"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
# URL routes for the profile app.
# NOTE(review): uses the pre-1.10 Django `patterns()` helper and
# string-dotted view paths, both removed in later Django versions --
# confirm the project's pinned Django release before upgrading.
urlpatterns = patterns(
    '',
    url(r'^$', 'what_profile.views.profile'),
    url(r'^part/buffer_up_down_data$', 'what_profile.parts.buffer_up_down_data'),
    url(r'^part/profile_history', 'what_profile.parts.profile_history'),
)
|
{
"content_hash": "bb153117f4e88ebf91746253d86a7bc4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 81,
"avg_line_length": 34.875,
"alnum_prop": 0.6774193548387096,
"repo_name": "MADindustries/WhatManager2",
"id": "ef83267230b365ffd247166465f7c51a47086e51",
"size": "279",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "what_profile/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202636"
},
{
"name": "HTML",
"bytes": "139705"
},
{
"name": "JavaScript",
"bytes": "632927"
},
{
"name": "Python",
"bytes": "508225"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
}
|
import re
# Python-LDAP
import ldap
# Django
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
# Public validator API re-exported by this module.
__all__ = ['validate_ldap_dn', 'validate_ldap_dn_with_user',
           'validate_ldap_bind_dn', 'validate_ldap_filter',
           'validate_ldap_filter_with_user',
           'validate_tacacsplus_disallow_nonascii']
def validate_ldap_dn(value, with_user=False):
    """Validate an LDAP Distinguished Name string.

    When with_user=True the value is a template that must contain the
    literal '%(user)s' placeholder, which is substituted with a dummy
    username before syntax checking. Raises ValidationError on failure.
    """
    if with_user:
        if '%(user)s' not in value:
            raise ValidationError(_('DN must include "%%(user)s" placeholder for username: %s') % value)
        # Substitute a dummy username so the template parses as a real DN.
        dn_value = value.replace('%(user)s', 'USER')
    else:
        dn_value = value
    try:
        # NOTE(review): passing UTF-8 bytes to str2dn is a Python 2 idiom;
        # python-ldap on Python 3 expects str here -- confirm the target
        # python-ldap version.
        ldap.dn.str2dn(dn_value.encode('utf-8'))
    except ldap.DECODING_ERROR:
        raise ValidationError(_('Invalid DN: %s') % value)
def validate_ldap_dn_with_user(value):
    """Validate a DN template that must contain the '%(user)s' placeholder."""
    validate_ldap_dn(value, with_user=True)
def validate_ldap_bind_dn(value):
    """Validate an LDAP bind identity.

    Down-level logon names (``DOMAIN\\user``) and user-principal names
    (``user@domain.tld``) are accepted as-is; anything else must be a
    syntactically valid DN (delegated to validate_ldap_dn, which raises
    ValidationError on failure).
    """
    candidate = value.strip()
    is_down_level = re.match(
        r'^[A-Za-z][A-Za-z0-9._-]*?\\[A-Za-z0-9 ._-]+?$', candidate)
    is_upn = re.match(
        r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$', candidate)
    if not is_down_level and not is_upn:
        validate_ldap_dn(value)
def validate_ldap_filter(value, with_user=False):
    """Validate an LDAP search filter string.

    Accepts a single '(attr=value)' term or a composite
    '([&|!](...)(...))' expression whose sub-terms are validated
    recursively. When with_user=True the filter must contain the
    '%(user)s' placeholder. Raises ValidationError on failure; empty
    (whitespace-only) values are accepted as "no filter".
    """
    value = value.strip()
    if not value:
        return
    if with_user:
        if '%(user)s' not in value:
            raise ValidationError(_('DN must include "%%(user)s" placeholder for username: %s') % value)
        # Substitute a dummy username so the template parses concretely.
        dn_value = value.replace('%(user)s', 'USER')
    else:
        dn_value = value
    if re.match(r'^\([A-Za-z0-9-]+?=[^()]+?\)$', dn_value):
        return
    elif re.match(r'^\([&|!]\(.*?\)\)$', dn_value):
        try:
            # BUG FIX: the original used map(), which is lazy on Python 3,
            # so the sub-filters were never actually validated (the try
            # body consumed nothing). Iterate explicitly so every
            # sub-filter is checked eagerly; behavior on Python 2 is
            # unchanged.
            for sub_filter in ['(%s)' % x for x in dn_value[3:-2].split(')(')]:
                validate_ldap_filter(sub_filter)
            return
        except ValidationError:
            pass
    raise ValidationError(_('Invalid filter: %s') % value)
def validate_ldap_filter_with_user(value):
    """Validate an LDAP filter template that must contain '%(user)s'."""
    validate_ldap_filter(value, with_user=True)
def validate_tacacsplus_disallow_nonascii(value):
    """Reject TACACS+ secrets that contain non-ASCII characters.

    Catches both encode and decode errors so Python 2 byte strings and
    unicode strings are treated uniformly. Raises ValidationError.
    """
    try:
        value.encode('ascii')
    except (UnicodeEncodeError, UnicodeDecodeError):
        raise ValidationError(_('TACACS+ secret does not allow non-ascii characters'))
|
{
"content_hash": "c9ce77d4c77ae61b71d274bb58206e9b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 104,
"avg_line_length": 32.23529411764706,
"alnum_prop": 0.5994525547445255,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "7e89958236d1c891a8a37c8cdc16bb03308e2c31",
"size": "2201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/sso/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Determine if your browser is detectable by anti-bot services.
Some sites use scripts to detect Selenium and then block you.
To evade detection, add --uc as a pytest command-line option.
"""
from seleniumbase import BaseCase
class UndetectedTest(BaseCase):
    """Open a bot-detection page and assert the browser was not flagged."""

    def test_browser_is_undetected(self):
        # nowsecure.nl shows "OH YEAH, you passed!" only when no automation
        # is detected; the text failing to appear within the timeout means
        # Selenium was fingerprinted, so the test fails with a hint to
        # re-run in undetected-chromedriver mode (--uc).
        self.open("https://nowsecure.nl")
        try:
            self.assert_text("OH YEAH, you passed!", "h1", timeout=6.75)
            self.post_message("Selenium wasn't detected!", duration=1.6)
            self._print("\n Success! Website did not detect Selenium! ")
        except Exception:
            self.fail('Selenium was detected! Try using: "pytest --uc"')
|
{
"content_hash": "8f3b1910c5f46e5c8e7a570b28299c59",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 40.8235294117647,
"alnum_prop": 0.6498559077809798,
"repo_name": "seleniumbase/SeleniumBase",
"id": "00b0a6ddd612c2bbb4089984df045cc0d66bce23",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/verify_undetected.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1503"
},
{
"name": "Dockerfile",
"bytes": "3823"
},
{
"name": "Gherkin",
"bytes": "5076"
},
{
"name": "HTML",
"bytes": "10180"
},
{
"name": "JavaScript",
"bytes": "1338"
},
{
"name": "Python",
"bytes": "2298163"
},
{
"name": "Shell",
"bytes": "13488"
}
],
"symlink_target": ""
}
|
""" Provides adding dns_evironment functionality """
from aquilon.utils import validate_nlist_key
from aquilon.worker.broker import BrokerCommand
from aquilon.aqdb.model import DnsEnvironment
class CommandAddDnsEnvironment(BrokerCommand):
    """Broker command: create a new DNS environment record."""

    required_parameters = ["dns_environment"]

    def render(self, session, dns_environment, comments, **_):
        # Reject names that are not valid nlist keys.
        validate_nlist_key("DNS environment", dns_environment)
        # preclude=True raises if an environment with this name already exists.
        DnsEnvironment.get_unique(session, dns_environment, preclude=True)
        db_dnsenv = DnsEnvironment(name=dns_environment, comments=comments)
        session.add(db_dnsenv)
        session.flush()
        return
|
{
"content_hash": "926bfe80cea2608991dc017d5bf53bfc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 33.578947368421055,
"alnum_prop": 0.7335423197492164,
"repo_name": "guillaume-philippon/aquilon",
"id": "be16801378b2402e4410e2ec271b1c0682f2463e",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/aquilon/worker/commands/add_dns_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
from . import chart
from warnings import warn
class ChartScatter(chart.Chart):
    """
    A class for writing the Excel XLSX Scatter charts.

    Scatter charts differ from the other chart types in two ways that this
    subclass encodes: series carry explicit X values (c:xVal/c:yVal instead
    of c:cat/c:val) and the plot area uses two value axes rather than a
    category axis plus a value axis.
    """

    ###########################################################################
    #
    # Public API.
    #
    ###########################################################################

    def __init__(self, options=None):
        """
        Constructor. `options` may carry a 'subtype' key selecting one of:
        marker_only (default), straight_with_markers, straight,
        smooth_with_markers, smooth.
        """
        super(ChartScatter, self).__init__()

        if options is None:
            options = {}

        self.subtype = options.get('subtype')

        if not self.subtype:
            self.subtype = 'marker_only'

        self.cross_between = 'midCat'
        self.horiz_val_axis = 0
        self.val_axis_position = 'b'
        self.smooth_allowed = True
        self.requires_category = True

        # Set the available data label positions for this chart type.
        self.label_position_default = 'right'
        self.label_positions = {
            'center': 'ctr',
            'right': 'r',
            'left': 'l',
            'above': 't',
            'below': 'b',
            # For backward compatibility.
            'top': 't',
            'bottom': 'b'}

    def combine(self, chart=None):
        """
        Create a combination chart with a secondary chart.

        Note: Override parent method to add a warning.

        Args:
            chart: The secondary chart to combine with the primary chart.

        Returns:
            Nothing.
        """
        if chart is None:
            return

        warn('Combined chart not currently supported with scatter chart '
             'as the primary chart')

    ###########################################################################
    #
    # Private API.
    #
    ###########################################################################

    def _write_chart_type(self, args):
        # Override the virtual superclass method with a chart specific method.
        # Write the c:scatterChart element.
        self._write_scatter_chart(args)

    ###########################################################################
    #
    # XML methods.
    #
    ###########################################################################

    def _write_scatter_chart(self, args):
        # Write the <c:scatterChart> element for either the primary or the
        # secondary axes (selected via args['primary_axes']).
        if args['primary_axes']:
            series = self._get_primary_axes_series()
        else:
            series = self._get_secondary_axes_series()

        if not len(series):
            return

        style = 'lineMarker'
        subtype = self.subtype

        # Set the user defined chart subtype. The marker-less subtypes
        # ('straight', 'smooth') suppress point markers by installing a
        # default marker of type 'none'.
        if subtype == 'marker_only':
            style = 'lineMarker'

        if subtype == 'straight_with_markers':
            style = 'lineMarker'

        if subtype == 'straight':
            style = 'lineMarker'
            self.default_marker = {'type': 'none'}

        if subtype == 'smooth_with_markers':
            style = 'smoothMarker'

        if subtype == 'smooth':
            style = 'smoothMarker'
            self.default_marker = {'type': 'none'}

        # Add default formatting to the series data.
        self._modify_series_formatting()

        self._xml_start_tag('c:scatterChart')

        # Write the c:scatterStyle element.
        self._write_scatter_style(style)

        # Write the series elements.
        for data in series:
            self._write_ser(data)

        # Write the c:axId elements
        self._write_axis_ids(args)

        self._xml_end_tag('c:scatterChart')

    def _write_ser(self, series):
        # Over-ridden to write c:xVal/c:yVal instead of c:cat/c:val elements.
        # Write the <c:ser> element.

        index = self.series_index
        self.series_index += 1

        self._xml_start_tag('c:ser')

        # Write the c:idx element.
        self._write_idx(index)

        # Write the c:order element.
        self._write_order(index)

        # Write the series name.
        self._write_series_name(series)

        # Write the c:spPr element.
        self._write_sp_pr(series)

        # Write the c:marker element.
        self._write_marker(series.get('marker'))

        # Write the c:dPt element.
        self._write_d_pt(series.get('points'))

        # Write the c:dLbls element.
        self._write_d_lbls(series.get('labels'))

        # Write the c:trendline element.
        self._write_trendline(series.get('trendline'))

        # Write the c:errBars element.
        self._write_error_bars(series.get('error_bars'))

        # Write the c:xVal element.
        self._write_x_val(series)

        # Write the c:yVal element.
        self._write_y_val(series)

        # Write the c:smooth element.
        if 'smooth' in self.subtype and series['smooth'] is None:
            # Default is on for smooth scatter charts.
            self._write_c_smooth(True)
        else:
            self._write_c_smooth(series['smooth'])

        self._xml_end_tag('c:ser')

    def _write_plot_area(self):
        # Over-ridden to have 2 valAx elements for scatter charts instead
        # of catAx/valAx.
        #
        # Write the <c:plotArea> element.
        self._xml_start_tag('c:plotArea')

        # Write the c:layout element.
        self._write_layout(self.plotarea.get('layout'), 'plot')

        # Write the subclass chart elements for primary and secondary axes.
        self._write_chart_type({'primary_axes': 1})
        self._write_chart_type({'primary_axes': 0})

        # Write c:catAx and c:valAx elements for series using primary axes.
        self._write_cat_val_axis({'x_axis': self.x_axis,
                                  'y_axis': self.y_axis,
                                  'axis_ids': self.axis_ids,
                                  'position': 'b',
                                  })

        # Temporarily mark the value axis as horizontal while writing the
        # primary Y axis, then restore the previous state.
        tmp = self.horiz_val_axis
        self.horiz_val_axis = 1
        self._write_val_axis({'x_axis': self.x_axis,
                              'y_axis': self.y_axis,
                              'axis_ids': self.axis_ids,
                              'position': 'l',
                              })
        self.horiz_val_axis = tmp

        # Write c:valAx and c:catAx elements for series using secondary axes
        self._write_cat_val_axis({'x_axis': self.x2_axis,
                                  'y_axis': self.y2_axis,
                                  'axis_ids': self.axis2_ids,
                                  'position': 'b',
                                  })
        self.horiz_val_axis = 1
        self._write_val_axis({'x_axis': self.x2_axis,
                              'y_axis': self.y2_axis,
                              'axis_ids': self.axis2_ids,
                              'position': 'l',
                              })

        # Write the c:spPr element for the plotarea formatting.
        self._write_sp_pr(self.plotarea)

        self._xml_end_tag('c:plotArea')

    def _write_x_val(self, series):
        # Write the <c:xVal> element. The X values come from what other
        # chart types treat as the 'categories' range.
        formula = series.get('categories')
        data_id = series.get('cat_data_id')
        data = self.formula_data[data_id]

        self._xml_start_tag('c:xVal')

        # Check the type of cached data.
        data_type = self._get_data_type(data)

        # TODO. Can a scatter plot have non-numeric data.

        if data_type == 'str':
            # Write the c:numRef element.
            self._write_str_ref(formula, data, data_type)
        else:
            # Write the c:numRef element.
            self._write_num_ref(formula, data, data_type)

        self._xml_end_tag('c:xVal')

    def _write_y_val(self, series):
        # Write the <c:yVal> element.
        formula = series.get('values')
        data_id = series.get('val_data_id')
        data = self.formula_data[data_id]

        self._xml_start_tag('c:yVal')

        # Unlike Cat axes data should only be numeric.
        # Write the c:numRef element.
        self._write_num_ref(formula, data, 'num')

        self._xml_end_tag('c:yVal')

    def _write_scatter_style(self, val):
        # Write the <c:scatterStyle> element.
        attributes = [('val', val)]

        self._xml_empty_tag('c:scatterStyle', attributes)

    def _modify_series_formatting(self):
        # Add default formatting to the series data unless it has already been
        # specified by the user.
        subtype = self.subtype

        # The default scatter style "markers only" requires a line type.
        if subtype == 'marker_only':

            # Go through each series and define default values.
            for series in self.series:

                # Set a line type unless there is already a user defined type.
                if not series['line']['defined']:
                    series['line'] = {'width': 2.25,
                                      'none': 1,
                                      'defined': 1,
                                      }

    def _write_d_pt_point(self, index, point):
        # Write an individual <c:dPt> element. Override the parent method to
        # add markers.

        self._xml_start_tag('c:dPt')

        # Write the c:idx element.
        self._write_idx(index)

        self._xml_start_tag('c:marker')

        # Write the c:spPr element.
        self._write_sp_pr(point)

        self._xml_end_tag('c:marker')

        self._xml_end_tag('c:dPt')
|
{
"content_hash": "fa65b389dd0404e8a28a85c462c41c1e",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 79,
"avg_line_length": 30.134615384615383,
"alnum_prop": 0.4969155498830036,
"repo_name": "jmcnamara/XlsxWriter",
"id": "941a886bd6b600251e579b2ca11ceab6f143eff9",
"size": "9654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/chart_scatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
from functools import wraps
from contextlib import AbstractContextManager, ExitStack, contextmanager
from abc import ABC, abstractmethod
from enum import Enum
from operator import *
from funklib.core.prelude import flip, const
from functools import wraps,partial
from collections import namedtuple
def from_context(cm):
    """Enter a context manager, return the value it yields, and exit it."""
    with cm as value:
        return value
class MatchFailure(Exception):
    """Exception raised in case of a pattern match failure.

    Attributes:
        matched: the value that failed to match (or a message string when
            raised positionally, as match.__exit__ does).
        pattern: the pattern it was tested against, when available.
    """
    def __init__(self, matched=None, pattern=None):
        self.matched = matched
        self.pattern = pattern

    def __repr__(self):
        # BUG FIX: the format arguments were swapped, so repr() showed the
        # matched value under the 'pattern=' label and vice versa.
        return "MatchFailure(pattern={}, matched={})".format(
            self.pattern, self.matched
        )

    def __str__(self):
        return "MatchFailure: value {!r} does not match pattern {!r}".format(self.matched, self.pattern)
class MatchSuccess(Exception):
    """Control-flow exception raised when a case matches successfully.

    Caught and suppressed by match.__exit__ so that the cases following a
    successful one are skipped.
    """
    pass
class matchstatus(Enum):
    """Lifecycle states for a pattern match.

    NOTE(review): appears unused -- only referenced from commented-out
    code in match.__init__.
    """
    pending = 0
    failed = 1
    succeeded = 2
class match:
    """Context-manager based pattern-matching construct.

    Usage::

        with match(value) as m:
            with m.case(SomePattern) as bound:
                ...
            with m.ignore():
                ...  # catch-all

    Control flow is exception-based: a case that runs to completion raises
    MatchSuccess, which __exit__ suppresses (skipping later cases); leaving
    the match block with no successful case raises MatchFailure.
    """
    def __init__(self, value):
        self._value = value
        # self._tried = 0
        # self._actives = []
        # self._status = matchstatus.pending

    @property
    def value(self):
        # The value being matched (read-only).
        return self._value

    @contextmanager
    def subcases(self):
        """Open a nested match over the same value.

        If no subcase matches, the inner MatchFailure is swallowed so the
        enclosing match can try further cases; if a subcase succeeds, the
        explicit `raise MatchSuccess` propagates success to the outer match.
        """
        try:
            with match(self._value) as m:
                yield m
            raise MatchSuccess
        except MatchFailure:
            return

    @contextmanager
    def case(self, pattern=None):
        """Creates a case context.
        If an extractor is provided, binds an extractor context to the 'as' clause.
        Silence MatchFailure exceptions and raise MatchSuccess if all goes okay."""
        try:
            if pattern:
                # Note: yields the pattern's `of` context manager itself;
                # the caller enters it to obtain the extracted value(s).
                yield pattern.of(self._value)
            else:
                yield None
            # Reached only when the case body completed without raising.
            raise MatchSuccess
        except MatchFailure:
            # Failed case: suppress so the next case can be tried.
            return

    @contextmanager
    def ignore(self):
        """Equivalent to self.case(ignore),
        introduce a context without binding anything."""
        yield None
        raise MatchSuccess

    def __enter__(self):
        return self

    def __exit__(self, t, ex, tb):
        # MatchSuccess means some case ran to completion: suppress it.
        if t is MatchSuccess:
            return True
        # Falling out of the block with no exception means no case matched.
        if ex is None:
            raise MatchFailure("No pattern matches value {!r}".format(self._value))

    def __repr__(self):
        return "Match({})".format(self._value)
class Pattern(ABC):
    """Abstract base for instance-level match patterns.

    Subclasses implement ``__match__``; instances are callable and expose
    ``of`` to consume a match result as a context manager."""
    def __call__(self, x):
        return self.__match__(x)
    @abstractmethod
    def __match__(self, x):
        """Try to match *x* and return a value (or tuple of values), or
        raise MatchFailure."""
    @contextmanager
    def of(self, x):
        yield self.__match__(x)
class ClassPattern(ABC):
    """Abstract base for patterns whose match logic is a classmethod (the
    class itself acts as the pattern)."""
    @classmethod
    @abstractmethod
    def __match__(cls, x):
        """Try to match *x* and return a value (or tuple of values), or
        raise MatchFailure."""
    @classmethod
    @contextmanager
    def of(cls, x):
        yield cls.__match__(x)
class StaticPattern(ABC):
    """Abstract base for patterns whose match logic is a plain static
    function attached to the class."""
    @staticmethod
    @abstractmethod
    def __match__(x):
        """Try to match *x* and return a value (or tuple of values), or
        raise MatchFailure."""
    @classmethod
    @contextmanager
    def of(cls, x):
        yield cls.__match__(x)
@contextmanager
def match_except(*exceptions):
    """Translate the given exception types into MatchFailure.

    Because @contextmanager objects double as decorators, this is also used
    to decorate ``__match__`` implementations (see Key, Attr, ...).

    :param exceptions: exception types to convert into a match failure"""
    try:
        yield
    except exceptions as ex:
        raise MatchFailure() from ex
class Key(Pattern):
    """Match any subscriptable object that contains *key*; expose the value
    stored under that key."""
    def __init__(self, key):
        self.key = key
    @match_except(KeyError, TypeError)
    def __match__(self, x):
        return getitem(x, self.key)
class Keys(Pattern):
    """Match a mapping that contains every key in *keys*; expose the tuple
    of corresponding values."""
    def __init__(self, keys):
        self.keys = keys
    @match_except(KeyError, TypeError)
    def __match__(self, x):
        return tuple(map(partial(getitem, x), self.keys))
class Attr(Pattern):
    """Match an object that has the given attribute; expose its value."""
    def __init__(self, attribute):
        self.attribute = attribute
    @match_except(AttributeError)
    def __match__(self, x):
        return getattr(x, self.attribute)
class Attrs(Pattern):
    """Match an object that has every attribute in *attributes*; expose the
    tuple of their values."""
    def __init__(self, *attributes):
        self.attributes = attributes
    @match_except(AttributeError)
    def __match__(self, x):
        return tuple(map(partial(getattr, x), self.attributes))
class Any(Pattern):
    """Wildcard pattern: matches every value and exposes it unchanged."""
    def __match__(self, x):
        return x
class pattern(Pattern):
    """Wrap a plain function as a Pattern.

    Bug fix: this was declared with ``def`` instead of ``class``, so
    ``pattern(func)`` defined two local functions and returned None —
    making ``ignore`` below None instead of a Pattern instance.
    """
    def __init__(self, func):
        self.pattern = func
    def __match__(self, x):
        return self.pattern(x)
# ``ignore`` matches anything and binds nothing (always produces None).
_ignore = const(None)
ignore = pattern(_ignore)
def ismatch(value, pattern):
    """Return True when *pattern* matches *value*, False otherwise."""
    try:
        pattern.__match__(value)
    except MatchFailure:
        return False
    return True
class Symbol(str): pass
_NoDefault = Symbol("NoDefault")
def getmatch(value, pattern, default=_NoDefault):
    """Return the result of matching *pattern* against *value*.

    Falls back to *default* when the match fails; re-raises MatchFailure
    when no default was supplied."""
    try:
        return pattern.__match__(value)
    except MatchFailure:
        if default is _NoDefault:
            raise
        return default
def predicate_method(f):
    """Adapt a boolean-returning method to the match protocol: the wrapper
    returns the argument when *f* is truthy, raises MatchFailure otherwise."""
    @wraps(f)
    def wrapper(self, arg):
        if not f(self, arg):
            raise MatchFailure()
        return arg
    return wrapper
def predicate_classmethod(f):
    """Classmethod variant of predicate_method: returns the argument when
    *f* is truthy, raises MatchFailure otherwise."""
    @wraps(f)
    def wrapper(cls, arg):
        if not f(cls, arg):
            raise MatchFailure
        return arg
    return wrapper
def predicate_function(f):
    """Plain-function variant of predicate_method: returns the argument
    when *f* is truthy, raises MatchFailure otherwise."""
    @wraps(f)
    def wrapper(arg):
        if not f(arg):
            raise MatchFailure
        return arg
    return wrapper
class Predicate(Pattern):
    """Base class for 'predicate' objects implementing the match protocol:
    a predicate pattern exposes the value itself on success."""
    def __init__(self, predicate):
        self.predicate = predicate
    def __match__(self, x):
        if not self.predicate(x):
            raise MatchFailure(matched=x, pattern=self)
        return x
    def __repr__(self):
        return "Predicate({})".format(self.predicate)
class Is(Predicate):
    """Predicate pattern matching a value by identity (``is``)."""
    def __init__(self, identity):
        self.identity = identity
        self.predicate = partial(is_, identity)
    def __match__(self, x):
        if x is not self.identity:
            raise MatchFailure(matched=x, pattern=self)
        return x
class Equal(Predicate):
    """Predicate pattern matching values equal (``==``) to a reference value."""
    def __init__(self, value):
        self.equal = value
        self.predicate = partial(eq, value)
    def __match__(self, x):
        # Bug fix: the former @predicate_method decorator re-tested the
        # returned VALUE for truthiness, so matching a falsy value (0, "",
        # ...) raised MatchFailure even when equal.  The body already
        # implements the match protocol, so no decorator is needed.
        if x == self.equal:
            return x
        else:
            raise MatchFailure(matched=x, pattern=self)
class In(Predicate):
    """Predicate pattern matching values contained in *iterable*."""
    def __init__(self, iterable):
        self.container = iterable
        self.predicate = partial(contains, iterable)
    def __match__(self, x):
        # Bug fix: the former @predicate_method decorator re-tested the
        # returned VALUE for truthiness, so falsy members (0, "", ...)
        # failed to match even when present in the container.
        if x in self.container:
            return x
        else:
            raise MatchFailure(matched=x, pattern=self)
class Compose(Pattern):
    """
    Pattern combiner that applies patterns in chain,
    matching the composition of all patterns,
    and failing if any of them fails
    """
    def __init__(self, *patterns):
        self.patterns = patterns
    def __match__(self, x):
        result = x
        # Right-to-left, mirroring function composition order.
        for pat in reversed(self.patterns):
            result = getmatch(result, pat)
        return result
class AsPredicate(Pattern):
    """Use *pattern* purely as a test: expose the original value on success
    instead of the pattern's own match result."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __match__(self, x):
        if not ismatch(x, self.pattern):
            raise MatchFailure(matched=x, pattern=self.pattern)
        return x
# Pairs the original value with the result its pattern produced.
WithMatch = namedtuple("WithMatch", ("value", "match"))
class With(Pattern):
    """Match *pattern* and expose both the original value and the match
    result as a WithMatch pair."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __match__(self, x):
        return WithMatch(value=x, match=getmatch(x, self.pattern))
class All(Predicate):
    """Predicate combiner that match a value which is matched by all subpredicates"""
    def __init__(self, *predicates):
        self.predicates = predicates
        self.predicate = lambda x: all(ismatch(x, p) for p in predicates)
    @predicate_method
    def __match__(self, x):
        return all(ismatch(x, p) for p in self.predicates)
class AnyOf(Predicate):
    """Predicate combiner matching a value accepted by at least one
    subpredicate."""
    def __init__(self, *predicates):
        self.predicates = predicates
        self.predicate = lambda x: any(ismatch(x, p) for p in predicates)
    @predicate_method
    def __match__(self, x):
        return any(ismatch(x, p) for p in self.predicates)
class OneOf(Predicate):
    """Predicates combiner that match a value which is matched by one and only one subpredicate"""
    def __init__(self, *predicates):
        def _oneof(x):
            # Bug fix: the original counted the number of predicates
            # (len of the mapped tuple) instead of the number of
            # successful matches, so the result did not depend on x.
            return sum(1 for p in predicates if ismatch(x, p)) == 1
        self.predicates = predicates
        self.predicate = _oneof
    @predicate_method
    def __match__(self, x):
        # Bug fix: the original built a 2-tuple (missing ``map`` call), so
        # this always evaluated len(...) == 1 as False and never matched.
        return sum(1 for p in self.predicates if ismatch(x, p)) == 1
class Type(Predicate):
    """Predicate pattern matching values that are instances of type *t*."""
    def __init__(self, t):
        self.type = t
        # flip swaps the argument order so partial can pre-bind the type.
        self.predicate = partial(flip(isinstance), t)
    @predicate_method
    def __match__(self, x):
        return isinstance(x, self.type)
class Many(Pattern):
    """Apply every pattern in *patterns* to the same value; expose the
    tuple of all match results (fails if any pattern fails)."""
    def __init__(self, patterns):
        self.patterns = patterns
    def __match__(self, x):
        return tuple(getmatch(x, p) for p in self.patterns)
|
{
"content_hash": "51dab097b841abf8e23e6327bc6c07cb",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 104,
"avg_line_length": 24.810551558752998,
"alnum_prop": 0.5920162381596752,
"repo_name": "DrPyser/python-multimethods",
"id": "0afc4328c2ebb01d3c2e93b6d702ee1438d19e08",
"size": "10346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patmat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16102"
}
],
"symlink_target": ""
}
|
from numpy import asarray, kron, zeros
from optimix import Function, Vector
from glimix_core._util import unvec, vec
class KronMean(Function):
    """
    Kronecker mean function, (A⊗X)vec(B).
    Let
    - n be the number of samples;
    - p the number of traits; and
    - c the number of covariates.
    The mathematical representation is
        𝐦 = (A⊗X)vec(B)
    where A is a p×p trait design matrix of fixed effects and X is a n×c sample design
    matrix of fixed effects. B is a c×p matrix of fixed-effect sizes.
    """
    def __init__(self, A, X):
        """
        Constructor.
        Parameters
        ----------
        A : array_like
            p×p array.
        X : array_like
            n×c array.
        """
        self._A = asarray(A, float)
        self._X = asarray(X, float)
        # vec(B) starts at zero, flattened from a c×p matrix.
        vecB = zeros((X.shape[1], A.shape[0])).ravel()
        self._vecB = Vector(vecB)
        self._nparams = vecB.size
        # Registers vec(B) as this function's optimizable parameter
        # (optimix Function machinery).
        Function.__init__(self, "KronMean", vecB=self._vecB)
    @property
    def nparams(self):
        """
        Number of parameters.
        """
        return self._nparams
    @property
    def A(self):
        """
        Matrix A.
        """
        return self._A
    @property
    def X(self):
        """
        Matrix X.
        """
        return self._X
    @property
    def AX(self):
        """
        A ⊗ X.
        """
        # NOTE(review): recomputed on every access; callers on hot paths may
        # want to cache this — confirm before changing.
        return kron(self.A, self.X)
    def value(self):
        """
        Kronecker mean function.
        Returns
        -------
        𝐦 : ndarray
            (A⊗X)vec(B).
        """
        return self.AX @ self._vecB.value
    def gradient(self):
        """
        Gradient of the linear mean function.
        Returns
        -------
        vecB : ndarray
            Derivative of M over vec(B).
        """
        # The mean is linear in vec(B), so the gradient is the design matrix.
        return {"vecB": self.AX}
    @property
    def B(self):
        """
        Effect-sizes parameter, B.
        """
        # Reshapes the flat parameter vector back into its c×p matrix form.
        return unvec(self._vecB.value, (self.X.shape[1], self.A.shape[0]))
    @B.setter
    def B(self, v):
        self._vecB.value = vec(asarray(v, float))
    def __str__(self):
        tname = type(self).__name__
        msg = "{}(A=..., X=...)".format(tname)
        if self.name is not None:
            msg += ": {}".format(self.name)
        msg += "\n"
        mat = format(self.B)
        msg += " B: " + "\n ".join(mat.split("\n"))
        return msg
|
{
"content_hash": "dc0e28db9a9b2c85211b322ff63e4a41",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 86,
"avg_line_length": 21.517857142857142,
"alnum_prop": 0.4817427385892116,
"repo_name": "glimix/limix-inference",
"id": "828df95ab76ce6d9811ba4f73e256551d0a60ad4",
"size": "2429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glimix_core/mean/_kron.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89299"
},
{
"name": "Shell",
"bytes": "863"
}
],
"symlink_target": ""
}
|
from supplychainpy.reporting.blueprints.bot.views import bot
|
{
"content_hash": "2d589b81cb127d0078ebc24d996dbb4d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 60,
"avg_line_length": 21,
"alnum_prop": 0.8412698412698413,
"repo_name": "KevinFasusi/supplychainpy",
"id": "5acaa268b2f27c4c1c3bdcfa5fadd486ea1774a7",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supplychainpy/reporting/blueprints/bot/templates/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13286"
},
{
"name": "Gherkin",
"bytes": "491"
},
{
"name": "HTML",
"bytes": "61502"
},
{
"name": "JavaScript",
"bytes": "691701"
},
{
"name": "Python",
"bytes": "624591"
},
{
"name": "Shell",
"bytes": "1456"
}
],
"symlink_target": ""
}
|
import azure.common.credentials
import azure.mgmt.network
import azure.mgmt.resource
# Deployment coordinates; replace the placeholders before running.
subscription_id = 'get_me'
group = 'get_me'
# Bug fix: ``client_id`` and ``secret`` previously ended with a stray
# trailing comma, which made them 1-element tuples instead of strings and
# broke the ServicePrincipalCredentials call below.
client_id = 'get_me'
secret = 'get_me'
tenant = 'get_me'
# Connect to azure
credentials = azure.common.credentials.ServicePrincipalCredentials(client_id, secret, tenant)
rmc = azure.mgmt.resource.ResourceManagementClient(credentials, subscription_id)
nmc = azure.mgmt.network.NetworkManagementClient(credentials, subscription_id)
# Get the ips of the nics we care about, grouped by instance kind.
buckets = {
    'masterNodeNic': [],
    'slavePrivateNic': [],
    'slavePublicNic': []
}
def try_get_bucket(name):
    """Return the bucket whose prefix starts *name*, or None if no bucket
    applies."""
    return next(
        (bucket for prefix, bucket in buckets.items() if name.startswith(prefix)),
        None,
    )
def lookup_ip(name):
    """Fetch the private IP of the named NIC; asserts the NIC has exactly
    one IP configuration."""
    nic = nmc.network_interfaces.get(group, name)
    ips = [cfg.private_ip_address for cfg in nic.ip_configurations]
    assert len(ips) == 1
    return ips[0]
# Collect the IP of every matching resource into its bucket.
# (The unused ``resources_we_want`` accumulator was removed: it was
# created but never appended to or read.)
for resource in rmc.resource_groups.list_resources(group):
    bucket = try_get_bucket(resource.name)
    if bucket is None:
        continue
    bucket.append(lookup_ip(resource.name))
print(buckets)
|
{
"content_hash": "844e489961cb843fa073162457421845",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.7046548956661316,
"repo_name": "movicha/dcos",
"id": "03a8ec9d9e608224e99239cae4041c81970d2a06",
"size": "1246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_util/azure_get_ip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1911"
},
{
"name": "Python",
"bytes": "941213"
},
{
"name": "Shell",
"bytes": "55412"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2012 - 2016, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from ..models import *
from webapp import db
import requests
import datetime
import json
def sync():
    # Import/update the "Fahrradständer in Moers" (bike racks) open-data
    # feed into the local SharingProvider/SharingStation tables.
    # NOTE(review): Python 2 code (``print u'...'`` statement below).
    # NOTE(review): this early return deliberately disables the whole sync;
    # remove it to re-enable the import.
    return
    print u'Fahrradständer in Moers'
    base_url = 'https://www.offenesdatenportal.de/dataset/a7c2b36e-6719-47ef-8845-758928fc5b30/resource/46686088-b524-4552-a43e-5ad535988339/download/fahrradstander.geojson'
    # WARNING: verify=False disables TLS certificate verification.
    r = requests.get(base_url, verify=False)
    containers = json.loads(r.text.encode('utf-8'))
    # Get or create the provider record for this data source.
    sharing_provider = SharingProvider.query.filter_by(slug='fahrradstaender-moers')
    if sharing_provider.count():
        sharing_provider = sharing_provider.first()
    else:
        sharing_provider = SharingProvider()
        sharing_provider.created = datetime.datetime.now()
        sharing_provider.slug = 'fahrradstaender-moers'
        sharing_provider.active = 1
    sharing_provider.updated = datetime.datetime.now()
    sharing_provider.name = u'Fahrradständer in Moers'
    db.session.add(sharing_provider)
    db.session.commit()
    for raw_sharing_station in containers['features']:
        # External id is derived from the first coordinate pair (lat-lon),
        # since the feed carries no stable identifier.
        external_id = str(raw_sharing_station['geometry']['coordinates'][0][1]) + '-' + str(raw_sharing_station['geometry']['coordinates'][0][0])
        sharing_station = SharingStation.query.filter_by(external_id=external_id).filter_by(sharing_provider_id=sharing_provider.id)
        if sharing_station.count():
            sharing_station = sharing_station.first()
        else:
            sharing_station = SharingStation()
            sharing_station.created = datetime.datetime.now()
            sharing_station.external_id = external_id
        sharing_station.active = 1
        sharing_station.updated = datetime.datetime.now()
        sharing_station.lat = raw_sharing_station['geometry']['coordinates'][0][1]
        sharing_station.lon = raw_sharing_station['geometry']['coordinates'][0][0]
        sharing_station.name = raw_sharing_station['properties']['LANGNAME']
        # station_type 6: bike-rack category (magic constant from the data
        # model) — TODO confirm against SharingStation definitions.
        sharing_station.station_type = 6
        sharing_station.sharing_provider_id = sharing_provider.id
        db.session.add(sharing_station)
        db.session.commit()
|
{
"content_hash": "fb79cfe7363a6d7b0264ddfee7c2d195",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 755,
"avg_line_length": 64.01851851851852,
"alnum_prop": 0.7674284061324849,
"repo_name": "ruhrmobil-E/mobilitaet-finden",
"id": "05b568312c0958e3bd7a791797289f0f12b52607",
"size": "3478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/sync/fahrradstaender_moers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "HTML",
"bytes": "21972"
},
{
"name": "JavaScript",
"bytes": "23732"
},
{
"name": "Python",
"bytes": "70993"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
}
|
import os
import django
# Make filepaths relative to settings.
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
DEBUG = True
TEMPLATE_DEBUG = True
# Django < 1.6 needs the backported discovery runner package.
if django.VERSION < (1, 6):
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
else:
    TEST_RUNNER = 'django.test.runner.DiscoverRunner'
JINJA_CONFIG = {}
SITE_ID = 1
SECRET_KEY = 'foobar'  # test-only value; never use in production
# File-based SQLite keeps the test database setup dependency-free.
DATABASES = {
    'default': {
        'NAME': 'test.db',
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'waffle',
    'test_app',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'waffle.middleware.WaffleMiddleware',
)
ROOT_URLCONF = 'test_app.urls'
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
# Apps whose templates must be rendered by Django, not jingo/Jinja.
JINGO_EXCLUDE_APPS = (
    'django',
    'waffle',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
)
# Waffle defaults under test: everything off unless a test enables it.
WAFFLE_FLAG_DEFAULT = False
WAFFLE_SWITCH_DEFAULT = False
WAFFLE_SAMPLE_DEFAULT = False
WAFFLE_OVERRIDE = False
WAFFLE_CACHE_PREFIX = 'test:'
# Django < 1.7 has no built-in migrations; fall back to South.
if django.VERSION < (1, 7):
    INSTALLED_APPS += ('south', )
    SOUTH_MIGRATION_MODULES = {
        'waffle': 'waffle.south_migrations'
    }
|
{
"content_hash": "3fad288b5080d8b62c6daaf19ab9597c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 62,
"avg_line_length": 21.28,
"alnum_prop": 0.6760651629072681,
"repo_name": "mark-adams/django-waffle",
"id": "07c6c24bd989ab1528310b00da96cddcb646fa6f",
"size": "1596",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "test_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "929"
},
{
"name": "JavaScript",
"bytes": "1250"
},
{
"name": "Python",
"bytes": "123633"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
from decimal import Decimal
import shop.util.fields
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial auto-generated schema migration for the shop app.

    Creates the core commerce models (Address, Cart, Country, Order,
    Product and their related tables) and wires up the foreign keys in a
    second pass.  Auto-generated: do not hand-edit field definitions.
    """
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('address', models.CharField(max_length=255, verbose_name='Address')),
                ('address2', models.CharField(max_length=255, verbose_name='Address2', blank=True)),
                ('zip_code', models.CharField(max_length=20, verbose_name='Zip Code')),
                ('city', models.CharField(max_length=20, verbose_name='City')),
                ('state', models.CharField(max_length=255, verbose_name='State')),
            ],
            options={
                'verbose_name': 'Address',
                'verbose_name_plural': 'Addresses',
            },
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('user', models.OneToOneField(null=True, blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Cart',
                'verbose_name_plural': 'Carts',
            },
        ),
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantity', models.IntegerField()),
                ('cart', models.ForeignKey(related_name='items', to='shop.Cart')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Cart item',
                'verbose_name_plural': 'Cart items',
            },
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                'verbose_name': 'Country',
                'verbose_name_plural': 'Countries',
            },
        ),
        migrations.CreateModel(
            name='ExtraOrderItemPriceField',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('label', models.CharField(max_length=255, verbose_name='Label')),
                ('value', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Amount', max_digits=30, decimal_places=2)),
                ('data', jsonfield.fields.JSONField(null=True, verbose_name='Serialized extra data', blank=True)),
            ],
            options={
                'verbose_name': 'Extra order item price field',
                'verbose_name_plural': 'Extra order item price fields',
            },
        ),
        migrations.CreateModel(
            name='ExtraOrderPriceField',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('label', models.CharField(max_length=255, verbose_name='Label')),
                ('value', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Amount', max_digits=30, decimal_places=2)),
                ('data', jsonfield.fields.JSONField(null=True, verbose_name='Serialized extra data', blank=True)),
                ('is_shipping', models.BooleanField(default=False, verbose_name='Is shipping', editable=False)),
            ],
            options={
                'verbose_name': 'Extra order price field',
                'verbose_name_plural': 'Extra order price fields',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', models.IntegerField(default=10, verbose_name='Status', choices=[(10, 'Processing'), (20, 'Confirming'), (30, 'Confirmed'), (40, 'Completed'), (50, 'Shipped'), (60, 'Canceled')])),
                ('order_subtotal', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Order subtotal', max_digits=30, decimal_places=2)),
                ('order_total', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Order Total', max_digits=30, decimal_places=2)),
                ('shipping_address_text', models.TextField(null=True, verbose_name='Shipping address', blank=True)),
                ('billing_address_text', models.TextField(null=True, verbose_name='Billing address', blank=True)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='Updated')),
                ('cart_pk', models.PositiveIntegerField(null=True, verbose_name='Cart primary key', blank=True)),
                ('user', models.ForeignKey(verbose_name='User', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Order',
                'verbose_name_plural': 'Orders',
            },
        ),
        migrations.CreateModel(
            name='OrderExtraInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField(verbose_name='Extra info', blank=True)),
                ('order', models.ForeignKey(related_name='extra_info', verbose_name='Order', to='shop.Order')),
            ],
            options={
                'verbose_name': 'Order extra info',
                'verbose_name_plural': 'Order extra info',
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('product_reference', models.CharField(max_length=255, verbose_name='Product reference')),
                ('product_name', models.CharField(max_length=255, null=True, verbose_name='Product name', blank=True)),
                ('unit_price', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Unit price', max_digits=30, decimal_places=2)),
                ('quantity', models.IntegerField(verbose_name='Quantity')),
                ('line_subtotal', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Line subtotal', max_digits=30, decimal_places=2)),
                ('line_total', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Line total', max_digits=30, decimal_places=2)),
                ('order', models.ForeignKey(related_name='items', verbose_name='Order', to='shop.Order')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Order item',
                'verbose_name_plural': 'Order items',
            },
        ),
        migrations.CreateModel(
            name='OrderPayment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('amount', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Amount', max_digits=30, decimal_places=2)),
                ('transaction_id', models.CharField(help_text="The transaction processor's reference", max_length=255, verbose_name='Transaction ID')),
                ('payment_method', models.CharField(help_text='The payment backend used to process the purchase', max_length=255, verbose_name='Payment method')),
                ('order', models.ForeignKey(verbose_name='Order', to='shop.Order')),
            ],
            options={
                'verbose_name': 'Order payment',
                'verbose_name_plural': 'Order payments',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', models.SlugField(unique=True, verbose_name='Slug')),
                ('active', models.BooleanField(default=False, verbose_name='Active')),
                ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date added')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
                ('unit_price', shop.util.fields.CurrencyField(default=Decimal('0.0'), verbose_name='Unit price', max_digits=30, decimal_places=2)),
                ('polymorphic_ctype', models.ForeignKey(related_name='polymorphic_shop.product_set+', editable=False, to='contenttypes.ContentType', null=True)),
            ],
            options={
                'abstract': False,
                'verbose_name': 'Product',
                'verbose_name_plural': 'Products',
            },
        ),
        migrations.AddField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='shop.Product', null=True),
        ),
        migrations.AddField(
            model_name='extraorderpricefield',
            name='order',
            field=models.ForeignKey(verbose_name='Order', to='shop.Order'),
        ),
        migrations.AddField(
            model_name='extraorderitempricefield',
            name='order_item',
            field=models.ForeignKey(verbose_name='Order item', to='shop.OrderItem'),
        ),
        migrations.AddField(
            model_name='cartitem',
            name='product',
            field=models.ForeignKey(to='shop.Product'),
        ),
        migrations.AddField(
            model_name='address',
            name='country',
            field=models.ForeignKey(verbose_name='Country', blank=True, to='shop.Country', null=True),
        ),
        migrations.AddField(
            model_name='address',
            name='user_billing',
            field=models.OneToOneField(related_name='billing_address', null=True, blank=True, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='address',
            name='user_shipping',
            field=models.OneToOneField(related_name='shipping_address', null=True, blank=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{
"content_hash": "8500d602a26222f180fc439953420ea9",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 206,
"avg_line_length": 52.483870967741936,
"alnum_prop": 0.5652822899288787,
"repo_name": "chriscauley/django-shop",
"id": "cd97c523ba3fedee9a6eec7e3e7a3d3c8bf52d06",
"size": "11413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9179"
},
{
"name": "Python",
"bytes": "253667"
},
{
"name": "Shell",
"bytes": "916"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from models import User, Photo, Album, Tag
def front_view(request):
    """Render the static landing page."""
    return render(request, 'front.html')
def home_view(request):
    """Render the home page listing every album."""
    context = {'albums': Album.objects.all()}
    return render(request, 'home.html', context)
def album_view(request, albumid):
    """Render the photo listing for one album."""
    album = Album.objects.get(pk=albumid)
    return render(request, 'album.html', {'photos': album.photos.all()})
def photo_view(request, photoid):
    """Render a single photo's detail page."""
    photo = Photo.objects.get(pk=photoid)
    return render(request, 'photo.html', {'photo': photo})
def tag_view(request, tagid):
    """Render every photo carrying the given tag."""
    tag = Tag.objects.get(pk=tagid)
    photos = Photo.objects.filter(tags=tag).all()
    return render(request, 'tag.html', {'photos': photos, 'tag': tag})
|
{
"content_hash": "4103c3ca335715c58f7644c4f45ced6e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 29.192307692307693,
"alnum_prop": 0.6982872200263505,
"repo_name": "sniboboof/djangophotos",
"id": "5e04e5032343c24218588b7df29ccae682328285",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "albumproject/albumapp/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18437"
}
],
"symlink_target": ""
}
|
"""Setuptools configuration for aitertools."""
from setuptools import setup
from setuptools import find_packages
# Read the long description from the README shipped next to this file.
with open('README.rst', 'r') as readmefile:
    README = readmefile.read()
setup(
    name='aitertools',
    version='0.1.0',
    url='https://github.com/asyncdef/aitertools',
    description='Async versions of the itertools features.',
    author="Kevin Conway",
    author_email="kevinjacobconway@gmail.com",
    long_description=README,
    license='Apache 2.0',
    # Ship only the package itself; exclude test/build artifacts.
    packages=find_packages(exclude=['tests', 'build', 'dist', 'docs']),
    install_requires=[
    ],
    entry_points={
        'console_scripts': [
        ],
    },
    include_package_data=True,
)
|
{
"content_hash": "087365ba1090df25e967443e6587af34",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 22.9,
"alnum_prop": 0.6521106259097526,
"repo_name": "asyncdef/aitertools",
"id": "2d5cc3b6375f12bbfb85eb670109be0c8ad81e92",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53470"
}
],
"symlink_target": ""
}
|
"""A plugin that gather extension IDs from Chrome history browser."""
import re
import requests
from plaso.analysis import interface
from plaso.analysis import logger
from plaso.analysis import manager
class ChromeExtensionPlugin(interface.AnalysisPlugin):
"""Convert Chrome extension IDs into names, requires Internet connection."""
NAME = 'chrome_extension'
_SUPPORTED_EVENT_DATA_TYPES = frozenset([
'fs:stat'])
_TITLE_RE = re.compile(r'<title>([^<]+)</title>')
_WEB_STORE_URL = 'https://chrome.google.com/webstore/detail/{xid}?hl=en-US'
  def __init__(self):
    """Initializes the Chrome extension analysis plugin."""
    super(ChromeExtensionPlugin, self).__init__()
    # Saved list of already looked up extensions.
    self._extensions = {}
    # Per-user findings compiled into the report; presumably populated by
    # ExamineEvent as user -> [(extension name, identifier), ...] — see
    # CompileReport for the consuming shape.
    self._results = {}
def _GetChromeWebStorePage(self, extension_identifier):
"""Retrieves the page for the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: page content or None.
"""
web_store_url = self._WEB_STORE_URL.format(xid=extension_identifier)
try:
response = requests.get(web_store_url)
except (requests.ConnectionError, requests.HTTPError) as exception:
logger.warning((
'[{0:s}] unable to retrieve URL: {1:s} with error: {2!s}').format(
self.NAME, web_store_url, exception))
return None
return response.text
def _GetPathSegmentSeparator(self, path):
"""Given a path give back the path separator as a best guess.
Args:
path (str): path.
Returns:
str: path segment separator.
"""
if path.startswith('\\') or path[1:].startswith(':\\'):
return '\\'
if path.startswith('/'):
return '/'
if '/' and '\\' in path:
# Let's count slashes and guess which one is the right one.
forward_count = len(path.split('/'))
backward_count = len(path.split('\\'))
if forward_count > backward_count:
return '/'
return '\\'
# Now we are sure there is only one type of separators yet
# the path does not start with one.
if '/' in path:
return '/'
return '\\'
def _GetTitleFromChromeWebStore(self, extension_identifier):
"""Retrieves the name of the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: name of the extension or None.
"""
# Check if we have already looked this extension up.
if extension_identifier in self._extensions:
return self._extensions.get(extension_identifier)
page_content = self._GetChromeWebStorePage(extension_identifier)
if not page_content:
logger.warning(
'[{0:s}] no data returned for extension identifier: {1:s}'.format(
self.NAME, extension_identifier))
return None
first_line, _, _ = page_content.partition('\n')
match = self._TITLE_RE.search(first_line)
name = None
if match:
title = match.group(1)
if title.startswith('Chrome Web Store - '):
name = title[19:]
elif title.endswith('- Chrome Web Store'):
name = title[:-19]
if not name:
self._extensions[extension_identifier] = 'UNKNOWN'
return None
self._extensions[extension_identifier] = name
return name
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: analysis report.
"""
lines_of_text = []
for user, extensions in sorted(self._results.items()):
lines_of_text.append(' == USER: {0:s} =='.format(user))
for extension, extension_identifier in sorted(extensions):
lines_of_text.append(' {0:s} [{1:s}]'.format(
extension, extension_identifier))
lines_of_text.append('')
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
analysis_report = super(ChromeExtensionPlugin, self).CompileReport(mediator)
analysis_report.text = report_text
analysis_report.report_dict = self._results
return analysis_report
# pylint: disable=unused-argument
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
if event_data.data_type not in self._SUPPORTED_EVENT_DATA_TYPES:
return
filename = getattr(event_data, 'filename', None)
if not filename:
return
separator = self._GetPathSegmentSeparator(filename)
path_segments = filename.lower().split(separator)
# Determine if we have a Chrome extension ID.
if 'chrome' not in path_segments and 'chromium' not in path_segments:
return
if path_segments[-2] != 'extensions':
return
# TODO: use a regex to check extension identifier
extension_identifier = path_segments[-1]
if extension_identifier == 'Temp':
return
user = mediator.GetUsernameForPath(filename)
# We still want this information in here, so that we can
# manually deduce the username.
if not user:
if len(filename) > 25:
user = 'Not found ({0:s}...)'.format(filename[0:25])
else:
user = 'Not found ({0:s})'.format(filename)
extension_string = self._GetTitleFromChromeWebStore(extension_identifier)
if not extension_string:
extension_string = extension_identifier
self._results.setdefault(user, [])
if (extension_string, extension_identifier) not in self._results[user]:
self._results[user].append((extension_string, extension_identifier))
# Make the plugin discoverable by its NAME ('chrome_extension').
manager.AnalysisPluginManager.RegisterPlugin(ChromeExtensionPlugin)
|
{
"content_hash": "6c209b4d74dd88d9b5646160334a76a1",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 80,
"avg_line_length": 30.542713567839197,
"alnum_prop": 0.6600855544587035,
"repo_name": "Onager/plaso",
"id": "5c2c37a3141a2323cc296f93df3d267a5648e07f",
"size": "6102",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plaso/analysis/chrome_extension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1172"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "4816953"
},
{
"name": "Shell",
"bytes": "22891"
}
],
"symlink_target": ""
}
|
import tempfile
from zipfile import ZipFile
from datetime import date
from wsgiref.util import FileWrapper
from django.http import HttpResponse
from collections import defaultdict
from explorer.utils import csv_report
# No-op translation placeholder (gettext-style underscore).
_ = lambda x: x
def generate_report_action(description="Generate CSV file from SQL query",):
    """Build a Django admin action serving the selected queries as CSV/ZIP."""
    def generate_report(modeladmin, request, queryset):
        # Only queries passing the blacklist check may be exported.
        permitted = [query for query in queryset if query.passes_blacklist()[0]]
        if permitted:
            payload = _package(permitted)
        else:
            payload = defaultdict(int)
        response = HttpResponse(payload["data"], content_type=payload["content_type"])
        response['Content-Disposition'] = payload["filename"]
        response['Content-Length'] = payload["length"]
        return response
    generate_report.short_description = description
    return generate_report
def _package(queries):
    """Package query results for download.

    A single query is served as a plain CSV attachment; multiple queries are
    bundled into a ZIP archive.

    Args:
        queries: non-empty sequence of query objects.

    Returns:
        dict with keys "content_type", "filename", "data" and "length".
    """
    ret = {}
    is_one = len(queries) == 1
    name_root = lambda n: "attachment; filename=%s" % n
    # Fix: the original "a and b or c" idiom fell through to the ZIP branch
    # whenever the single-query CSV content was empty (a falsy string), and
    # then crashed computing len()/.blksize on the wrong type.
    if is_one:
        ret["content_type"] = 'text/csv'
        ret["filename"] = name_root('%s.csv' % queries[0].title.replace(',', ''))
        ret["data"] = csv_report(queries[0]).getvalue()
        ret["length"] = len(ret["data"])
    else:
        ret["content_type"] = 'application/zip'
        ret["filename"] = name_root("Report_%s.zip" % date.today())
        ret["data"] = _build_zip(queries)
        ret["length"] = ret["data"].blksize
    return ret
def _build_zip(queries):
    """Write each query's CSV into a temp-file ZIP and return it wrapped.

    Args:
        queries: sequence of query objects.

    Returns:
        wsgiref FileWrapper over the rewound temporary ZIP file.
    """
    temp = tempfile.TemporaryFile()
    # Use the context manager so the archive is finalized (central directory
    # written) even if csv_report raises part way through.
    with ZipFile(temp, 'w') as zip_file:
        for r in queries:
            zip_file.writestr('%s.csv' % r.title, csv_report(r).getvalue() or "Error!")
    ret = FileWrapper(temp)
    temp.seek(0)
    return ret
|
{
"content_hash": "fd439adeffd50e70da3f8da0b96735c4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 135,
"avg_line_length": 36.266666666666666,
"alnum_prop": 0.6703431372549019,
"repo_name": "grantmcconnaughey/django-sql-explorer",
"id": "1997e54b997d8603d44782c786b17788e4a14b72",
"size": "1632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "explorer/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1390"
},
{
"name": "HTML",
"bytes": "28852"
},
{
"name": "JavaScript",
"bytes": "9220"
},
{
"name": "Python",
"bytes": "98478"
}
],
"symlink_target": ""
}
|
import json
import nbformat
from nbformat.v4 import new_notebook, new_code_cell
import os
import pytest
import requests
from subprocess import Popen
import sys
from tempfile import mkstemp
from testpath.tempdir import TemporaryDirectory
import time
from urllib.parse import urljoin
from selenium.webdriver import Firefox, Remote, Chrome
from .utils import Notebook
pjoin = os.path.join
def _wait_for_server(proc, info_file_path):
"""Wait 30 seconds for the notebook server to start"""
for i in range(300):
if proc.poll() is not None:
raise RuntimeError("Notebook server failed to start")
if os.path.exists(info_file_path):
try:
with open(info_file_path) as f:
return json.load(f)
except ValueError:
# If the server is halfway through writing the file, we may
# get invalid JSON; it should be ready next iteration.
pass
time.sleep(0.1)
raise RuntimeError("Didn't find %s in 30 seconds", info_file_path)
@pytest.fixture(scope='session')
def notebook_server():
    """Launch a notebook server in a temp dir and yield its connection info.

    The yielded dict holds the server url/token (read from the runtime info
    file) plus 'nbdir', 'extra_env' and the 'popen' process handle. The
    server is shut down via its REST API at teardown.
    """
    info = {}
    with TemporaryDirectory() as td:
        nbdir = info['nbdir'] = pjoin(td, 'notebooks')
        # Non-ASCII directory names exercise path-encoding handling.
        os.makedirs(pjoin(nbdir, u'sub ∂ir1', u'sub ∂ir 1a'))
        os.makedirs(pjoin(nbdir, u'sub ∂ir2', u'sub ∂ir 1b'))
        # Isolate config/runtime dirs so the user's setup is untouched.
        info['extra_env'] = {
            'JUPYTER_CONFIG_DIR': pjoin(td, 'jupyter_config'),
            'JUPYTER_RUNTIME_DIR': pjoin(td, 'jupyter_runtime'),
            'IPYTHONDIR': pjoin(td, 'ipython'),
        }
        env = os.environ.copy()
        env.update(info['extra_env'])
        command = [sys.executable, '-m', 'notebook',
                   '--no-browser',
                   '--notebook-dir', nbdir,
                   # run with a base URL that would be escaped,
                   # to test that we don't double-escape URLs
                   '--NotebookApp.base_url=/a@b/',
                   ]
        print("command=", command)
        proc = info['popen'] = Popen(command, cwd=nbdir, env=env)
        # The server writes its url/token to a pid-named runtime file.
        info_file_path = pjoin(td, 'jupyter_runtime',
                               'nbserver-%i.json' % proc.pid)
        info.update(_wait_for_server(proc, info_file_path))
        print("Notebook server info:", info)
        yield info
        # Shut the server down
        requests.post(urljoin(info['url'], 'api/shutdown'),
                      headers={'Authorization': 'token '+info['token']})
def make_sauce_driver():
    """This function helps travis create a driver on Sauce Labs.

    This function will err if used without specifying the variables expected
    in that context.
    """
    username = os.environ["SAUCE_USERNAME"]
    access_key = os.environ["SAUCE_ACCESS_KEY"]
    browser = os.environ['JUPYTER_TEST_BROWSER']

    version = "latest"
    if browser == 'firefox':
        # Attempt to work around issue where browser loses authentication
        version = '57.0'

    capabilities = {
        "tunnel-identifier": os.environ["TRAVIS_JOB_NUMBER"],
        "build": os.environ["TRAVIS_BUILD_NUMBER"],
        "tags": [os.environ['TRAVIS_PYTHON_VERSION'], 'CI'],
        "platform": "Windows 10",
        "browserName": browser,
        "version": version,
    }

    hub_url = "%s:%s@localhost:4445" % (username, access_key)
    print("Connecting remote driver on Sauce Labs")
    return Remote(
        desired_capabilities=capabilities,
        command_executor="http://%s/wd/hub" % hub_url)
@pytest.fixture(scope='session')
def selenium_driver():
    """Session-scoped driver: Sauce Labs if configured, else Chrome/Firefox."""
    if os.environ.get('SAUCE_USERNAME'):
        driver = make_sauce_driver()
    else:
        browser = os.environ.get('JUPYTER_TEST_BROWSER')
        driver = Chrome() if browser == 'chrome' else Firefox()

    yield driver

    # Teardown
    driver.quit()
@pytest.fixture(scope='module')
def authenticated_browser(selenium_driver, notebook_server):
    """Driver pointed at the running server, authenticated via token URL."""
    selenium_driver.jupyter_server_info = notebook_server
    login_url = "{url}?token={token}".format(**notebook_server)
    selenium_driver.get(login_url)
    return selenium_driver
@pytest.fixture
def notebook(authenticated_browser):
    """Yield a fresh notebook; switch back to the tree window afterwards."""
    tree_window = authenticated_browser.current_window_handle
    yield Notebook.new_notebook(authenticated_browser)
    authenticated_browser.switch_to.window(tree_window)
@pytest.fixture
def prefill_notebook(selenium_driver, notebook_server):
    """Return a factory that opens a notebook pre-filled with *cells*."""
    def inner(cells):
        # Promote plain strings to code cells; keep cell objects as-is.
        normalized = [c if not isinstance(c, str) else new_code_cell(c)
                      for c in cells]
        nb = new_notebook(cells=normalized)
        fd, path = mkstemp(dir=notebook_server['nbdir'], suffix='.ipynb')
        with open(fd, 'w', encoding='utf-8') as f:
            nbformat.write(nb, f)
        fname = os.path.basename(path)
        selenium_driver.get(
            "{url}notebooks/{}?token={token}".format(fname, **notebook_server)
        )
        return Notebook(selenium_driver)

    return inner
|
{
"content_hash": "1e9e5ef797f7452f5610a4bc8524ec8b",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 34.16783216783217,
"alnum_prop": 0.61563651248465,
"repo_name": "sserrot/champion_relationships",
"id": "64cdfa23bda4d2a0ba01e0dd912415755d5e1f57",
"size": "4894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/notebook/tests/selenium/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore
from synapse.storage._base import SQLBaseStore
from synapse.federation.units import Pdu
from synapse.crypto.event_signing import (
add_event_pdu_content_hash, compute_pdu_event_reference_hash
)
from synapse.api.events.utils import prune_pdu
from unpaddedbase64 import encode_base64, decode_base64
from canonicaljson import encode_canonical_json
import sqlite3
import sys
class Store(object):
    """Minimal store stand-in.

    Borrows the unbound transaction methods straight from the synapse
    storage classes (via their __dict__) so they can be run against a raw
    sqlite cursor without constructing the full store objects.
    """
    _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
    _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
    _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
    _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"]
    _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
    _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"]
    _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
    _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
# Shared stateless store instance used by select_pdus().
store = Store()
def select_pdus(cursor):
cursor.execute(
"SELECT pdu_id, origin FROM pdus ORDER BY depth ASC"
)
ids = cursor.fetchall()
pdu_tuples = store._get_pdu_tuples(cursor, ids)
pdus = [Pdu.from_pdu_tuple(p) for p in pdu_tuples]
reference_hashes = {}
for pdu in pdus:
try:
if pdu.prev_pdus:
print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
for pdu_id, origin, hashes in pdu.prev_pdus:
ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
hashes[ref_alg] = encode_base64(ref_hsh)
store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh)
print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
pdu = add_event_pdu_content_hash(pdu)
ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh)
for alg, hsh_base64 in pdu.hashes.items():
print alg, hsh_base64
store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64))
except:
print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus
def main():
    """Open the sqlite database named on the command line and hash its PDUs."""
    db_conn = sqlite3.connect(sys.argv[1])
    select_pdus(db_conn.cursor())
    db_conn.commit()
# Script entry point: scripts-dev/hash_history.py <database-file>
if __name__=='__main__':
    main()
|
{
"content_hash": "57400357e1da2e2cbe97629636d02fb8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 117,
"avg_line_length": 39.333333333333336,
"alnum_prop": 0.6484893146647015,
"repo_name": "iot-factory/synapse",
"id": "616d6a10e733db74e7dbc94d19c1700726e972d0",
"size": "2714",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts-dev/hash_history.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31842"
},
{
"name": "Python",
"bytes": "1879672"
},
{
"name": "Shell",
"bytes": "4548"
}
],
"symlink_target": ""
}
|
"""List of directories which are known issues for Android WebView.
There are a number of directories in the Chromium tree which should be removed
when merging into Android. Some are for licensing reasons; others are to ensure
that the build inside the Android tree does not accidentally include the wrong
headers.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = [
    'third_party/accessibility-developer-tools', # crbug.com/165901
]

# Maps a repository (relative path) to the directories/globs inside it that
# must be removed before merging into Android.
KNOWN_INCOMPATIBLE = {
    '.': [
        # Incompatibly licensed code from the main chromium src/ directory.
        'base/third_party/xdg_mime',
        'breakpad',
        'chrome/installer/mac/third_party/xz',
        'chrome/test/data',
        'third_party/active_doc',
        'third_party/apple_apsl',
        'third_party/apple_sample_code',
        'third_party/bsdiff',
        'third_party/bspatch',
        'third_party/liblouis',
        'third_party/speech-dispatcher',
        'third_party/sudden_motion_sensor',
        'third_party/swiftshader',
        'third_party/talloc',
        'third_party/webdriver',
        'third_party/wtl',
        'tools/telemetry/third_party/websocket-client',
        # Code we don't want to build/include by accident from the main chromium
        # src/ directory.
        'third_party/ashmem/*.[ch]',
        'third_party/expat/files/lib',
        'third_party/libjpeg/*.[ch]',
    ],
    'third_party/icu': [
        # Incompatible code from ICU's repository.
        'source/data/brkitr',
    ],
}

# Temporary known issues are treated as incompatible paths of the top-level
# repository as well.
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
|
{
"content_hash": "81e4856c79f2a99198f3208545f7dd14",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 38.91803278688525,
"alnum_prop": 0.7000842459983151,
"repo_name": "cvsuser-chromium/chromium",
"id": "e1098a14ed377f5ca070758ab70fb2e94c8c0a8d",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "android_webview/tools/known_issues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "36421"
},
{
"name": "C",
"bytes": "6924841"
},
{
"name": "C++",
"bytes": "179649999"
},
{
"name": "CSS",
"bytes": "812951"
},
{
"name": "Java",
"bytes": "3768838"
},
{
"name": "JavaScript",
"bytes": "8338074"
},
{
"name": "Makefile",
"bytes": "52980"
},
{
"name": "Objective-C",
"bytes": "819293"
},
{
"name": "Objective-C++",
"bytes": "6453781"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "17897"
},
{
"name": "Python",
"bytes": "5640877"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "648699"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15926"
}
],
"symlink_target": ""
}
|
class InvalidGlob(Exception):
    """The glob passed is invalid."""
class PathNotFound(Exception):
    """One or more elements of the requested path did not exist in the object"""
class InvalidKeyName(Exception):
    """This key contains the separator character or another invalid character"""
class FilteredValue(Exception):
    """Unable to return a value, since the filter rejected it"""
|
{
"content_hash": "58edfa1889d4ef1034c994cd8d190648",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.7116279069767442,
"repo_name": "benthomasson/dpath-python",
"id": "4ab39169fedd07f33d0d57537644112ef77cd9e3",
"size": "430",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dpath/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39166"
}
],
"symlink_target": ""
}
|
from django.urls import reverse
from django.http import Http404
from django.test import TestCase, override_settings
from unittest import mock
from rest_framework.exceptions import APIException, PermissionDenied
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.routers import SimpleRouter
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
class DummyViewSet(GenericViewSet):
    """Dummy test viewset that raises an exception when calling list()."""

    def list(self, *args, **kwargs):
        # Deliberately fail so tests can exercise the exception handler.
        raise Exception('something went wrong')
# Route the dummy viewset so tests can reverse() its list endpoint.
test_exception = SimpleRouter()
test_exception.register('testexcept', DummyViewSet, basename='test-exception')
@override_settings(ROOT_URLCONF=tuple(test_exception.urls))
class TestExceptionHandlerWithViewSet(TestCase):
    """Full request-cycle tests of the exception handler via DummyViewSet."""

    # The test client connects to got_request_exception, so we need to mock it
    # otherwise it would immediately re-raise the exception.
    @mock.patch('olympia.api.exceptions.got_request_exception')
    def test_view_exception(self, got_request_exception_mock):
        url = reverse('test-exception-list')
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=False):
            response = self.client.get(url)
        # With DEBUG off, only a generic message is exposed to the client.
        assert response.status_code == 500
        assert response.data == {'detail': 'Internal Server Error'}
        # The signal is still sent so error reporting keeps working.
        assert got_request_exception_mock.send.call_count == 1
        assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
        assert isinstance(
            got_request_exception_mock.send.call_args[1]['request'], Request
        )

    # The test client connects to got_request_exception, so we need to mock it
    # otherwise it would immediately re-raise the exception.
    @mock.patch('olympia.api.exceptions.got_request_exception')
    def test_view_exception_debug(self, got_request_exception_mock):
        url = reverse('test-exception-list')
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=True):
            response = self.client.get(url)
        # With DEBUG on, the response additionally carries a traceback.
        assert response.status_code == 500
        data = response.data
        assert set(data.keys()) == set(['detail', 'traceback'])
        assert data['detail'] == 'Internal Server Error'
        assert 'Traceback (most recent call last):' in data['traceback']
        assert got_request_exception_mock.send.call_count == 1
        assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
        assert isinstance(
            got_request_exception_mock.send.call_args[1]['request'], Request
        )
class TestExceptionHandler(TestCase):
    """Unit tests for the configured DRF EXCEPTION_HANDLER.

    The eight original test methods shared two copy-pasted patterns; they
    are factored into the private helpers below. Test method names are
    unchanged so test discovery and references keep working.
    """

    def _handle(self, exc):
        """Raise *exc* and run it through the handler, propagation off."""
        exception_handler = api_settings.EXCEPTION_HANDLER
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
            try:
                raise exc
            except Exception as caught:
                return exception_handler(caught, {})

    def _handle_propagating(self, exc):
        """Raise *exc* and run it through the handler with propagation on.

        The handler is expected to re-raise, so this normally never returns.
        """
        exception_handler = api_settings.EXCEPTION_HANDLER
        with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
            try:
                raise exc
            except Exception as caught:
                exception_handler(caught, {})

    def _check_returns_response(self, exc, expected_status):
        """Assert the handler converts *exc* into a Response with the status."""
        response = self._handle(exc)
        assert isinstance(response, Response)
        assert response.status_code == expected_status

    def test_api_exception_handler_returns_response(self):
        self._check_returns_response(APIException(), 500)

    def test_exception_handler_returns_response_for_404(self):
        self._check_returns_response(Http404(), 404)

    def test_exception_handler_returns_response_for_403(self):
        self._check_returns_response(PermissionDenied(), 403)

    def test_non_api_exception_handler_returns_response(self):
        # Regular DRF exception handler does not return a Response for non-api
        # exceptions, but we do.
        self._check_returns_response(Exception(), 500)

    def test_api_exception_handler_with_propagation(self):
        with self.assertRaises(APIException):
            self._handle_propagating(APIException())

    def test_exception_handler_404_with_propagation(self):
        with self.assertRaises(Http404):
            self._handle_propagating(Http404())

    def test_exception_handler_403_with_propagation(self):
        with self.assertRaises(PermissionDenied):
            self._handle_propagating(PermissionDenied())

    def test_non_api_exception_handler_with_propagation(self):
        # Regular DRF exception handler does not return a Response for non-api
        # exceptions, but we do.
        with self.assertRaises(KeyError):
            self._handle_propagating(KeyError())
|
{
"content_hash": "fe4e13ffdecb6fa107ebd339cef943af",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 78,
"avg_line_length": 40.90066225165563,
"alnum_prop": 0.6450777202072538,
"repo_name": "bqbn/addons-server",
"id": "2561f66ec0e29da82acf9027a06422a6295b1961",
"size": "6176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/api/tests/test_exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810080"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "585550"
},
{
"name": "JavaScript",
"bytes": "1071952"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5323934"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1503"
}
],
"symlink_target": ""
}
|
import sys
import os
import Cyberoam
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import qrc_icon
from base64 import *
# Tray/status ball icons (Qt resource paths).
GREEN = ':/icons/ball-green.png'
RED = ':/icons/ball-red.png'
YELLOW = ':/icons/ball-yellow.png'

# Lock-file name used to detect an already-running instance.
lck_file = '.sam.lck'

if "linux2" in sys.platform:
    def set_proc_name(newname):
        """Set the process name shown by tools like ps (Linux only)."""
        from ctypes import cdll, byref, create_string_buffer
        libc = cdll.LoadLibrary('libc.so.6')  # Loading a 3rd party library C
        buff = create_string_buffer(len(newname)+1)  # Note: One larger than the name (man prctl says that)
        buff.value = newname  # Null terminated string as it should be
        libc.prctl(15, byref(buff), 0, 0, 0)  # 15 == PR_SET_NAME (/usr/include/linux/prctl.h); args 3..5 must be zero
    set_proc_name("SAM")

# Fix: the original test was "'win' in sys.platform", which also matches
# 'darwin' and would send macOS down the Windows branch, crashing on
# os.getenv('appdata') being None. Check the prefix explicitly.
if sys.platform.startswith('win'):
    lck_file = os.getenv('appdata')+'\\'+lck_file
    def exists (pid):
        """kill function for Win32"""
        import ctypes
        return ctypes.windll.kernel32.OpenProcess(1, 0, pid)!=0
else:
    lck_file = os.getenv('HOME')+'/'+lck_file
    def exists (pid):
        """Return True if a process with *pid* exists (POSIX signal-0 probe)."""
        try:
            os.kill (pid, 0)  # signal 0: existence check only, nothing delivered
            return True
        except OSError:
            # Fix: narrowed from a bare "except:". NOTE(review): EPERM also
            # means the process exists; preserved the original False here.
            return False
class Account (QTreeWidgetItem):
    """Tree item representing one Cyberoam account.

    The login/logout/getQuota methods run on self.thread and report results
    back to the GUI exclusively via emitted signals, never direct calls.
    """

    def __init__ (self, parent, login_id, uid, passwd, no):
        super(Account, self).__init__(parent, [uid, '', '', ''])
        self.username = login_id  # login name sent to the Cyberoam portal
        self.passwd = passwd  # plain-text password (stored base64-encoded in settings)
        self.acc_no = no  # index of this account in the accounts table
        self.parent = parent
        self.setIcon (0, QIcon(YELLOW))  # yellow: state unknown / logged out
        self.pbar = QProgressBar()  # per-account usage bar (percent)
        self.pbar.setRange (0, 100)
        self.thread = QThread()  # worker thread; also used to emit signals

    def login (self):
        """Attempt portal login; emit status signals for every outcome.

        On limit/auth errors, 'switch(int)' is emitted so the owner can try
        the next account; network errors only mark the account red.
        """
        try:
            Cyberoam.login (self.username, self.passwd)
            self.setText (1, 'Logged in')
            self.thread.emit (SIGNAL('setIcon(QString)'), GREEN)
            self.thread.emit (SIGNAL('loggedIn(int)'), self.acc_no)
            return
        except Cyberoam.DataTransferLimitExceeded:
            self.setText (3, '0.00 MB')
            self.thread.emit (SIGNAL('usage(int)'), self.pbar.maximum())
            self.thread.emit (SIGNAL('limitExceeded()'))
            self.setText (1, 'Limit Reached')
        except Cyberoam.WrongPassword:
            self.thread.emit (SIGNAL('wrongPassword()'))
            self.setText (1, 'Wrong Password')
        except Cyberoam.MultipleLoginError:
            self.thread.emit (SIGNAL('multipleLogin()'))
            self.setText (1, 'Account in use')
        except IOError:
            # Network failure: flag red but do not switch to another account.
            self.thread.emit (SIGNAL('networkError()'))
            #self.setText (1, 'Network Error')
            return
        self.thread.emit (SIGNAL('setIcon(QString)'), RED)
        self.thread.emit(SIGNAL('switch(int)'), self.acc_no)

    def logout (self):
        """Log the account out of the portal and reset its status icon."""
        try:
            Cyberoam.logout (self.username, self.passwd)
            self.setText (1, 'Logged out')
            self.thread.emit (SIGNAL('setIcon(QString)'), YELLOW)
            self.thread.emit (SIGNAL('loggedOut()'))
        except IOError:
            self.thread.emit (SIGNAL('networkError()'))
            #self.setText (1, 'Network Error')
            self.thread.emit (SIGNAL('setIcon(QString)'), RED)

    def getQuota (self):
        """Fetch remaining data quota and emit usage/status signals.

        Usage is normalized to MB (KB values are scaled down) before being
        emitted as an int percentage source for the progress bar.
        """
        try:
            quota = Cyberoam.netUsage (self.username, self.passwd)
            self.setText (3, quota[1])
            used, suff = quota[0].split()
            used = round(float(used)/1024) if suff=='KB' else round(float(used))
            self.thread.emit (SIGNAL('usage(int)'), int(used))
            self.thread.emit (SIGNAL('gotQuota(int)'), self.acc_no)
            if self.text(1) != 'Logged in':
                self.setText (1, '')
                self.thread.emit (SIGNAL('setIcon(QString)'), YELLOW)
            return
        except Cyberoam.DataTransferLimitExceeded:
            self.setText (3, '0.00 MB')
            self.thread.emit (SIGNAL('usage(int)'), self.pbar.maximum())
            self.thread.emit (SIGNAL('limitExceeded()'))
            self.setText (1, 'Limit Reached')
        except Cyberoam.WrongPassword:
            self.thread.emit (SIGNAL('wrongPassword()'))
            self.setText (1, 'Wrong Password')
        except IOError:
            self.thread.emit (SIGNAL('networkError()'))
            #self.setText (1, 'Network Error')
            return
        self.thread.emit(SIGNAL('switch(int)'), self.acc_no)
        self.thread.emit (SIGNAL('setIcon(QString)'), RED)

    def _setIcon (self, icon):
        """Slot: set the column-0 icon from a resource path string."""
        self.setIcon (0, QIcon(icon))
class MainWindow (QMainWindow):
    def __init__(self, parent=None):
        """Build the main window: actions, menus, toolbar, account table,
        system tray icon, and timer/signal connections."""
        super (MainWindow, self).__init__(parent)
        # Timers driving periodic re-login and quota refresh (intervals are
        # loaded later from QSettings in loadPrefs).
        self.loginTimer = QTimer()
        self.quotaTimer = QTimer()
        self.currentLogin = -1  # index of the active account, -1 when none
        self.toolbar = self.addToolBar ('Toolbar')
        self.status = self.statusBar()
        # Actions shared by the menu bar, toolbar and tray menu.
        loginAction = self.createAction ('Log &In', self.login, ':/icons/network-connect.png', 'Log In')
        logoutAction = self.createAction ('Log &Out', self.logout, ':/icons/network-disconnect.png', 'Log Out')
        quotaAction = self.createAction ('Get Usage', self.getQuota, ':/icons/view-refresh.png', 'Refresh Quota', QKeySequence.Refresh)
        newUserAction = self.createAction ('&New...', self.addAccount, ':/icons/user-add-icon.png', 'Create User', QKeySequence.New)
        rmUserAction = self.createAction ('Remove', self.rmAccount, ':/icons/user-remove-icon.png', 'Remove User', QKeySequence.Delete)
        editUserAction = self.createAction ('&Edit...', self.editAccount, ':/icons/user-icon.png', 'Edit User')
        clearAction = self.createAction ('&Clear All', self.clearList, ':/icons/edit-clear-list.png', 'Clear Users list')
        sortAction = self.createAction ('&Sort', self.sort, '', 'Sort accounts by ID')
        upAction = self.createAction ('Up', self.up, ':/icons/up-icon.png', 'Move up')
        downAction = self.createAction ('Down', self.down, ':/icons/down-icon.png', 'Move down')
        # Checkable settings actions: kept as attributes so loadPrefs can
        # restore their state from QSettings.
        self.autoSwitchAction = self.createAction ('Enable auto-switch', self.setAutoSwitch, ':/icons/switch-user.png', 'Enable/Disable the auto switch function', None, True)
        self.balloonAction = self.createAction ('Enable balloon popups', self.setBalloon, None, 'Enable balloon popups', None, True)
        prefsAction = self.createAction ('&Configure', self.configure, ':/icons/configure.png', 'Configure SAM', QKeySequence.Preferences)
        updateAction = self.createAction ('&Update', self.update, ':/icons/update.png', 'Update SAM')
        aboutAction = self.createAction ('&About', self.about, ':/icons/help-about.png', 'About SAM')
        quitAction = self.createAction ('&Quit', self.quit, ':/icons/application-exit.png', 'Quit SAM', QKeySequence.Quit)
        # Menu bar.
        menubar = self.menuBar()
        userMenu = menubar.addMenu ('&Users')
        userMenu.addAction (newUserAction)
        userMenu.addAction (sortAction)
        userMenu.addSeparator()
        userMenu.addAction (editUserAction)
        userMenu.addAction (rmUserAction)
        userMenu.addAction (clearAction)
        userMenu.addSeparator()
        userMenu.addAction (quitAction)
        actionsMenu = menubar.addMenu ('&Actions')
        actionsMenu.addAction (loginAction)
        actionsMenu.addAction (quotaAction)
        actionsMenu.addAction (logoutAction)
        settingsMenu = menubar.addMenu ('&Settings')
        settingsMenu.addAction (self.autoSwitchAction)
        settingsMenu.addAction (self.balloonAction)
        settingsMenu.addAction (prefsAction)
        helpMenu = menubar.addMenu ('&Help')
        helpMenu.addAction (updateAction)
        helpMenu.addAction (aboutAction)
        # Toolbar mirrors the most common menu actions.
        self.toolbar.addAction ( newUserAction )
        self.toolbar.addAction ( editUserAction )
        self.toolbar.addAction ( rmUserAction )
        self.toolbar.addSeparator()
        self.toolbar.addAction ( loginAction )
        self.toolbar.addAction ( quotaAction )
        self.toolbar.addAction ( logoutAction )
        self.toolbar.addSeparator()
        self.toolbar.addAction ( upAction )
        self.toolbar.addAction ( downAction )
        self.toolbar.addSeparator()
        self.toolbar.addAction ( prefsAction )
        self.toolbar.addAction ( updateAction )
        self.toolbar.addAction ( aboutAction )
        self.toolbar.addAction ( quitAction )
        # Central accounts table: ID / Status / Usage / Remaining.
        self.table = QTreeWidget (self)
        self.table.setRootIsDecorated (False)
        headers = self.table.headerItem()
        headers.setText (0, 'ID')
        headers.setText (1, 'Status')
        headers.setText (2, 'Usage')
        headers.setText (3, 'Remaining')
        self.table.header().resizeSection (0, 120)
        self.table.header().resizeSection (2, 160)
        self.setCentralWidget (self.table)
        self.setWindowIcon (QIcon(':/icons/logo.png'))
        self.setWindowTitle ('SAM - Syberoam Account Manager')
        self.resize(498, self.size().height())
        # System tray icon with a reduced action menu.
        self.tray = QSystemTrayIcon (self)
        self.tray.setIcon ( QIcon(':/icons/logo.png') )
        self.tray.setVisible(True)
        self.trayMenu = QMenu ()
        self.trayMenu.addAction ( self.autoSwitchAction )
        self.trayMenu.addAction ( self.balloonAction )
        #self.trayMenu.addAction ( prefsAction )
        self.trayMenu.addSeparator()
        self.trayMenu.addAction ( loginAction )
        self.trayMenu.addAction ( logoutAction )
        self.trayMenu.addSeparator()
        self.trayMenu.addAction ( quitAction )
        self.tray.setContextMenu ( self.trayMenu )
        # Signal wiring: session shutdown, tray clicks, table interaction,
        # and the two periodic timers.
        self.connect ( qApp, SIGNAL('commitDataRequest(QSessionManager)'),qApp.commitData)
        self.connect ( self.tray, SIGNAL('activated(QSystemTrayIcon::ActivationReason)'), self.toggleWindow )
        self.connect ( self.table, SIGNAL('itemSelectionChanged()'), self.selectItem )
        self.connect ( self.table, SIGNAL('itemClicked(QTreeWidgetItem*,int)'), self.selectItem )
        self.connect ( self.table, SIGNAL('itemDoubleClicked(QTreeWidgetItem*,int)'), self.login )
        self.connect ( self.loginTimer, SIGNAL('timeout()'), self.reLogin )
        self.connect ( self.quotaTimer, SIGNAL('timeout()'), self.refreshQuota )
def loadPrefs (self):
    """Restore window geometry, configuration and saved accounts from
    QSettings, write this process' PID to the lock file, and optionally
    auto-login with the first stored account.
    """
    settings = QSettings("DA-IICT","SAM")
    point = settings.value("pos").toPoint()
    size = settings.value("size").toSize()
    self.setGeometry(QRect(point,size))
    settings.beginGroup("Conf")
    self.autoSwitchAction.setChecked (settings.value("AutoSwitch").toBool())
    self.balloonAction.setChecked((settings.value("Balloons")).toBool())
    # Stored intervals are in seconds; Qt timers take milliseconds.
    self.loginTimer.setInterval ( settings.value("ReloginAfter").toInt()[0]*1000 )
    self.quotaTimer.setInterval ( settings.value("UpdateQuotaAfter").toInt()[0]*1000 )
    settings.endGroup()
    settings.beginGroup("Accounts")
    length = (settings.value("Length")).toInt()[0]
    for ac in range(length):
        temp1= "Account"+str(ac)
        temp = settings.value(temp1).toString()
        # Entries are persisted as '<username>!@#$%<base64(password)>'.
        username, password = temp.split('!@#$%')
        pasw = b64decode(str(password))
        self.addAccount(str(username),str(pasw))
    settings.endGroup()
    # 'with' guarantees the lock-file handle is closed even if the write
    # fails (the original open/write/close leaked it on error).
    with open(lck_file, 'w') as lck:
        lck.write ( str(os.getpid()) )
    if self.getSetting('Conf', 'AutoLogin').toBool() and self.table.topLevelItemCount()>0:
        self.login()
def setAutoSwitch(self, checked):
    """Persist the 'auto switch' toggle state to settings."""
    self.setSetting('Conf', 'AutoSwitch', int(checked))
def setBalloon(self, checked):
    """Persist the 'balloon popups' toggle state to settings."""
    self.setSetting('Conf', 'Balloons', int(checked))
def closeEvent(self, event):
    """Qt close handler: hide to the system tray instead of quitting.

    The real exit path is quit(); ignoring the event keeps the app alive.
    """
    if self.isVisible():
        self.hide()
    event.ignore()
def selectItem (self):
    """Re-anchor the current selection on column 2 (the usage bar)."""
    self.table.setCurrentItem (self.table.currentItem(), 2)
def toggleWindow(self, reason):
    """Show or hide the main window on a tray-icon left click."""
    if reason != QSystemTrayIcon.Trigger:
        return
    if self.isVisible():
        self.hide()
    else:
        self.show()
def addAccount (self, uid=None, pwd=None):
    """Add an account row, or prompt the user for credentials.

    With both ``uid`` and ``pwd`` given: create the row, wire the worker
    thread's signals, fetch the quota and persist the entry. Otherwise,
    open the prompt dialog and recurse with its input.
    """
    import prompt
    if uid is not None and pwd is not None:
        # Displayed login name is uid + the configured domain suffix.
        new = Account (self.table, uid+str(self.getSetting('Conf', 'Domain').toString()), uid, pwd, self.table.topLevelItemCount())
        self.table.setItemWidget (new, 2, new.pbar)
        #self.connect (new.thread, SIGNAL('limitExceeded()'), self.onLimitExceed)
        #self.connect (new.thread, SIGNAL('wrongPassword()'), self.onWrongPassword)
        #self.connect (new.thread, SIGNAL('multipleLogin()'), self.onMultipleLogin)
        self.connect (new.thread, SIGNAL('networkError()'), self.onNetworkError)
        self.connect (new.thread, SIGNAL('usage(int)'), new.pbar.setValue)
        self.connect (new.thread, SIGNAL('setIcon(QString)'), new._setIcon)
        self.connect (new.thread, SIGNAL('switch(int)'), self.switch)
        self.connect (new.thread, SIGNAL('loggedIn(int)'), self.onLoggedIn)
        self.connect (new.thread, SIGNAL('loggedOut()'), self.onLoggedOut)
        self.connect (new.thread, SIGNAL('gotQuota(int)'), self.onGotQuota)
        self.getQuota (new)
        self.status.showMessage (uid+' added', 5000)
        self.setSetting ('Accounts', 'Length', self.table.topLevelItemCount())
        # Persisted as '<name>!@#$%<base64(password)>'.
        self.setSetting ('Accounts', 'Account'+str(self.table.indexOfTopLevelItem(new)), new.text(0)+'!@#$%'+b64encode(str(new.passwd)))
    else:
        dlg = prompt.Prompt(self)
        dlg.setWindowIcon (QIcon(':/icons/list-add-user.png'))
        if dlg.exec_():
            self.addAccount(str(dlg.unameEdit.text()), str(dlg.pwdEdit.text()))
def editAccount (self):
    """Edit username/password of the selected account via the prompt
    dialog; re-login if it is the active account, else refresh its quota,
    then persist the changed entry."""
    import prompt
    item = self.table.currentItem()
    dlg = prompt.Prompt(self, item.text(0))
    dlg.setWindowIcon (QIcon(':/icons/user-properties.png'))
    if dlg.exec_():
        item.username = str(dlg.unameEdit.text())+str(self.getSetting('Conf', 'Domain').toString())
        item.setText (0, str(dlg.unameEdit.text()))
        # An empty password field means "keep the old password".
        if str(dlg.pwdEdit.text()) != '':
            item.passwd = str( dlg.pwdEdit.text() )
        if self.table.indexOfTopLevelItem(item) == self.currentLogin:
            self.reLogin()
        else:
            self.getQuota ( item )
        self.setSetting ('Accounts', 'Account'+str(self.table.indexOfTopLevelItem(item)), item.text(0)+'!@#$%'+ b64encode(str(item.passwd)))
def configure (self):
    """Open the settings dialog and, when accepted, write all settings,
    apply the new server/port to Cyberoam and restart both timers."""
    import settings
    dlg = settings.SettingsDlg(self)
    if dlg.exec_():
        s = QSettings ('DA-IICT', 'SAM')
        s.beginGroup("Conf")
        s.setValue("AutoLogin", int(dlg.autoLogin.isChecked()))
        s.setValue("AutoSwitch", int(dlg.autoSwitchCheck.isChecked()))
        # Critical-switch only makes sense when auto-switch is on.
        s.setValue("SwitchOnCritical", int(dlg.criticalCheck.isChecked() and dlg.autoSwitchCheck.isChecked()))
        s.setValue("Balloons", int(dlg.balloonPopups.isChecked()))
        # Spin boxes are in minutes; stored values are seconds.
        s.setValue("UpdateQuotaAfter", dlg.quotaSpin.value()*60)
        s.setValue("ReloginAfter", dlg.loginSpin.value()*60)
        s.setValue("CriticalQuotaLimit", dlg.criticalSpin.value()*1024)
        s.setValue("Server", str (dlg.ipEdit.text()))
        s.setValue("Port", str (dlg.portEdit.text()))
        s.setValue("Domain", str(dlg.domainEdit.text()))
        s.endGroup()
        Cyberoam.cyberroamIP = str(dlg.ipEdit.text())
        Cyberoam.cyberroamPort = str(dlg.portEdit.text())
        self.autoSwitchAction.setChecked ( dlg.autoSwitchCheck.isChecked() )
        self.balloonAction.setChecked ( dlg.balloonPopups.isChecked() )
        # Reinitiate timers
        self.loginTimer.stop()
        self.quotaTimer.stop()
        self.loginTimer.setInterval ( dlg.loginSpin.value()*60*1000 )
        self.quotaTimer.setInterval ( dlg.quotaSpin.value()*60*1000 )
        self.loginTimer.start()
        self.quotaTimer.start()
def setSetting(self, group, key, value):
    """Write one value under ``group``/``key`` in the application QSettings."""
    store = QSettings('DA-IICT', 'SAM')
    store.beginGroup(group)
    store.setValue(key, value)
    store.endGroup()
def getSetting(self, group, key):
    """Return the QSettings value stored under ``group``/``key``."""
    store = QSettings('DA-IICT', 'SAM')
    store.beginGroup(group)
    value = store.value(key)
    store.endGroup()
    return value
def login (self, item=None):
    """Log in the given account row on its worker thread.

    Falls back to the selected row, then to the first row when nothing is
    selected. Marks the row as the current login before the thread starts.
    """
    if item is None:
        item = self.table.currentItem()
        if item is None:
            item = self.table.topLevelItem(0)
            self.table.setCurrentItem(item)
    # Wait out any previous job on this account's thread before reusing it.
    if item.thread.isRunning():
        item.thread.wait()
    item.thread.run = item.login
    self.currentLogin = self.table.indexOfTopLevelItem(item) #experimental
    item.thread.start()
def onLoggedIn (self, acc_no):
    """Thread callback after a successful login of account ``acc_no``:
    restart both timers, refresh the quota, optionally show a tray
    balloon, and clear stale 'Logged in' markers on other rows."""
    self.loginTimer.stop()
    self.quotaTimer.stop()
    self.currentLogin = acc_no
    self.loginTimer.start ()
    self.quotaTimer.start ()
    self.getQuota (self.table.topLevelItem(acc_no))
    if self.getSetting('Conf', 'Balloons').toBool():
        self.tray.showMessage ('SAM', self.table.topLevelItem(acc_no).text(0)+': Logged in')
    # Any other row still marked 'Logged in' belongs to a previous session.
    for i in range (self.table.topLevelItemCount()):
        item = self.table.topLevelItem(i)
        if item.text(1) == 'Logged in' and self.currentLogin!=i:
            item.setText (1, '')
            item.setIcon (0, QIcon(YELLOW))
def reLogin (self):
    """Timer slot: re-login the currently active account."""
    self.login (self.table.topLevelItem(self.currentLogin))
def logout (self):
    """Log out the active account on its worker thread.

    NOTE(review): assumes an account is active; with currentLogin == -1,
    topLevelItem(-1) yields None and this would crash -- confirm callers
    only enable the action while logged in.
    """
    item = self.table.topLevelItem (self.currentLogin)
    if item.thread.isRunning():
        item.thread.wait()
    item.thread.run = item.logout
    item.thread.start()
def onLoggedOut (self):
    """Thread callback: stop both timers and forget the active account."""
    self.loginTimer.stop()
    self.quotaTimer.stop()
    self.currentLogin = -1
def refreshQuota (self):
    """Timer slot: refresh the quota of the currently active account."""
    self.getQuota( self.table.topLevelItem(self.currentLogin) )
def getQuota (self, item=None):
    """Fetch the quota for ``item`` (default: selected row) on its thread.

    NOTE(review): with an empty table currentItem() is None and this
    crashes -- confirm callers guard against an empty list.
    """
    if item is None:
        item = self.table.currentItem()
    if item.thread.isRunning():
        item.thread.wait()
    item.thread.run = item.getQuota
    item.thread.start()
def onGotQuota (self, acc_no):
    """Thread callback with fresh quota for ``acc_no``: when critical
    switching is enabled and usage crosses the configured limit, mark the
    row and switch away (or warn if it was the last account)."""
    if self.getSetting ('Conf', 'SwitchOnCritical').toBool():
        item = self.table.topLevelItem (acc_no)
        used = item.pbar.value()
        # Limit is stored *1024; compare in the progress bar's percent scale.
        if used >= int(round(self.getSetting('Conf','CriticalQuotaLimit').toInt()[0]/1024.0)):
            item.setText (1, 'Critical usage')
            item.setIcon (0, QIcon (RED))
            if acc_no == self.currentLogin:
                if acc_no == self.table.topLevelItemCount()-1:
                    self.tray.showMessage ('SAM', 'Critical usage of last account reached.')
                else:
                    self.switch (acc_no)
def switch (self, switch_from):
    """Auto-switch from account ``switch_from`` to the next one.

    No-op if another account already took over; disables the session when
    auto-switch is off or the exhausted account was the last in the list.
    """
    if self.currentLogin != switch_from:
        return
    if not self.getSetting('Conf', 'AutoSwitch').toBool():
        self.currentLogin = -1
        return
    if switch_from == self.table.topLevelItemCount()-1 and self.table.topLevelItem(switch_from).text(1)=='Limit Reached':
        self.currentLogin = -1
        self.tray.showMessage('SAM', 'Data transfer limit for last account exceeded.')
        return
    self.login ( self.table.topLevelItem(switch_from+1) )
def onNetworkError (self):
    """Thread callback: surface a network failure in status bar and tray."""
    self.status.showMessage ('Network Error')
    self.tray.showMessage ('SAM', 'Network Error')
def update (self):
    """Run the updater dialog; on success activate downloaded '*.tmp'
    files (replacing their originals) and store the new revision,
    otherwise discard the leftovers.

    NOTE: intentionally shadows QWidget.update(); the action wiring
    relies on this name.
    """
    import update
    o = update.Updater( self, str( self.getSetting('Conf', 'rev').toString() ) )
    path = os.path.dirname(__file__)
    ls = os.listdir ( path )
    if o.exec_():
        for item in ls:
            if item[-4:]=='.tmp':
                # Replace the original with the freshly downloaded file.
                try:
                    os.remove ( path+os.sep+item[:-4] )
                except OSError:
                    # Original may not exist; the old bare 'except' here
                    # swallowed everything, including KeyboardInterrupt.
                    pass
                os.rename ( path+os.sep+item, path+os.sep+item[:-4] )
        self.setSetting ('Conf', 'rev', o.rev)
    else:
        # Update declined or failed: drop the download artifacts.
        for item in ls:
            if item.endswith('.tmp'):
                os.remove ( path+os.sep+item )
def move (self, to):
    """Move the selected account one row up (to=-1) or down (to=+1),
    keeping currentLogin, the progress bars and the persisted entries
    consistent with the new order."""
    if self.table.topLevelItemCount()<2:
        return None
    current = self.table.indexOfTopLevelItem ( self.table.currentItem() )
    # Boundary row for this direction: 0 when moving up, last when moving down.
    bound = (to>0) * (self.table.topLevelItemCount()-1)
    if current == bound:
        return None
    # Snapshot usage values; the bars are rebuilt by updateList() below.
    values=[]
    for i in range (self.table.topLevelItemCount()):
        values.append (self.table.topLevelItem(i).pbar.value())
    values.insert (current+to, values.pop(current))
    self.table.insertTopLevelItem ( current+to, self.table.takeTopLevelItem (current) )
    # Keep the active-login index pointing at the same account.
    if current == self.currentLogin:
        self.currentLogin += to
    elif current+to == self.currentLogin:
        self.currentLogin -= to
    self.table.setCurrentItem ( self.table.topLevelItem(current+to) )
    self.updateList(values)
    # Re-persist only the two slots that swapped.
    for i in [current, current+to]:
        item = self.table.topLevelItem(i)
        self.setSetting ('Accounts', 'Account'+str(i), str(item.text(0))+'!@#$%'+b64encode(str(item.passwd)))
def up(self):
    """Move the selected account one row up."""
    self.move(-1)
def down(self):
    """Move the selected account one row down."""
    self.move(1)
def sort (self):
    """Sort accounts alphabetically, keep ``currentLogin`` pointing at
    the same account, and persist the new order."""
    # Guard: with no active login (currentLogin == -1) Qt's
    # topLevelItem(-1) returns None and the original crashed on .text(0).
    active = self.table.topLevelItem(self.currentLogin)
    curr = active.text(0) if active is not None else None
    self.table.sortItems (0, Qt.AscendingOrder)
    for i in range (self.table.topLevelItemCount()):
        item = self.table.topLevelItem(i)
        item.acc_no = i
        if curr is not None and item.text(0) == curr:
            self.currentLogin = i
    self.saveAccounts()
def updateList (self, values=None):
    """Rebuild the per-row progress bars after rows were reordered.

    Args:
        values: usage percentages, one per top-level item. When the
            length does not match the table the call is a no-op (this is
            also the behavior for the default empty list).
    """
    # 'values=None' replaces the shared-mutable-default pitfall of
    # 'values=[]'; behavior for callers is unchanged.
    if values is None:
        values = []
    if len(values)!=self.table.topLevelItemCount():
        return
    for i in range(0, self.table.topLevelItemCount()):
        item = self.table.topLevelItem(i)
        item.acc_no = i
        # Widgets handed to setItemWidget are owned by the view, so a
        # fresh QProgressBar is created per row.
        item.pbar = QProgressBar()
        item.pbar.setRange (0, 100)
        item.pbar.setValue (values[i])
        self.table.setItemWidget(item, 2, item.pbar)
def rmAccount (self):
    """Remove the selected account row, compact the persisted entries and
    return the removed item (None when the list was empty)."""
    if self.table.topLevelItemCount() == 0:
        self.status.showMessage ('Nothing to remove!', 5000)
        return None
    current = self.table.indexOfTopLevelItem ( self.table.currentItem() )
    pop = self.table.takeTopLevelItem (current)
    self.status.showMessage (pop.text(0)+' removed', 5000)
    # Shift all stored entries after the removed row down by one slot.
    for i in range (current, self.table.topLevelItemCount()):
        item = self.table.topLevelItem(i)
        self.setSetting ('Accounts', 'Account'+str(i), str(item.text(0))+'!@#$%'+b64encode(str(item.passwd)))
    s = QSettings ('DA-IICT', 'SAM')
    s.beginGroup ('Accounts')
    # Drop the now-duplicate last slot.
    s.remove ( 'Account'+str(self.table.topLevelItemCount()) )
    self.setSetting ('Accounts', 'Length', self.table.topLevelItemCount())
    return pop
def clearList (self):
    """Remove all accounts after an explicit user confirmation."""
    if self.table.topLevelItemCount()==0:
        self.status.showMessage ('List already clear!', 5000)
        return None
    b = QMessageBox.question (self, 'SAM', 'Are you sure you want to remove all accounts?', QMessageBox.Yes | QMessageBox.No)
    if b == QMessageBox.Yes:
        self.table.clear()
        self.status.showMessage ('List cleared', 5000)
        self.saveAccounts()
def saveAccounts(self):
    """Persist the full account list (username + base64 password) under
    the 'Accounts' group in QSettings."""
    store = QSettings('DA-IICT', 'SAM')
    store.beginGroup("Accounts")
    count = self.table.topLevelItemCount()
    store.setValue("Length", count)
    for idx in range(count):
        account = self.table.topLevelItem(idx)
        record = str(account.text(0))+'!@#$%'+ b64encode(str(account.passwd))
        store.setValue("Account"+str(idx), record)
    store.endGroup()
def about (self):
    """Show the About dialog."""
    import about
    dlg = about.About()
    dlg.exec_()
def quit (self):
    """Stop timers, persist window geometry, drop the PID lock file and
    terminate the application."""
    self.loginTimer.stop()
    self.quotaTimer.stop()
    settings= QSettings ("DA-IICT", "SAM")
    settings.setValue ("size", self.size())
    settings.setValue ("pos", self.pos())
    os.remove(lck_file)
    qApp.quit()
def createAction(self, text, slot=None, icon=None, tip=None, shortcut=None, checkable=None):
    """Factory for QAction objects.

    When ``checkable`` is truthy, ``slot`` is connected to
    'toggled(bool)'; otherwise to 'triggered()'. Icon, shortcut and
    tooltip/status-tip are applied only when given.
    """
    action = QAction(text, self)
    if icon is not None:
        action.setIcon(QIcon(icon))
    if shortcut is not None:
        action.setShortcut(shortcut)
    if tip is not None:
        action.setToolTip(tip)
        action.setStatusTip(tip)
    if slot is None:
        return action
    if checkable:
        action.setCheckable(True)
        self.connect(action, SIGNAL('toggled(bool)'), slot)
    else:
        self.connect(action, SIGNAL('triggered()'), slot)
    return action
class QApplication(QApplication):
    """Thin QApplication subclass (deliberately shadows the Qt name) that
    removes the PID lock file when the session manager asks the app to
    commit data, so the next start is not blocked by a stale lock."""
    def __init__(self,arg):
        super(QApplication,self).__init__(arg)
    def commitData(self,manager):
        # NOTE(review): raises OSError if lck_file is already gone --
        # confirm the session-manager path tolerates an exception here.
        os.remove(lck_file)
def _main():
    """Create the Qt application and main window, then run the event loop."""
    qt_app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.loadPrefs()
    return qt_app.exec_()
def main():
    """Start SAM unless another live instance holds the PID lock file.

    Returns the Qt event-loop exit code, or None when another instance
    is already running.
    """
    if not os.access (lck_file, os.F_OK):
        return _main()
    else:
        # Read the owning PID; 'with' closes the handle (the original
        # open(...).read() leaked the file object).
        with open(lck_file, 'rb') as f:
            pid = int(f.read())
        if exists(pid):
            app = QApplication (sys.argv)
            QMessageBox.information (None, 'SAM', 'SAM is already running.')
        else:
            # Stale lock from a crashed instance: start normally.
            return _main()
if __name__=='__main__':
    # Route stderr through stdout so runtime errors land in one stream.
    sys.stderr = sys.stdout
    sys.exit (main())
|
{
"content_hash": "c13481f11fde1245342d0297b6dd8fbd",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 168,
"avg_line_length": 36.41225165562914,
"alnum_prop": 0.6944027645159824,
"repo_name": "viranch/sam",
"id": "aff438039867078682017925b4e0fff9b085cdde",
"size": "22172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sam/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "322925"
}
],
"symlink_target": ""
}
|
import logging
import os
from ...utils import paths
from ...utils import transfer
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
    """Export a channel's content database file to a destination folder,
    with progress reporting and cooperative cancellation."""

    def add_arguments(self, parser):
        parser.add_argument("channel_id", type=str)
        parser.add_argument("destination", type=str)

    def handle_async(self, *args, **options):
        """Copy the channel database to ``destination``; on cancellation
        remove the partial destination file and mark the task cancelled."""
        channel_id = options["channel_id"]
        data_dir = os.path.realpath(options["destination"])
        # Lazy %-style logging args: the message is only formatted when
        # the level is enabled (the original used eager str.format).
        logger.info(
            "Exporting channel database for channel id %s to %s",
            channel_id,
            data_dir,
        )
        src = paths.get_content_database_file_path(channel_id)
        dest = paths.get_content_database_file_path(channel_id, datafolder=data_dir)
        logger.debug("Source file: %s", src)
        logger.debug("Destination file: %s", dest)
        with transfer.FileCopy(src, dest, cancel_check=self.is_cancelled) as copy:
            with self.start_progress(total=copy.total_size) as progress_update:
                try:
                    for block in copy:
                        progress_update(len(block))
                except transfer.TransferCanceled:
                    # Cancellation is handled below; partial copy cleaned up.
                    pass
        if self.is_cancelled():
            try:
                os.remove(dest)
            except IOError:
                # Nothing was written yet; nothing to clean up.
                pass
            self.cancel()
|
{
"content_hash": "dffb7275eade4584596908c8653c583a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 32.30434782608695,
"alnum_prop": 0.5693135935397039,
"repo_name": "indirectlylit/kolibri",
"id": "75c50d4dc3988907a6aa2722191a90367d22f520",
"size": "1486",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "kolibri/core/content/management/commands/exportchannel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
from cars.models import Car
from tastypie.resources import ModelResource
class CarResource(ModelResource):
    """Tastypie resource exposing Car objects ordered by name.

    NOTE(review): ``dispatch_list`` short-circuits every POST with a bare
    ``HttpResponse(True)`` instead of creating an object. Per the inline
    TODO and the 'Due to initial requirements' note this is a deliberate
    stop-gap -- confirm before relying on POST behaving like a normal
    tastypie create.
    """
    def dispatch_list(self, request, **kwargs):
        # TODO: Refactor this
        if request.method == "POST":
            return HttpResponse(True)
        return super(CarResource, self).dispatch_list(request, **kwargs)

    class Meta(object):
        queryset = Car.objects.order_by('name')
        resource_name = 'car'
        allowed_methods = ['get', 'post']
        detail_allowed_methods = ['get']
        # Due to initial requirements
|
{
"content_hash": "52654ec08ee63f7571a0b7be2c31f7a4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.6560402684563759,
"repo_name": "Alexx-G/tastypie-example",
"id": "8acb92aa095138991de70efe90a4bcfacbba7507",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cars/api/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "HTML",
"bytes": "2445"
},
{
"name": "JavaScript",
"bytes": "811"
},
{
"name": "Python",
"bytes": "17793"
}
],
"symlink_target": ""
}
|
import six
from math import copysign
import numpy as np
import mceq_config as config
from MCEq.misc import info, print_in_rows, getAZN
from particletools.tables import PYTHIAParticleData
info(5, 'Initialization of PYTHIAParticleData object')
_pdata = PYTHIAParticleData()
backward_compatible_namestr = {
'nu_mu': 'numu',
'nu_mubar': 'antinumu',
'nu_e': 'nue',
'nu_ebar': 'antinue',
'nu_tau': 'nutau',
'nu_taubar': 'antinutau'
}
# Replace particle names for neutrinos with those used
# in previous MCEq versions
def _pname(pdg_id_or_name):
    """Map a PYTHIA-database particle name onto the neutrino naming scheme
    used by earlier MCEq releases (backward compatibility)."""
    pythia_name = _pdata.name(pdg_id_or_name)
    return backward_compatible_namestr.get(pythia_name, pythia_name)
class MCEqParticle(object):
"""Bundles different particle properties for simplified
availability of particle properties in :class:`MCEq.core.MCEqRun`.
Args:
pdg_id (int): PDG ID of the particle
egrid (np.array, optional): energy grid (centers)
cs_db (object, optional): reference to an instance of
:class:`InteractionYields`
"""
def __init__(self,
             pdg_id,
             helicity,
             energy_grid=None,
             cs_db=None,
             init_pdata_defaults=True):
    """Initialize the particle's attribute set.

    Args:
        pdg_id (int): PDG ID of the particle
        helicity (int): -1, 0 or 1 (0 = undefined/average)
        energy_grid: energy grid (centers) object; optional
        cs_db: cross-section database; optional
        init_pdata_defaults (bool): fill mass/ctau/name etc. from the
            PYTHIA particle database
    """
    #: (bool) if it's an electromagnetic particle
    self.is_em = abs(pdg_id) == 11 or pdg_id == 22
    #: (int) helicity -1, 0, 1 (0 means undefined or average)
    self.helicity = helicity
    #: (bool) particle is a nucleus (not yet implemented)
    self.is_nucleus = False
    #: (bool) particle is a hadron
    self.is_hadron = False
    #: (bool) particle is a lepton
    self.is_lepton = False
    #: (float) ctau in cm
    self.ctau = None
    #: (float) mass in GeV
    self.mass = None
    #: (str) species name in string representation
    self.name = None
    #: Mass, charge, neutron number
    self.A, self.Z, self.N = getAZN(pdg_id)
    #: (bool) particle has both, hadron and resonance properties
    self.is_mixed = False
    #: (bool) if particle has just resonance behavior
    self.is_resonance = False
    #: (bool) particle is interacting projectile
    self.is_projectile = False
    #: (bool) particle is stable
    self.is_stable = False or pdg_id in config.adv_set['disable_decays']
    #: (bool) can_interact
    self.can_interact = False
    #: (bool) has continuous losses dE/dX defined
    self.has_contloss = False
    #: (np.array) continuous losses in GeV/(g/cm2)
    self.dEdX = None
    #: (bool) is a tracking particle
    self.is_tracking = False
    #: decay channels if any
    self.decay_dists = {}
    #: (int) Particle Data Group Monte Carlo particle ID
    self.pdg_id = (pdg_id, helicity)
    #: (int) Unique PDG ID that is different for tracking particles
    self.unique_pdg_id = (pdg_id, helicity)
    #: (int) MCEq ID
    self.mceqidx = -1
    #: (float) mixing energy, transition between hadron and
    # resonance behavior
    self.E_mix = 0
    #: (int) energy grid index, where transition between
    # hadron and resonance occurs
    self.mix_idx = 0
    #: (float) critical energy in air at the surface
    self.E_crit = 0
    # Energy and cross section dependent inits
    self.current_cross_sections = None
    self._energy_grid = energy_grid
    # Variables for hadronic interaction
    self.current_hadronic_model = None
    self.hadr_secondaries = []
    self.hadr_yields = {}
    # Variables for decays
    self.children = []
    self.decay_dists = {}
    # A_target
    self.A_target = config.A_target
    if init_pdata_defaults:
        self._init_defaults_from_pythia_database()
    # Cross sections (and derived critical/mixing energies) can only be
    # computed when both an energy grid and a cross-section DB are given.
    if self._energy_grid is not None and cs_db is not None:
        #: interaction cross section in 1/cm2
        self.set_cs(cs_db)
def _init_defaults_from_pythia_database(self):
    """Init some particle properties from :mod:`particletools.tables`."""
    #: (bool) particle is a nucleus (not yet implemented)
    self.is_nucleus = _pdata.is_nucleus(self.pdg_id[0])
    #: (bool) particle is a hadron
    self.is_hadron = _pdata.is_hadron(self.pdg_id[0])
    #: (bool) particle is a lepton
    self.is_lepton = _pdata.is_lepton(self.pdg_id[0])
    #: Mass, charge, neutron number
    self.A, self.Z, self.N = getAZN(self.pdg_id[0])
    #: (float) ctau in cm
    self.ctau = _pdata.ctau(self.pdg_id[0])
    #: (float) mass in GeV
    self.mass = _pdata.mass(self.pdg_id[0])
    #: (str) species name in string representation
    name = _pname(self.pdg_id[0]) if self.name is None else self.name
    # Helicity states get an '_l'/'_r' suffix on the species name.
    if self.helicity == -1:
        name += '_l'
    elif self.helicity == +1:
        name += '_r'
    self.name = name
    #: (bool) particle is stable
    #: TODO the exclusion of neutron decays is a hotfix
    # 'not ctau < inf' is true for infinite ctau, i.e. stable species.
    self.is_stable = (not self.ctau < np.inf or
                      self.pdg_id[0] in config.adv_set['disable_decays'])
def init_custom_particle_data(self, name, pdg_id, helicity, ctau, mass,
                              **kwargs):
    """Add custom particle type. (Incomplete and not debugged)

    Args:
        name (str): species name
        pdg_id (int): PDG ID of the particle
        helicity (int): -1, 0 or 1
        ctau (float): ctau in cm
        mass (float): mass in GeV
        **kwargs: optional overrides for is_em/is_nucleus/is_hadron/
            is_lepton; anything not given falls back to the PYTHIA DB.
    """
    #: (int) Particle Data Group Monte Carlo particle ID
    self.pdg_id = (pdg_id, helicity)
    #: (bool) if it's an electromagnetic particle
    self.is_em = kwargs.pop('is_em', abs(pdg_id) == 11 or pdg_id == 22)
    #: (bool) particle is a nucleus (not yet implemented)
    self.is_nucleus = kwargs.pop('is_nucleus',
                                 _pdata.is_nucleus(self.pdg_id[0]))
    #: (bool) particle is a hadron
    self.is_hadron = kwargs.pop('is_hadron',
                                _pdata.is_hadron(self.pdg_id[0]))
    #: (bool) particle is a lepton
    self.is_lepton = kwargs.pop('is_lepton',
                                _pdata.is_lepton(self.pdg_id[0]))
    #: Mass, charge, neutron number
    self.A, self.Z, self.N = getAZN(self.pdg_id[0])
    #: (float) ctau in cm
    self.ctau = ctau
    #: (float) mass in GeV
    self.mass = mass
    #: (str) species name in string representation
    self.name = name
    #: (bool) particle is stable (infinite ctau)
    self.is_stable = not self.ctau < np.inf
def set_cs(self, cs_db):
    """Set cross section and recalculate the dependent variables.

    Args:
        cs_db: cross-section database; indexable by PDG ID and exposing
            the interaction-model tag ``iam``.
    """
    info(11, 'Obtaining cross sections for', self.pdg_id)
    self.current_cross_sections = cs_db.iam
    self.cs = cs_db[self.pdg_id[0]]
    # A particle can interact iff it has a non-zero cross section anywhere
    # on the grid (direct assignment replaces the verbose if/else).
    self.can_interact = sum(self.cs) > 0
    self._critical_energy()
    self._calculate_mixing_energy()
def set_hadronic_channels(self, hadronic_db, pmanager):
    """Changes the hadronic interaction model.

    Replaces indexing of the yield dictionary from PDG IDs
    with references from particle manager.

    Args:
        hadronic_db: interaction database exposing ``iam``, ``parents``,
            ``relations`` and ``get_matrix``
        pmanager: particle manager used to resolve PDG IDs to particle
            references (via ``pdg2pref``)
    """
    self.current_hadronic_model = hadronic_db.iam
    # Collect MCEqParticle references to children
    # instead of PDG ID as index
    if self.pdg_id in hadronic_db.parents and not self.is_tracking:
        self.is_projectile = True
        self.hadr_secondaries = [
            pmanager.pdg2pref[pid]
            for pid in hadronic_db.relations[self.pdg_id]
        ]
        self.hadr_yields = {}
        for s in self.hadr_secondaries:
            self.hadr_yields[s] = hadronic_db.get_matrix(
                self.pdg_id, s.pdg_id)
    else:
        # Not a projectile in this model: clear any previous channels.
        self.is_projectile = False
        self.hadr_secondaries = []
        self.hadr_yields = {}
def add_hadronic_production_channel(self, child, int_matrix):
"""Add a new particle that is produced in hadronic interactions.
The int_matrix is expected to be in the correct shape and scale
as the other interaction (dN/dE(i,j)) matrices. Energy conservation
is not checked.
"""
if not self.is_projectile:
raise Exception('The particle should be a projectile.')
if child in self.hadr_secondaries:
info(1, 'Child {0} has been already added.'.format(child.name))
return
self.hadr_secondaries.append(child)
self.hadr_yields[child] = int_matrix
def add_decay_channel(self, child, dec_matrix, force=False):
"""Add a decay channel.
The branching ratios are not renormalized and one needs to take care
of this externally.
"""
if self.is_stable:
raise Exception('Cannot add decay channel to stable particle.')
if child in self.children and not force:
info(1, 'Child {0} has been already added.'.format(child.name))
return
elif child in self.children and force:
info(1, 'Overwriting decay matrix of child {0}.'.format(child.name))
self.decay_dists[child] = dec_matrix
return
self.children.append(child)
self.decay_dists[child] = dec_matrix
def set_decay_channels(self, decay_db, pmanager):
    """Populates decay channels and energy distributions.

    Args:
        decay_db: decay database exposing ``parents``, ``children`` and
            ``get_matrix``
        pmanager: particle manager, indexable by child PDG ID
    Raises:
        Exception: if the particle is unstable but the decay database
            has no entry for it.
    """
    if self.is_stable or self.is_tracking:
        # Variables for decays
        self.children = []
        self.decay_dists = {}
        return
    if self.pdg_id not in decay_db.parents:
        raise Exception('Unstable particle without decay distribution:',
                        self.pdg_id, self.name)
    self.children = []
    self.children = [pmanager[d] for d in decay_db.children(self.pdg_id)]
    self.decay_dists = {}
    for c in self.children:
        self.decay_dists[c] = decay_db.get_matrix(self.pdg_id, c.pdg_id)
def track_decays(self, tracking_particle):
children_d = {}
for c in self.children:
children_d[c.pdg_id] = c
if tracking_particle.pdg_id not in list(children_d):
info(
17, 'Parent particle {0} does not decay into {1}'.format(
self.name, tracking_particle.name))
return False
# Copy the decay distribution from original PDG
self.children.append(tracking_particle)
self.decay_dists[tracking_particle] = self.decay_dists[children_d[
tracking_particle.pdg_id]]
return True
def track_interactions(self, tracking_particle):
secondaries_d = {}
for s in self.hadr_secondaries:
secondaries_d[s.pdg_id] = s
if tracking_particle.pdg_id not in list(secondaries_d):
info(
17, 'Parent particle {0} does not produce {1} at the vertex'.
format(self.name, tracking_particle.name))
return False
# Copy the decay distribution from original PDG
self.hadr_secondaries.append(tracking_particle)
self.hadr_yields[tracking_particle] = self.hadr_yields[secondaries_d[
tracking_particle.pdg_id]]
return True
def is_secondary(self, particle_ref):
"""`True` if this projectile and produces particle `particle_ref`."""
if not isinstance(particle_ref, self.__class__):
raise Exception('Argument not of MCEqParticle type.')
return particle_ref in self.hadr_secondaries
def is_child(self, particle_ref):
"""`True` if this particle decays into `particle_ref`."""
if not isinstance(particle_ref, self.__class__):
raise Exception('Argument not of MCEqParticle type.')
return particle_ref in self.children
@property
def hadridx(self):
"""Returns index range where particle behaves as hadron.
Returns:
:func:`tuple` (int,int): range on energy grid
"""
return (self.mix_idx, self._energy_grid.d)
@property
def residx(self):
"""Returns index range where particle behaves as resonance.
Returns:
:func:`tuple` (int,int): range on energy grid
"""
return (0, self.mix_idx)
@property
def lidx(self):
"""Returns lower index of particle range in state vector.
Returns:
(int): lower index in state vector :attr:`MCEqRun.phi`
"""
return self.mceqidx * self._energy_grid.d
@property
def uidx(self):
"""Returns upper index of particle range in state vector.
Returns:
(int): upper index in state vector :attr:`MCEqRun.phi`
"""
return (self.mceqidx + 1) * self._energy_grid.d
def inverse_decay_length(self, cut=True):
    r"""Returns inverse decay length (or infinity (np.inf), if
    particle is stable), where the air density :math:`\rho` is
    factorized out.

    Args:
        E (float) : energy in laboratory system in GeV
        cut (bool): set to zero in 'resonance' regime
    Returns:
        (float): :math:`\frac{\rho}{\lambda_{dec}}` in 1/cm
    """
    try:
        # m / (ctau * (E_kin + m)); the scalar m/ctau division is what
        # can raise ZeroDivisionError for ctau == 0.
        dlen = self.mass / self.ctau / (self._energy_grid.c + self.mass)
        if cut:
            # Below mix_idx the species is treated as a resonance.
            dlen[0:self.mix_idx] = 0.
        # Correction for bin average, since dec. length is a steep falling
        # function. This factor averages the value over bin length for
        # 10 bins per decade.
        # return 0.989 * dlen
        return dlen
    except ZeroDivisionError:
        # NOTE(review): ones_like(d) sizes the result by the grid
        # *dimension* d, not the center array c -- confirm the intended
        # shape of this fallback.
        return np.ones_like(self._energy_grid.d) * np.inf
def inel_cross_section(self, mbarn=False):
"""Returns inelastic cross section.
Args:
mbarn (bool) : if True cross section in mb otherwise in cm**2
Returns:
(float): :math:`\\sigma_{\\rm inel}` in mb or cm**2
"""
#: unit - :math:`\text{GeV} \cdot \text{fm}`
GeVfm = 0.19732696312541853
#: unit - :math:`\text{GeV} \cdot \text{cm}`
GeVcm = GeVfm * 1e-13
#: unit - :math:`\text{GeV}^2 \cdot \text{mbarn}`
GeV2mbarn = 10.0 * GeVfm**2
#: unit conversion - :math:`\text{mbarn} \to \text{cm}^2`
mbarn2cm2 = GeV2mbarn / GeVcm**2
if mbarn:
return mbarn2cm2 * self.cs
return self.cs
def inverse_interaction_length(self):
"""Returns inverse interaction length for A_target given by config.
Returns:
(float): :math:`\\frac{1}{\\lambda_{int}}` in cm**2/g
"""
m_target = self.A_target * 1.672621 * 1e-24 # <A> * m_proton [g]
return self.cs / m_target
def _assign_hadr_dist_idx(self, child, projidx, chidx, cmat):
    """Copies a subset, defined between indices ``projidx`` and ``chidx``
    from the ``hadr_yields`` into ``cmat``

    Args:
        child: final state child/secondary particle, used as key into
            ``hadr_yields`` (keyed by particle reference, see
            :func:`set_hadronic_channels`, not by raw PDG ID)
        projidx (int,int): tuple containing index range relative
            to the projectile's energy grid
        chidx (int,int): tuple containing index range relative
            to the child's energy grid
        cmat (numpy.array): array reference to the interaction matrix
    """
    cmat[chidx[0]:chidx[1], projidx[0]:projidx[1]] = self.hadr_yields[
        child][chidx[0]:chidx[1], projidx[0]:projidx[1]]
def _assign_decay_idx(self, child, projidx, chidx, cmat):
    """Copies a subset, defined between indices ``projidx`` and ``chidx``
    from the ``decay_dists`` into ``cmat``

    Args:
        child: final state child particle, used as key into
            ``decay_dists`` (keyed by particle reference, see
            :func:`set_decay_channels`, not by raw PDG ID)
        projidx (int,int): tuple containing index range relative
            to the projectile's energy grid
        chidx (int,int): tuple containing index range relative
            to the child's energy grid
        cmat (numpy.array): array reference to the decay matrix
    """
    cmat[chidx[0]:chidx[1], projidx[0]:projidx[1]] = self.decay_dists[
        child][chidx[0]:chidx[1], projidx[0]:projidx[1]]
def dN_dxlab(self, kin_energy, sec_pdg, verbose=True, **kwargs):
    r"""Returns :math:`dN/dx_{\rm Lab}` for interaction energy close
    to ``kin_energy`` for hadron-air collisions.

    The function respects modifications applied via :func:`_set_mod_pprod`.

    Args:
        kin_energy (float): approximate interaction kin_energy
        sec_pdg: secondary particle (key into ``hadr_yields``)
        verbose (bool): print out the closest energy
    Returns:
        (numpy.array, numpy.array): :math:`x_{\rm Lab}`, :math:`dN/dx_{\rm Lab}`
    """
    # Snap to the nearest grid center below/at the requested energy.
    eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
    en = self._energy_grid.c[eidx]
    info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
    m = self.hadr_yields[sec_pdg]
    xl_grid = (self._energy_grid.c[:eidx + 1]) / en
    # Convert the matrix column (dN/dE per bin) to dN/dx_lab.
    xl_dist = en * xl_grid * m[:eidx +
                               1, eidx] / self._energy_grid.w[:eidx + 1]
    return xl_grid, xl_dist
def dNdec_dxlab(self, kin_energy, sec_pdg, verbose=True, **kwargs):
    r"""Returns decay :math:`dN/dx_{\rm Lab}` for parent energy close
    to ``kin_energy``.

    Same construction as :func:`dN_dxlab` but reads from the decay
    distributions instead of the hadronic yields.

    Args:
        kin_energy (float): approximate parent energy
        sec_pdg: child particle (key into ``decay_dists``)
        verbose (bool): print out the closest energy
    Returns:
        (numpy.array, numpy.array): :math:`x_{\rm Lab}`, :math:`dN/dx_{\rm Lab}`
    """
    eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
    en = self._energy_grid.c[eidx]
    info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
    m = self.decay_dists[sec_pdg]
    xl_grid = (self._energy_grid.c[:eidx + 1]) / en
    xl_dist = en * xl_grid * m[:eidx +
                               1, eidx] / self._energy_grid.w[:eidx + 1]
    return xl_grid, xl_dist
def dN_dEkin(self, kin_energy, sec_pdg, verbose=True, **kwargs):
    r"""Returns :math:`dN/dE_{\rm Kin}` in lab frame for an interaction
    energy close to ``kin_energy`` for hadron-air collisions.

    The function respects modifications applied via :func:`_set_mod_pprod`.

    Args:
        kin_energy (float): approximate interaction energy
        sec_pdg: secondary particle (key into ``hadr_yields``)
        verbose (bool): print out the closest energy
    Returns:
        (numpy.array, numpy.array): kinetic-energy grid,
        :math:`dN/dE_{\rm Kin}` (the code returns the E grid and the
        per-energy distribution, not x_lab)
    """
    eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
    en = self._energy_grid.c[eidx]
    info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
    m = self.hadr_yields[sec_pdg]
    ekin_grid = self._energy_grid.c
    # Normalize the matrix column by the projectile bin width only.
    elab_dist = m[:eidx + 1, eidx] / self._energy_grid.w[eidx]
    return ekin_grid[:eidx + 1], elab_dist
def dN_dxf(self,
energy,
prim_pdg,
sec_pdg,
pos_only=True,
verbose=True,
**kwargs):
r"""Returns :math:`dN/dx_{\rm F}` in c.m. for interaction energy close
to ``energy`` (lab. not kinetic) for hadron-air collisions.
The function respects modifications applied via :func:`_set_mod_pprod`.
Args:
energy (float): approximate interaction lab. energy
prim_pdg (int): PDG ID of projectile
sec_pdg (int): PDG ID of secondary particle
verbose (bool): print out the closest energy
Returns:
(numpy.array, numpy.array): :math:`x_{\rm F}`, :math:`dN/dx_{\rm F}`
"""
if not hasattr(self, '_ptav_sib23c'):
# Load spline of average pt distribution as a funtion of log(E_lab) from sib23c
import pickle
from os.path import join
self._ptav_sib23c = pickle.load(
open(join(config.data_dir, 'sibyll23c_aux.ppd'), 'rb'))[0]
def xF(xL, Elab, ppdg):
m = {2212: 0.938, 211: 0.139, 321: 0.493}
mp = m[2212]
Ecm = np.sqrt(2 * Elab * mp + 2 * mp**2)
Esec = xL * Elab
betacm = np.sqrt((Elab - mp) / (Elab + mp))
gammacm = (Elab + mp) / Ecm
avpt = self._ptav_sib23c[ppdg](
np.log(np.sqrt(Elab**2) - m[np.abs(ppdg)]**2))
xf = 2 * (-betacm * gammacm * Esec + gammacm *
np.sqrt(Esec**2 - m[np.abs(ppdg)]**2 - avpt**2)) / Ecm
dxl_dxf = 1. / (
2 *
(-betacm * gammacm * Elab + xL * Elab**2 * gammacm / np.sqrt(
(xL * Elab)**2 - m[np.abs(ppdg)]**2 - avpt**2)) / Ecm)
return xf, dxl_dxf
eidx = (np.abs(self._energy_grid.c + self.mass - energy)).argmin()
en = self._energy_grid.c[eidx] + self.mass
info(2, 'Nearest energy, index: ', en, eidx, condition=verbose)
m = self.hadr_yields[sec_pdg]
xl_grid = (self._energy_grid.c[:eidx + 1] + self.mass) / en
xl_dist = xl_grid * en * m[:eidx + 1, eidx] / np.diag(
self._energy_grid.w)[:eidx + 1]
xf_grid, dxl_dxf = xF(xl_grid, en, sec_pdg)
xf_dist = xl_dist * dxl_dxf
if pos_only:
xf_dist = xf_dist[xf_grid >= 0]
xf_grid = xf_grid[xf_grid >= 0]
return xf_grid, xf_dist
return xf_grid, xf_dist
    def _critical_energy(self):
        """Computes the critical energy where decay and interaction
        are balanced and stores it in :attr:`E_crit`.

        Approximate value in Air. Stable particles (or ``ctau <= 0``)
        get ``E_crit = inf``. Note: despite the original wording this
        method returns nothing; the result is stored on the instance.

        :attr:`E_crit` is :math:`\\frac{m\\ 6.4 \\text{km}}{c\\tau}` in GeV.
        """
        if self.is_stable or self.ctau <= 0.:
            # No decay -> interaction always dominates
            self.E_crit = np.inf
        else:
            # 6.4e5 cm = 6.4 km; presumably an atmospheric reference
            # length -- TODO confirm against MCEq documentation.
            self.E_crit = self.mass * 6.4e5 / self.ctau
def _calculate_mixing_energy(self):
"""Calculates interaction/decay length in Air and decides if
the particle has resonance and/or hadron behavior.
Class attributes :attr:`is_mixed`, :attr:`E_mix`, :attr:`mix_idx`,
:attr:`is_resonance` are calculated here. If the option `no_mixing`
is set in config.adv_config particle is forced to be a resonance
or hadron behavior.
Args:
e_grid (numpy.array): energy grid of size :attr:`d`
max_density (float): maximum density on the integration path (largest
decay length)
"""
cross_over = config.hybrid_crossover
max_density = config.max_density
d = self._energy_grid.d
inv_intlen = self.inverse_interaction_length()
inv_declen = self.inverse_decay_length()
# If particle is stable, no "mixing" necessary
if (not np.any(np.nan_to_num(inv_declen) > 0.)
or abs(self.pdg_id[0]) in config.adv_set["exclude_from_mixing"]
or config.adv_set['no_mixing']
or self.pdg_id[0] in config.adv_set['disable_decays']):
self.mix_idx = 0
self.is_mixed = False
self.is_resonance = False
return
# If particle is forced to be a "resonance"
if (np.abs(self.pdg_id[0]) in config.adv_set["force_resonance"]):
self.mix_idx = d - 1
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = False
self.is_resonance = True
# Particle can interact and decay
elif self.can_interact and not self.is_stable:
# This is lambda_dec / lambda_int
threshold = np.zeros_like(inv_intlen)
mask = inv_declen != 0.
threshold[mask] = inv_intlen[mask] * max_density / inv_declen[mask]
del mask
self.mix_idx = np.where(threshold > cross_over)[0][0]
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = True
self.is_resonance = False
# These particles don't interact but can decay (e.g. tau leptons)
elif not self.can_interact and not self.is_stable:
mask = inv_declen != 0.
self.mix_idx = np.where(
max_density / inv_declen > config.dXmax)[0][0]
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = True
self.is_resonance = False
# Particle is stable but that should be handled above
else:
print(self.name, "This case shouldn't occur.")
threshold = np.inf
self.mix_idx = 0
self.is_mixed = False
self.is_resonance = False
def __eq__(self, other):
"""Checks name for equality"""
if isinstance(other, MCEqParticle):
return self.name == other.name
else:
return NotImplemented
def __neq__(self, other):
"""Checks name for equality"""
if isinstance(other, MCEqParticle):
return self.name != other.name
else:
return NotImplemented
def __hash__(self):
"""Instruction for comuting the hash"""
return hash(self.name)
def __repr__(self):
a_string = ("""
{0}:
is_hadron : {1}
is_lepton : {2}
is_nucleus : {3}
is_stable : {4}
is_mixed : {5}
is_resonance : {6}
is_tracking : {7}
is_projectile : {8}
mceqidx : {9}
E_mix : {10:2.1e} GeV\n""").format(
self.name, self.is_hadron, self.is_lepton, self.is_nucleus,
self.is_stable, self.is_mixed, self.is_resonance, self.is_tracking,
self.is_projectile, self.mceqidx, self.E_mix)
return a_string
class ParticleManager(object):
    """Database for objects of :class:`MCEqParticle`.

    Keeps the list of cascade particles and resonances, the mapping
    tables between PDG IDs, names and state-vector indices, and the
    bookkeeping of "tracking" particle aliases.

    Authors:
        Anatoli Fedynitch (DESY)
        Jonas Heinze (DESY)
    """
    def __init__(self, pdg_id_list, energy_grid, cs_db, mod_table=None):
        # Energy grid shared by all particles (provides .c, .w, .d)
        self._energy_grid = energy_grid
        # Particle index shortcuts
        #: (dict) Converts PDG ID to index in state vector
        self.pdg2mceqidx = {}
        #: (dict) Converts particle name to index in state vector
        self.pname2mceqidx = {}
        #: (dict) Converts PDG ID to reference of
        # :class:`particlemanager.MCEqParticle`
        self.pdg2pref = {}
        #: (dict) Converts particle name to reference of
        #: class:`particlemanager.MCEqParticle`
        self.pname2pref = {}
        #: (dict) Converts MCEq index to reference of
        #: class:`particlemanager.MCEqParticle`
        self.mceqidx2pref = {}
        #: (dict) Converts index in state vector to PDG ID
        self.mceqidx2pdg = {}
        #: (dict) Converts index in state vector to particle name
        self.mceqidx2pname = {}
        #: (int) Total number of species
        self.nspec = 0
        # save currently applied cross section model
        self.current_cross_sections = None
        # save currently applied hadronic model
        self.current_hadronic_model = None
        # Cross section database
        self._cs_db = cs_db
        # (list) Active tracking relations (parent, child, alias, from_int).
        # Fix: the original initialized this attribute twice, first as a
        # dict and then as a list; only the list form is used.
        self.tracking_relations = []
        # Save the tracking relations requested by default tracking
        self._tracking_requested = []
        self._init_categories(particle_pdg_list=pdg_id_list)
        self.print_particle_tables(10)

    def set_cross_sections_db(self, cs_db):
        """Sets the inelastic cross section to each interacting particle.

        This applies to most of the hadrons and does not imply that the
        particle becomes a projectile. parents need in addition defined
        hadronic channels.
        """
        info(5, 'Setting cross section particle variables.')
        if self.current_cross_sections == cs_db.iam:
            info(10, 'Same cross section model not applied to particles.')
            return
        for p in self.cascade_particles:
            p.set_cs(cs_db)
        self.current_cross_sections = cs_db.iam
        self._update_particle_tables()

    def set_decay_channels(self, decay_db):
        """Attaches the references to the decay yield tables to
        each unstable particle"""
        info(5, 'Setting decay info for particles.')
        for p in self.all_particles:
            p.set_decay_channels(decay_db, self)
        # Tracking aliases must be re-established for the new decay tables
        self._restore_tracking_setup()
        self._update_particle_tables()

    def set_interaction_model(self,
                              cs_db,
                              hadronic_db,
                              updated_parent_list=None,
                              force=False):
        """Attaches the references to the hadronic yield tables to
        each projectile particle"""
        info(5, 'Setting hadronic secondaries for particles.')
        if (self.current_hadronic_model == hadronic_db.iam and
                not force and updated_parent_list is None):
            info(10, 'Same hadronic model not applied to particles.')
            return
        if updated_parent_list is not None:
            self._init_categories(updated_parent_list)
        for p in self.cascade_particles:
            p.set_cs(cs_db)
            p.set_hadronic_channels(hadronic_db, self)
        self.current_hadronic_model = hadronic_db.iam
        self._update_particle_tables()

    def set_continuous_losses(self, contloss_db):
        """Set continuous losses terms to particles with ionization
        and radiation losses."""
        for p in self.cascade_particles:
            if p.pdg_id in contloss_db:
                p.has_contloss = True
                p.dEdX = contloss_db[p.pdg_id]

    def add_tracking_particle(self,
                              parent_list,
                              child_pdg,
                              alias_name,
                              from_interactions=False):
        """Allows tracking decay and particle production chains.

        Replaces previous ``obs_particle`` function that allowed to track
        only leptons from decays certain particles. This present feature
        removes the special PDG IDs 71XX, 72XX, etc and allows to define
        any channel like::

            $ particleManagerInstance.add_tracking_particle([211], 14, 'pi_numu')

        This will store muon neutrinos from pion decays under the alias
        'pi_numu'. Multiple parents are allowed::

            $ particleManagerInstance.add_tracking_particle(
                [411, 421, 431], 14, 'D_numu')

        Args:
            parent_list (list): list of parent particle PDG ID's
            child_pdg (int): child particle
            alias_name (str): name alias under which the result is
                accessible in get_solution
            from_interactions (bool): track particles from interactions
        """
        from copy import copy
        info(10, 'requested for', parent_list, child_pdg, alias_name)
        # Remember the request so it can be re-applied on model changes
        for p in parent_list:
            if (p, child_pdg, alias_name,
                    from_interactions) in self._tracking_requested:
                continue
            self._tracking_requested.append(
                (p, child_pdg, alias_name, from_interactions))
        # Check if tracking particle with the alias not yet defined
        # and create new one of necessary
        if alias_name in self.pname2pref:
            info(15, 'Re-using tracking particle', alias_name)
            tracking_particle = self.pname2pref[alias_name]
        elif child_pdg not in self.pdg2pref:
            info(15, 'Tracking child not a available',
                 'for this interaction model, skipping.')
            return
        else:
            info(10, 'Creating new tracking particle')
            # Copy all preferences of the original particle
            tracking_particle = copy(self.pdg2pref[child_pdg])
            tracking_particle.is_tracking = True
            tracking_particle.name = alias_name
            # Find a unique PDG ID for the new tracking particle by
            # offsetting the child's PDG ID by +-1000000
            unique_child_pdg = (child_pdg[0] +
                                int(copysign(1000000, child_pdg[0])),
                                tracking_particle.helicity)
            for i in range(100):
                if unique_child_pdg not in list(self.pdg2pref):
                    break
                info(
                    20, '{0}: trying to find unique_pdg ({1}) for {2}'.format(
                        i, tracking_particle.name, unique_child_pdg))
                unique_child_pdg = (unique_child_pdg[0] +
                                    int(copysign(10000, child_pdg[0])),
                                    tracking_particle.helicity)
            tracking_particle.unique_pdg_id = unique_child_pdg
        # Track if attempt to add the tracking particle succeeded at least once
        track_success = False
        # Include antiparticle
        for parent_pdg in list(
                set(parent_list + [(-p, h) for (p, h) in parent_list])):
            if parent_pdg not in self.pdg2pref:
                info(15,
                     'Parent particle {0} does not exist.'.format(parent_pdg))
                continue
            if (parent_pdg, child_pdg, alias_name,
                    from_interactions) in self.tracking_relations:
                info(
                    20, 'Tracking of {0} from {1} already activated.'.format(
                        tracking_particle.name,
                        self.pdg2pref[parent_pdg].name))
                continue
            if not from_interactions:
                track_method = self.pdg2pref[parent_pdg].track_decays
            else:
                track_method = self.pdg2pref[parent_pdg].track_interactions
            # Check if the tracking is successful. If not the particle is not
            # a child of the parent particle
            if track_method(tracking_particle):
                info(
                    15, 'Parent particle {0} tracking scheduled.'.format(
                        parent_pdg))
                self.tracking_relations.append(
                    (parent_pdg, child_pdg, alias_name, from_interactions))
                track_success = True
        if track_success and tracking_particle.name not in list(
                self.pname2pref):
            # Append the new species at the end of the state vector
            tracking_particle.mceqidx = np.max(list(self.mceqidx2pref)) + 1
            self.all_particles.append(tracking_particle)
            self.cascade_particles.append(tracking_particle)
            self._update_particle_tables()
            info(
                10, 'tracking particle {0} successfully added.'.format(
                    tracking_particle.name))

    def track_leptons_from(self,
                           parent_pdg_list,
                           prefix,
                           exclude_em=True,
                           from_interactions=False,
                           use_helicities=False):
        """Adds tracking particles for all leptons coming from decays of parents
        in `parent_pdg_list`.
        """
        # With exclude_em=True this selects only non-EM leptons that are
        # not themselves tracking particles
        leptons = [
            p for p in self.all_particles if p.is_lepton
            and not (p.is_em == exclude_em) and not p.is_tracking
        ]
        for lepton in leptons:
            if not use_helicities and lepton.pdg_id[1] != 0:
                continue
            self.add_tracking_particle(parent_pdg_list, lepton.pdg_id,
                                       prefix + lepton.name, from_interactions)

    def _init_categories(self, particle_pdg_list):
        """Determines the list of particles for calculation and
        returns lists of instances of :class:`data.MCEqParticle` .

        The particles which enter this list are those, which have a
        defined index in the SIBYLL 2.3 interaction model. Included are
        most relevant baryons and mesons and some of their high mass states.
        More details about the particles which enter the calculation can
        be found in :mod:`particletools`.

        Returns:
            (tuple of lists of :class:`data.MCEqParticle`): (all particles,
            cascade particles, resonances)
        """
        from MCEq.particlemanager import MCEqParticle
        info(5, "Generating particle list.")
        if particle_pdg_list is not None:
            particles = particle_pdg_list
        else:
            from particletools.tables import SibyllParticleTable
            modtab = SibyllParticleTable()
            particles = modtab.baryons + modtab.mesons + modtab.leptons
        # Remove duplicates
        particles = sorted(list(set(particles)))
        # Initialize particle objects
        particle_list = [
            MCEqParticle(pdg, hel, self._energy_grid, self._cs_db)
            for pdg, hel in particles
        ]
        # Sort by critical energy (= int_len ~== dec_length ~ int_cs/tau)
        particle_list.sort(key=lambda x: x.E_crit, reverse=False)
        # Cascade particles will "live" on the grid and have an mceqidx assigned
        self.cascade_particles = [
            p for p in particle_list if not p.is_resonance
        ]
        self.cascade_particles = sorted(self.cascade_particles,
                                        key=lambda p: abs(p.pdg_id[0]))
        # These particles will only exist implicitly and are integrated out
        self.resonances = [p for p in particle_list if p.is_resonance]
        # Assign an mceqidx (position in state vector) to each explicit particle
        # Resonances will keep the default mceqidx = -1
        for mceqidx, h in enumerate(self.cascade_particles):
            h.mceqidx = mceqidx
        self.all_particles = self.cascade_particles + self.resonances
        self._update_particle_tables()

    def add_new_particle(self, new_mceq_particle):
        """Registers an externally created particle object, appending it
        to the cascade or resonance list depending on its type."""
        if new_mceq_particle in self.all_particles:
            info(0, 'Particle {0}/{1} has already been added. Use it.'.format(
                new_mceq_particle.name, new_mceq_particle.pdg_id
            ))
            return
        if not new_mceq_particle.is_resonance:
            info(2, 'New particle {0}/{1} is not a resonance.'.format(
                new_mceq_particle.name, new_mceq_particle.pdg_id
            ))
            new_mceq_particle.mceqidx = len(self.cascade_particles)
            self.cascade_particles.append(new_mceq_particle)
        else:
            info(2, 'New particle {0}/{1} is a resonance.'.format(
                new_mceq_particle.name, new_mceq_particle.pdg_id
            ))
            self.resonances.append(new_mceq_particle)
        self.all_particles = self.cascade_particles + self.resonances
        self._update_particle_tables()

    def _update_particle_tables(self):
        """Update internal mapping tables after changes to the particle
        list occur."""
        self.n_cparticles = len(self.cascade_particles)
        self.dim = self._energy_grid.d
        self.dim_states = self._energy_grid.d * self.n_cparticles
        # Clean all dictionaries before rebuilding them.
        # Fix: replaced a side-effect list comprehension with a plain loop.
        for d in [
                self.pdg2mceqidx, self.pname2mceqidx, self.mceqidx2pdg,
                self.mceqidx2pname, self.mceqidx2pref, self.pdg2pref,
                self.pname2pref
        ]:
            d.clear()
        for p in self.all_particles:
            self.pdg2mceqidx[p.unique_pdg_id] = p.mceqidx
            self.pname2mceqidx[p.name] = p.mceqidx
            self.mceqidx2pdg[p.mceqidx] = p.unique_pdg_id
            self.mceqidx2pname[p.mceqidx] = p.name
            self.mceqidx2pref[p.mceqidx] = p
            self.pdg2pref[p.unique_pdg_id] = p
            self.pname2pref[p.name] = p
        self.print_particle_tables(20)

    def _restore_tracking_setup(self):
        """Restores the setup of tracking particles after model changes."""
        info(10, 'Restoring tracking particle setup')
        if not self.tracking_relations and config.enable_default_tracking:
            self._init_default_tracking()
            return
        # Clear tracking_relations for this initialization
        self.tracking_relations = []
        for pid, cid, alias, int_dec in self._tracking_requested:
            if pid not in self.pdg2pref:
                # Fix: the original passed the raw template string to info()
                # without calling .format(), leaving '{0}' in the message.
                info(15, 'Can not restore {0}, since not in particle '
                     'list.'.format(pid))
                continue
            self.add_tracking_particle([pid], cid, alias, int_dec)

    def _init_default_tracking(self):
        """Add default tracking particles for leptons from pi, K, and mu"""
        # Init default tracking particles
        info(1, 'Initializing default tracking categories (pi, K, mu)')
        self._tracking_requested_by_default = []
        for parents, prefix, with_helicity in [([(211, 0)], 'pi_', True),
                                               ([(321, 0)], 'k_', True),
                                               ([(13, -1),
                                                 (13, 1)], 'mulr_', False),
                                               ([(13, 0)], 'mu_h0_', False),
                                               ([(13, -1), (13, 0),
                                                 (13, 1)], 'mu_', False),
                                               ([(310, 0),
                                                 (130, 0)], 'K0_', False)]:
            self.track_leptons_from(parents,
                                    prefix,
                                    exclude_em=True,
                                    use_helicities=with_helicity)
        # Track prompt leptons
        self.track_leptons_from([
            p.pdg_id for p in self.all_particles if p.ctau < config.prompt_ctau
        ],
                                'prcas_',
                                exclude_em=True,
                                use_helicities=False)
        # Track leptons from interaction vertices (also prompt)
        self.track_leptons_from(
            [p.pdg_id for p in self.all_particles if p.is_projectile],
            'prres_',
            exclude_em=True,
            from_interactions=True,
            use_helicities=False)
        self.track_leptons_from(
            [p.pdg_id for p in self.all_particles if p.is_em],
            'em_',
            exclude_em=True,
            from_interactions=True,
            use_helicities=False)

    def __contains__(self, pdg_id_or_name):
        """Defines the `in` operator to look for particles"""
        # Normalize to the (pdg_id, helicity) tuple used as dict key
        if isinstance(pdg_id_or_name, six.integer_types):
            pdg_id_or_name = (pdg_id_or_name, 0)
        elif isinstance(pdg_id_or_name, six.string_types):
            pdg_id_or_name = (_pdata.pdg_id(pdg_id_or_name), 0)
        return pdg_id_or_name in list(self.pdg2pref)

    def __getitem__(self, pdg_id_or_name):
        """Returns reference to particle object."""
        if isinstance(pdg_id_or_name, tuple):
            return self.pdg2pref[pdg_id_or_name]
        elif isinstance(pdg_id_or_name, six.integer_types):
            return self.pdg2pref[(pdg_id_or_name, 0)]
        else:
            return self.pdg2pref[(_pdata.pdg_id(pdg_id_or_name), 0)]

    def keys(self):
        """Returns pdg_ids of all particles"""
        return [p.pdg_id for p in self.all_particles]

    def __repr__(self):
        str_out = ""
        ident = 3 * ' '
        for s in self.all_particles:
            str_out += s.name + '\n' + ident
            str_out += 'PDG id : ' + str(s.pdg_id) + '\n' + ident
            str_out += 'MCEq idx : ' + str(s.mceqidx) + '\n\n'
        return str_out

    def print_particle_tables(self, min_dbg_lev=2):
        """Prints the particle categories if the debug level is at
        least `min_dbg_lev`."""
        info(min_dbg_lev, "Hadrons and stable particles:", no_caller=True)
        print_in_rows(min_dbg_lev, [
            p.name for p in self.all_particles
            if p.is_hadron and not p.is_resonance and not p.is_mixed
        ])
        info(min_dbg_lev, "\nMixed:", no_caller=True)
        print_in_rows(min_dbg_lev,
                      [p.name for p in self.all_particles if p.is_mixed])
        info(min_dbg_lev, "\nResonances:", no_caller=True)
        print_in_rows(min_dbg_lev,
                      [p.name for p in self.all_particles if p.is_resonance])
        info(min_dbg_lev, "\nLeptons:", no_caller=True)
        print_in_rows(min_dbg_lev, [
            p.name
            for p in self.all_particles if p.is_lepton and not p.is_tracking
        ])
        info(min_dbg_lev, "\nTracking:", no_caller=True)
        print_in_rows(min_dbg_lev,
                      [p.name for p in self.all_particles if p.is_tracking])
        info(min_dbg_lev,
             "\nTotal number of species:",
             self.n_cparticles,
             no_caller=True)
        # Fix: removed an unreachable `if False:` debug block that dumped
        # per-energy-bin matrix indices.
|
{
"content_hash": "584344345390dfdbe8e2171ea6823306",
"timestamp": "",
"source": "github",
"line_count": 1170,
"max_line_length": 91,
"avg_line_length": 39.30940170940171,
"alnum_prop": 0.5606627239519917,
"repo_name": "afedynitch/MCEq",
"id": "5f4437b97a79e1d400351ee08449bd0440d6b771",
"size": "45993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MCEq/particlemanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "112130"
},
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "270214"
}
],
"symlink_target": ""
}
|
"""The Amplification problem class."""
from typing import Optional, Callable, Any, Union, List
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import GroverOperator
from qiskit.quantum_info import Statevector
class AmplificationProblem:
    """The amplification problem is the input to amplitude amplification algorithms, like Grover.

    It bundles all problem-specific information an amplitude amplification
    algorithm needs: at minimum the oracle (from which the Grover operator
    can be constructed), plus optional state preparation, a custom Grover
    operator, a post-processing map for the optimal bitstring, the set of
    measured (objective) qubits and a predicate identifying good states.
    """

    def __init__(
        self,
        oracle: Union[QuantumCircuit, Statevector],
        state_preparation: Optional[QuantumCircuit] = None,
        grover_operator: Optional[QuantumCircuit] = None,
        post_processing: Optional[Callable[[str], Any]] = None,
        objective_qubits: Optional[Union[int, List[int]]] = None,
        is_good_state: Optional[
            Union[Callable[[str], bool], List[int], List[str], Statevector]
        ] = None,
    ) -> None:
        r"""
        Args:
            oracle: The oracle reflecting about the bad states.
            state_preparation: A circuit preparing the input state, referred to as
                :math:`\mathcal{A}`. If None, a layer of Hadamard gates is used.
            grover_operator: The Grover operator :math:`\mathcal{Q}` used as unitary in the
                phase estimation circuit. If None, this operator is constructed from the
                ``oracle`` and ``state_preparation``.
            post_processing: A mapping applied to the most likely bitstring.
            objective_qubits: If set, specifies the indices of the qubits that should be
                measured. If None, all qubits will be measured. The ``is_good_state``
                function will be applied on the measurement outcome of these qubits.
            is_good_state: A function to check whether a string represents a good state.
                By default, if the ``oracle`` argument has an ``evaluate_bitstring`` method
                (currently only provided by the
                :class:`~qiskit.circuit.library.PhaseOracle` class) this will be used,
                otherwise this kwarg is required and **must** be specified.
        """
        self._oracle = oracle
        self._state_preparation = state_preparation
        self._grover_operator = grover_operator
        self._post_processing = post_processing
        self._objective_qubits = objective_qubits
        if is_good_state is not None:
            self._is_good_state = is_good_state
        else:
            # Fall back to the oracle's own bitstring evaluator when
            # available (PhaseOracle); otherwise leave unset (None).
            self._is_good_state = getattr(oracle, "evaluate_bitstring", None)

    @property
    def oracle(self) -> Union[QuantumCircuit, Statevector]:
        """Return the oracle.

        Returns:
            The oracle.
        """
        return self._oracle

    @oracle.setter
    def oracle(self, oracle: Union[QuantumCircuit, Statevector]) -> None:
        """Set the oracle.

        Args:
            oracle: The oracle.
        """
        self._oracle = oracle

    @property
    def state_preparation(self) -> QuantumCircuit:
        r"""Get the state preparation operator :math:`\mathcal{A}`.

        If none was set, a layer of Hadamard gates on all oracle qubits
        is constructed and returned.

        Returns:
            The :math:`\mathcal{A}` operator as `QuantumCircuit`.
        """
        if self._state_preparation is not None:
            return self._state_preparation
        hadamard_layer = QuantumCircuit(self.oracle.num_qubits)
        hadamard_layer.h(hadamard_layer.qubits)
        return hadamard_layer

    @state_preparation.setter
    def state_preparation(self, state_preparation: Optional[QuantumCircuit]) -> None:
        r"""Set the :math:`\mathcal{A}` operator. If None, a layer of Hadamard gates is used.

        Args:
            state_preparation: The new :math:`\mathcal{A}` operator or None.
        """
        self._state_preparation = state_preparation

    @property
    def post_processing(self) -> Callable[[str], Any]:
        """Apply post processing to the input value.

        Returns:
            A handle to the post processing function. Acts as identity by default.
        """
        return (lambda x: x) if self._post_processing is None else self._post_processing

    @post_processing.setter
    def post_processing(self, post_processing: Callable[[str], Any]) -> None:
        """Set the post processing function.

        Args:
            post_processing: A handle to the post processing function.
        """
        self._post_processing = post_processing

    @property
    def objective_qubits(self) -> List[int]:
        """The indices of the objective qubits.

        Returns:
            The indices of the objective qubits as list of integers
            (all oracle qubits when unset).
        """
        qubits = self._objective_qubits
        if qubits is None:
            return list(range(self.oracle.num_qubits))
        return [qubits] if isinstance(qubits, int) else qubits

    @objective_qubits.setter
    def objective_qubits(self, objective_qubits: Optional[Union[int, List[int]]]) -> None:
        """Set the objective qubits.

        Args:
            objective_qubits: The indices of the qubits that should be measured.
                If None, all qubits will be measured. The ``is_good_state`` function will be
                applied on the measurement outcome of these qubits.
        """
        self._objective_qubits = objective_qubits

    @property
    def is_good_state(self) -> Callable[[str], bool]:
        """Check whether a provided bitstring is a good state or not.

        Returns:
            A callable that takes in a bitstring and returns True if the measurement is a good
            state, False otherwise. Returns None if no ``is_good_state`` was configured.
        """
        # Already a predicate (or never configured): hand it back directly.
        if (self._is_good_state is None) or callable(self._is_good_state):
            return self._is_good_state
        if isinstance(self._is_good_state, list):
            if all(isinstance(good_bitstr, str) for good_bitstr in self._is_good_state):
                # List of good bitstrings: membership test
                return lambda bitstr: bitstr in self._is_good_state
            # List of qubit indices: all listed positions must read '1'
            return lambda bitstr: all(
                bitstr[good_index] == "1"  # type:ignore
                for good_index in self._is_good_state
            )
        # Statevector: good states are those appearing with nonzero probability
        return lambda bitstr: bitstr in self._is_good_state.probabilities_dict()

    @is_good_state.setter
    def is_good_state(
        self, is_good_state: Union[Callable[[str], bool], List[int], List[str], Statevector]
    ) -> None:
        """Set the ``is_good_state`` function.

        Args:
            is_good_state: A function to determine whether a bitstring represents a good state.
        """
        self._is_good_state = is_good_state

    @property
    def grover_operator(self) -> Optional[QuantumCircuit]:
        r"""Get the :math:`\mathcal{Q}` operator, or Grover operator.

        If the Grover operator is not set, it is constructed on the fly from
        the ``oracle`` and the ``state_preparation`` operator.

        Returns:
            The Grover operator.
        """
        if self._grover_operator is not None:
            return self._grover_operator
        return GroverOperator(self.oracle, self.state_preparation)

    @grover_operator.setter
    def grover_operator(self, grover_operator: Optional[QuantumCircuit]) -> None:
        r"""Set the :math:`\mathcal{Q}` operator.

        If None, this operator is constructed from the ``oracle`` and ``state_preparation``.

        Args:
            grover_operator: The new :math:`\mathcal{Q}` operator or None.
        """
        self._grover_operator = grover_operator
|
{
"content_hash": "2a869f354491fd0d12e4e61d964191a0",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 99,
"avg_line_length": 39.381188118811885,
"alnum_prop": 0.6231301068510371,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "c2908a86aa906584fad3c54c611c620a8187f744",
"size": "8433",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/algorithms/amplitude_amplifiers/amplification_problem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Build the bolma DNA-template draft-schematic intangible object."""
	schematic = Intangible()
	schematic.template = "object/draft_schematic/bio_engineer/dna_template/shared_dna_template_bolma.iff"
	schematic.attribute_template_id = -1
	schematic.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return schematic
|
{
"content_hash": "c6c1c439c6d47fbdd26bce61016c1b9e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 99,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7042682926829268,
"repo_name": "obi-two/Rebelion",
"id": "00ed7c7dc810ec9426aa7f06e2a2f6fd53177e30",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/bio_engineer/dna_template/shared_dna_template_bolma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
structDict = {}
#用户登录请求
CThostFtdcReqUserLoginField = {}
CThostFtdcReqUserLoginField["accountID"] = "string"
CThostFtdcReqUserLoginField["loginType"] = "int"
CThostFtdcReqUserLoginField["memberID"] = "string"
CThostFtdcReqUserLoginField["password"] = "string"
CThostFtdcReqUserLoginField["tradeDate"] = "string"
CThostFtdcReqUserLoginField["ipGatewayAddress"] = "string"
CThostFtdcReqUserLoginField["portGateway"] = "int"
CThostFtdcReqUserLoginField["localOrderNo"] = "string"
CThostFtdcReqUserLoginField["machineID"] = "int"
CThostFtdcReqUserLoginField["loginbatch"] = "int"
CThostFtdcReqUserLoginField["clientName"] = "string"
CThostFtdcReqUserLoginField["lastLoginIp"] = "string"
CThostFtdcReqUserLoginField["lastLoginDate"] = "string"
CThostFtdcReqUserLoginField["lastLoginTime"] = "string"
CThostFtdcReqUserLoginField["gateWayCurLinkNum"] = "int"
CThostFtdcReqUserLoginField["gateWayYYBDB"] = "string"
CThostFtdcReqUserLoginField["obligate"] = "string"
CThostFtdcReqUserLoginField["msg"] = "string"
structDict['CThostFtdcReqUserLoginField'] = CThostFtdcReqUserLoginField
#用户登录应答
CThostFtdcRspUserLoginField = {}
CThostFtdcRspUserLoginField["traderID"] = "string"
CThostFtdcRspUserLoginField["memberID"] = "string"
CThostFtdcRspUserLoginField["password"] = "string"
CThostFtdcRspUserLoginField["tradeDate"] = "string"
CThostFtdcRspUserLoginField["ipAddress"] = "string"
CThostFtdcRspUserLoginField["localOrderNo"] = "string"
CThostFtdcRspUserLoginField["machineID"] = "int"
CThostFtdcRspUserLoginField["loginbatch"] = "int"
CThostFtdcRspUserLoginField["clientName"] = "string"
CThostFtdcRspUserLoginField["lastLoginIp"] = "string"
CThostFtdcRspUserLoginField["lastLoginDate"] = "string"
CThostFtdcRspUserLoginField["lastLoginTime"] = "string"
CThostFtdcRspUserLoginField["gateWayCurLinkNum"] = "int"
CThostFtdcRspUserLoginField["gateWayYYBDB"] = "string"
CThostFtdcRspUserLoginField["obligate"] = "string"
CThostFtdcRspUserLoginField["msg"] = "string"
CThostFtdcRspUserLoginField["SeatNo"] = "string"
CThostFtdcRspUserLoginField["tradeCode"] = "string"
CThostFtdcRspUserLoginField["clientID"] = "string"
structDict['CThostFtdcRspUserLoginField'] = CThostFtdcRspUserLoginField
#用户登出请求
CThostFtdcUserLogoutField = {}
CThostFtdcUserLogoutField["traderID"] = "string"
CThostFtdcUserLogoutField["memberID"] = "string"
CThostFtdcUserLogoutField["password"] = "string"
CThostFtdcUserLogoutField["tradeDate"] = "string"
CThostFtdcUserLogoutField["ipAddress"] = "string"
CThostFtdcUserLogoutField["localOrderNo"] = "string"
CThostFtdcUserLogoutField["machineID"] = "int"
CThostFtdcUserLogoutField["loginbatch"] = "int"
CThostFtdcUserLogoutField["clientName"] = "string"
CThostFtdcUserLogoutField["lastLoginIp"] = "string"
CThostFtdcUserLogoutField["lastLoginDate"] = "string"
CThostFtdcUserLogoutField["lastLoginTime"] = "string"
CThostFtdcUserLogoutField["gateWayCurLinkNum"] = "int"
CThostFtdcUserLogoutField["gateWayYYBDB"] = "string"
CThostFtdcUserLogoutField["obligate"] = "string"
CThostFtdcUserLogoutField["msg"] = "string"
structDict['CThostFtdcUserLogoutField'] = CThostFtdcUserLogoutField
# Instrument (contract) definition.
CThostFtdcInstrumentField = {
    "exchangeID": "string",
    "instID": "string",
    "lowerLimit": "float",
    "marketID": "string",
    "maxHand": "int",
    "minHand": "int",
    "name": "string",
    "openFlag": "string",
    "tick": "float",
    "tradeState": "string",
    "unit": "int",
    "upperLimit": "float",
    "varietyID": "string",
    "varietyType": "string",
    "marketType": "string",
}
structDict['CThostFtdcInstrumentField'] = CThostFtdcInstrumentField

# Trading-account query request.
CThostFtdcQryTradingAccountField = {
    "memberID": "string",
    "accountType": "string",
    "traderID": "string",
    "clientID": "string",
    "marketID": "string",
}
structDict['CThostFtdcQryTradingAccountField'] = CThostFtdcQryTradingAccountField
# Funds (trading) account.
CThostFtdcTradingAccountField = {
    "availCap": "float",
    "available": "float",
    "posiMargin": "float",
    "buyPosiMargin": "float",
    "sellPosiMargin": "float",
    "storageMargin": "float",
    "totalFee": "float",
    "totalFrozen": "float",
    "orderFrozen": "float",
    "spotSellFrozen": "float",
    "todayIn": "float",
    "todayOut": "float",
    "lastFrozen": "float",
    "totalFrozenFee": "float",
    "pickUpMargin": "float",
    "middleMargin": "float",
}
structDict['CThostFtdcTradingAccountField'] = CThostFtdcTradingAccountField

# Investor position.
CThostFtdcInvestorPositionField = {
    "clientID": "string",
    "instID": "string",
    "longPosi": "int",
    "longPosiAvgPrice": "float",
    "shortPosi": "int",
    "shortPosiAvgPrice": "float",
    "longOpenAvgPrice": "float",
    "shortOpenAvgPrice": "float",
    "longPosiFrozen": "int",
    "shortPosiFrozen": "int",
    "longPosiVol": "int",
    "shortPosiVol": "int",
    "todayLong": "int",
    "todayShort": "int",
    "todayOffsetShort": "int",
    "todayOffsetLong": "int",
    "lastLong": "int",
    "lastShort": "int",
}
structDict['CThostFtdcInvestorPositionField'] = CThostFtdcInvestorPositionField

# Response info.
CThostFtdcRspInfoField = {
    "ErrorID": "int",       # error code
    "ErrorMsg": "string",   # error message
}
structDict['CThostFtdcRspInfoField'] = CThostFtdcRspInfoField
# Depth market data (5-level order book snapshot).
CThostFtdcDepthMarketDataField = {
    "InstID": "string",
    "Name": "string",
    "MarketName": "string",
    "PreSettle": "float",
    "PreClose": "float",
    "Open": "float",
    "High": "float",
    "Low": "float",
    "Last": "float",
    "Close": "float",
    "Bid1": "float",
    "BidLot1": "int",
    "Ask1": "float",
    "AskLot1": "int",
    "Bid2": "float",
    "BidLot2": "int",
    "Ask2": "float",
    "AskLot2": "int",
    "Bid3": "float",
    "BidLot3": "int",
    "Ask3": "float",
    "AskLot3": "int",
    "Bid4": "float",
    "BidLot4": "int",
    "Ask4": "float",
    "AskLot4": "int",
    "Bid5": "float",
    "BidLot5": "int",
    "Ask5": "float",
    "AskLot5": "int",
    "Volume": "int",
    "OpenInt": "int",
    "UpDown": "float",
    "Turnover": "float",
    "Settle": "float",
    "Average": "float",
    "QuoteDate": "string",
    "QuoteTime": "string",
    "weight": "float",
    "highLimit": "float",
    "lowLimit": "float",
    "UpDownRate": "float",
}
structDict['CThostFtdcDepthMarketDataField'] = CThostFtdcDepthMarketDataField
# Order insertion request.
CThostFtdcInputOrderField = {
    "seatID": "string",
    "clientID": "string",
    "exchangeID": "string",
    "instID": "string",
    "buyOrSell": "string",
    "offsetFlag": "string",
    "amount": "int",
    "weight": "float",
    "middleFlag": "string",
    "orderFlag": "string",
    "priceFlag": "string",
    "price": "float",
    "trigPrice": "float",
    "marketID": "string",
    "validDate": "string",
    "orderNo": "string",
    "LocalOrderNo": "string",
    "matchQty": "int",
    "matchWeight": "float",
    "status": "string",
    "entrustTime": "string",
    "forceoffset_flag": "string",
    "cancelQty": "int",
    "cancelTime": "string",
    "volumnCheck": "int",
    "tradeWay": "string",
}
structDict['CThostFtdcInputOrderField'] = CThostFtdcInputOrderField

# Order.
CThostFtdcOrderField = {
    "orderNo": "string",
    "localOrderNo": "string",
    "marketID": "string",
    "instID": "string",
    "buyOrSell": "string",
    "offsetFlag": "string",
    "amount": "int",
    "weight": "float",
    "price": "float",
    "matchQty": "int",
    "matchWeight": "float",
    "status": "string",
    "entrustTime": "string",
    "forceoffset_flag": "string",
    "cancelQty": "int",
    "cancelTime": "string",
    "tradeWay": "string",
}
structDict['CThostFtdcOrderField'] = CThostFtdcOrderField

# Order action request (cancel order).
CThostFtdcInputOrderActionField = {
    "localOrderNo": "string",
    "orderFlag": "string",
    "marketID": "string",
    "traderID": "string",
    "tradeWay": "string",
}
structDict['CThostFtdcInputOrderActionField'] = CThostFtdcInputOrderActionField

# Order action.
CThostFtdcOrderActionField = {
    "localOrderNo": "string",
    "orderFlag": "string",
    "marketID": "string",
    "traderID": "string",
    "tradeWay": "string",
}
structDict['CThostFtdcOrderActionField'] = CThostFtdcOrderActionField

# Trade (fill).
CThostFtdcTradeField = {
    "orderNo": "string",
    "matchNo": "string",
    "instID": "string",
    "buyOrSell": "string",
    "offSetFlag": "string",
    "price": "float",
    "volume": "int",
    "amount": "float",
    "weight": "float",
    "order_flag": "string",
    "matchDate": "string",
    "matchTime": "string",
    "localOrderNo": "string",
    "marketID": "string",
    "trade_fee": "float",
    "forceoffset_flag": "string",
    "forcebatchnum": "int",
    "tradeWay": "string",
}
structDict['CThostFtdcTradeField'] = CThostFtdcTradeField
# Instrument query request.
CThostFtdcQryInstrumentField = {
    "ContractID": "string",
    "ProductID": "string",
}
structDict['CThostFtdcQryInstrumentField'] = CThostFtdcQryInstrumentField

# Trade query request.
CThostFtdcQryTradeField = {
    "instID": "string",
    "marketID": "string",
    "orderNo": "string",
    "clientID": "string",
    "memberID": "string",
    "matchNo": "string",
    "exchangeID": "string",
    "localOrderNo": "string",
    "traderID": "string",
}
structDict['CThostFtdcQryTradeField'] = CThostFtdcQryTradeField

# Order query request.
CThostFtdcQryOrderField = {
    "instID": "string",
    "marketID": "string",
    "orderNo": "string",
    "clientID": "string",
    "memberID": "string",
    "offsetFlag": "string",
    "traderID": "string",
    "status": "string",
    "exchangeID": "string",
    "localOrderNo": "string",
}
structDict['CThostFtdcQryOrderField'] = CThostFtdcQryOrderField

# Investor position query request.
CThostFtdcQryInvestorPositionField = {
    "clientID": "string",
    "memberID": "string",
    "marketID": "string",
    "instID": "string",
    "traderID": "string",
}
structDict['CThostFtdcQryInvestorPositionField'] = CThostFtdcQryInvestorPositionField

# Storage (inventory) query request.
CThostFtdcQryStorageField = {
    "clientID": "string",
    "memberID": "string",
    "traderID": "string",
}
structDict['CThostFtdcQryStorageField'] = CThostFtdcQryStorageField

# Storage (inventory) holdings.
CThostFtdcStorageField = {
    "varietyID": "string",
    "varietyName": "string",
    "totalStorage": "float",
    "availableStorage": "float",
    "frozenStorage": "float",
    "pendStorage": "float",
    "todayBuy": "float",
    "todaySell": "float",
    "todayDeposit": "float",
    "todayRealDeposit": "float",
    "todayBorrow": "float",
    "todayLend": "float",
    "impawnStorage": "float",
    "lawFrozen": "float",
    "bankFrozen": "float",
    "customType": "string",
    "storageCost": "float",
    "impawnFrozen": "float",
}
structDict['CThostFtdcStorageField'] = CThostFtdcStorageField

# Market status notification.
CThostFtdcMarketStatusField = {
    "MktStatus": "string",
    "marketID": "string",
    "ExchCode": "string",
    "MktChgTime": "string",
}
structDict['CThostFtdcMarketStatusField'] = CThostFtdcMarketStatusField

# Specific instrument (subscription echo).
CThostFtdcSpecificInstrumentField = {
    "InstrumentID": "string",   # instrument code
}
structDict['CThostFtdcSpecificInstrumentField'] = CThostFtdcSpecificInstrumentField
|
{
"content_hash": "c03bda02c82abe482744ddf7f67cbbbd",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 85,
"avg_line_length": 39.25,
"alnum_prop": 0.8051812836844684,
"repo_name": "smther/vnpy",
"id": "60d722a81ba0c3e2c8e6c3ef689709fc23240d12",
"size": "16537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vn.ksgold/pyscript/ksgold_struct.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "122348"
},
{
"name": "C++",
"bytes": "4229575"
},
{
"name": "CMake",
"bytes": "2457"
},
{
"name": "Jupyter Notebook",
"bytes": "10948"
},
{
"name": "Objective-C",
"bytes": "806"
},
{
"name": "Python",
"bytes": "3700578"
},
{
"name": "R",
"bytes": "1354"
},
{
"name": "Shell",
"bytes": "4223"
}
],
"symlink_target": ""
}
|
"""Tests for open_spiel.python.utils.file_logger."""
import os
import tempfile
from absl.testing import absltest
from open_spiel.python.utils import file_logger
class FileLoggerTest(absltest.TestCase):
  """Verifies that FileLogger creates its log file and appends lines."""

  def test_file_logger(self):
    scratch_dir = tempfile.mkdtemp()
    try:
      name = "test"
      path = os.path.join(scratch_dir, "log-{}.txt".format(name))

      # The file must not exist until the logger is opened.
      self.assertTrue(os.path.isdir(scratch_dir))
      self.assertFalse(os.path.exists(path))

      with file_logger.FileLogger(scratch_dir, name) as logger:
        logger.print("line 1")
        logger.print("line", 2)
        logger.print("line", 3, "asdf")

      with open(path, "r") as f:
        contents = f.readlines()

      # One line per print call, print-style joined with spaces.
      self.assertLen(contents, 3)
      self.assertIn("line 1", contents[0])
      self.assertIn("line 2", contents[1])
      self.assertIn("line 3 asdf", contents[2])
    finally:
      if os.path.exists(path):
        os.remove(path)
      os.rmdir(scratch_dir)
# Run the absl test runner when this file is executed as a script.
if __name__ == "__main__":
  absltest.main()
|
{
"content_hash": "b168dd526b04322636cc548fd5cd6dc6",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 25.4390243902439,
"alnum_prop": 0.6212847555129435,
"repo_name": "deepmind/open_spiel",
"id": "a99ce32b2116a7b538a4f2bdb594a5ba27f76c75",
"size": "1638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_spiel/python/utils/file_logger_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6640"
},
{
"name": "C++",
"bytes": "4649139"
},
{
"name": "CMake",
"bytes": "78467"
},
{
"name": "Go",
"bytes": "18010"
},
{
"name": "Julia",
"bytes": "16727"
},
{
"name": "Jupyter Notebook",
"bytes": "148663"
},
{
"name": "Python",
"bytes": "2823600"
},
{
"name": "Rust",
"bytes": "18562"
},
{
"name": "Shell",
"bytes": "51087"
}
],
"symlink_target": ""
}
|
"""
Django settings for xtimr project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f589rj3&i!6sde7s%qp&a(3x)@v$*($^lr_yar3++rxszy_vgd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'web',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xtimr.urls'
WSGI_APPLICATION = 'xtimr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Allow an optional local_settings.py to override any setting above (e.g.
# per-developer database credentials). Only a missing module is tolerated;
# a bare `except:` here would also hide syntax or runtime errors inside
# local_settings, silently running with the wrong configuration.
try:
    from local_settings import *
except ImportError:
    pass
|
{
"content_hash": "2ee9cc44282476620de6969971af1c2e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 71,
"avg_line_length": 22.159574468085108,
"alnum_prop": 0.7119539126260201,
"repo_name": "zhy0216/xtimr",
"id": "356d5c54a4b481bebf1acf00e9bf8f7aff5b54d4",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xtimr/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2550"
},
{
"name": "Python",
"bytes": "36961"
}
],
"symlink_target": ""
}
|
import datetime
import json
#import MySQLdb
import traceback
import sys
import os
import TestRig
import UserInterface
import Config
class FileSink():
def __init__(self, fileName):
self.fileName = fileName
print "Logging to logfile: %s" % self.fileName
try:
self.logFile = open(self.fileName, 'a')
except Exception:
self.logFile = None
print "Cannot open logfile at %s" % self.fileName
def Log(self, data):
""" Log a message to the file """
try:
msg = json.dumps(data)
if self.logFile is not None:
self.logFile.write(msg + '\n')
self.logFile.flush()
except:
print "Error logging to file %s." % self.fileName
#raise #prevent logging errors from killing things.
class Logger():
    """Central test logger: fans messages out to file sinks and, when
    RequireDB is enabled, mirrors test lifecycle records into MySQL.

    NOTE(review): self.MACAddress and self.MachineType are read by the DB
    methods below but never assigned in this class -- presumably set on the
    instance elsewhere before InitDB() is called; verify.
    NOTE(review): the `import MySQLdb` at the top of this file is commented
    out, so InitDB() will raise NameError when RequireDB is True unless the
    import is restored.
    """
    def __init__(self):
        # Shared singletons from sibling modules.
        self.r = TestRig.testRig
        self.i = UserInterface.interface
        config = Config.Config()
        self.RequireDB = config.get('Logger','RequireDB',False)
        self.Verbose = config.get('Logger','Verbose',True)
        self.ProductionRun = config.get('Logger','ProductionRun',"pp")
        self.dbHost = config.get('Logger','dbHost',"host")
        self.dbUser = config.get('Logger','dbUser',"user")
        self.dbPassword = config.get('Logger','dbPassword',"pw")
        self.dbDatabase = config.get('Logger','dbDatabase',"db")
        self.testIds = {} # dictionary to store test <--> database ID mappings.
        self.sinks = []
    def Init(self):
        """Attach the file sinks (SD card, plus USB stick if mounted)."""
        self.sinks.append(FileSink("./sd-log.log"))
        # If a usb stick is connected, log to it as well.
        if os.access('/media/usb0', os.R_OK):
            self.sinks.append(FileSink("/media/usb0/usb-log.log"))
        if self.RequireDB:
            self.InitDB() # moved to menu_functional test area.
    def InitDB(self):
        """Connect to MySQL; fatal only when RequireDB is set."""
        try:
            self.db = MySQLdb.connect(host = self.dbHost, user = self.dbUser, passwd = self.dbPassword, db = self.dbDatabase)
        except Exception as err:
            self.db = None
            if self.RequireDB:
                traceback.print_exc(file=sys.stderr)
                raise Exception("CANNOT CONNECT TO DATABASE")
        self.ClearStaleEntries()
        self.LookupMachineInfo()
    def ClearStaleEntries(self):
        """Sometimes we get stale tests if the code crashes. Clear and error them out."""
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("UPDATE test_log SET test_status = 'error', test_result = 'STALLED' WHERE mac = %s AND test_status = 'running'", (self.MACAddress))
                self.db.commit()
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #6")
    def LookupMachineInfo(self, create = True):
        """Look to see if we have any info about our machine available.

        When no row exists and `create` is True, a new machine record is
        inserted; otherwise the fetched row is cached on self.MachineInfo.
        """
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("SELECT * FROM machines WHERE mac = %s", (self.MACAddress))
                row = cursor.fetchone()
                if row == None:
                    if create:
                        self.CreateMachineEntry()
                else:
                    self.MachineInfo = row
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                raise Exception("DB LOGGING ERROR #7")
    def CreateMachineEntry(self):
        """Create a machine entry in our tracking table."""
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("INSERT INTO machines (mac, production_run, machine_type, manufacture_date) VALUES (%s, %s, %s, NOW())", (self.MACAddress, self.ProductionRun, self.MachineType))
                self.db.commit()
                cursor.close()
                # create=False: the row now exists, just fetch and cache it.
                self.LookupMachineInfo(False) #pull it into our
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #8")
    def Log(self, testName, message, extraData = None, logType = "raw", testId=0):
        """
        Logs a message during testing. The message will be converted to a JSON string and
        sprayed across as many different output modes as possible: all file logers, and database.
        """
        data = {}
        data['production_run'] = self.ProductionRun
        data['test'] = testName
        d = datetime.datetime.today()
        data['timestamp'] = d.strftime("%Y-%m-%d %H:%M:%S")
        data['message'] = message
        data['data'] = extraData
        data['type'] = logType
        #send the data to all our various endpoints
        if self.RequireDB:
            self.LogToDB(data, testId)
        for sink in self.sinks:
            sink.Log(data)
    def LogToDB(self, data, testId=0):
        """Record the log message to the database if present.

        NOTE(review): data['status'] is never set by Log(), so this INSERT
        raises KeyError which the bare except below swallows -- raw_log rows
        are likely never written. Confirm where 'status' should come from.
        """
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("INSERT INTO raw_log (result_id, production_run, mac, log_type, test_name, test_time, message, data, machine_status) VALUES (%s, %s, %s, %s, %s, NOW(), %s, %s, %s)",
                    (testId, self.ProductionRun, self.MACAddress, data['type'], data['test'], data['message'], json.dumps(data['data']), json.dumps(data['status'])))
                self.db.commit()
                cursor.close()
            except:
                # Deliberate swallow: logging failures must not kill the rig.
                traceback.print_exc(file=sys.stderr)
                #self.db.rollback() #if our db has gone away... then this triggers another error. plus mysql does not do transactions.
                #raise Exception("DB LOGGING ERROR #1") #raise #prevent logging errors from killing things.
    def GetTestId(self, testId):
        """Map a test name to its database row id (0 when DB disabled)."""
        if self.RequireDB:
            return self.testIds[testId]
        else:
            return 0
    def TestStart(self, test):
        """Record that a test has started (DB row + START log line)."""
        #insert new record into test_log table
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("INSERT INTO test_log (mac, production_run, test_name, test_start, test_status) VALUES (%s, %s, %s, NOW(), 'running')",
                    (self.MACAddress, self.ProductionRun, test.id()))
                self.db.commit()
                self.testIds[test.id()] = cursor.lastrowid #store new id test:id dictionary for later use
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #2")
        self.Log(test.id(), "START", testId=self.GetTestId(test.id()))
    def TestPass(self, test):
        """Record a passing test result."""
        tid = self.GetTestId(test.id())
        self.Log(test.id(), "PASS", test.testResultData, testId=tid)
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("UPDATE test_log SET test_status = 'pass', test_end = NOW(), test_result = %s WHERE id = %s", (test.testResultData, self.GetTestId(test.id())))
                self.db.commit()
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #3")
    def TestError(self, test, err):
        """Record a test that raised; `err` is a sys.exc_info() triple."""
        tid = self.GetTestId(test.id())
        etype, value, tb = err
        data = traceback.format_exception(etype, value, tb, 10)
        self.Log(test.id(), "ERROR", data, "error", testId=tid)
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("UPDATE test_log SET test_status = 'error', test_end = NOW(), test_result = %s WHERE id = %s", (test.testResultData, self.GetTestId(test.id())))
                self.db.commit()
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #4")
    def TestFail(self, test):
        """Record a failing test result."""
        tid = self.GetTestId(test.id())
        self.Log(test.id(), "FAIL", test.testResultData, testId=tid)
        if self.RequireDB:
            cursor = self.db.cursor()
            try:
                cursor.execute("UPDATE test_log SET test_status = 'fail', test_end = NOW(), test_result = %s WHERE id = %s", (test.testResultData, self.GetTestId(test.id())))
                self.db.commit()
                cursor.close()
            except Exception as err:
                traceback.print_exc(file=sys.stderr)
                self.db.rollback()
                raise Exception("DB LOGGING ERROR #5")
# Declare a single instance of the logger interface, that all modules can share
# TODO: This is so that new modules can be loaded dynamically and run, but there
# is probably a more elegant way to do this.
logger = Logger()
|
{
"content_hash": "1b1598b549fd178583c4d07cade43ea3",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 189,
"avg_line_length": 36.148471615720524,
"alnum_prop": 0.6274462430538778,
"repo_name": "Blinkinlabs/BlinkyTape",
"id": "46cfae5154b0f956e3396f99e83a917e43a1f8eb",
"size": "8278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "production/TestProgram_Beaglebone/Logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15762"
},
{
"name": "Python",
"bytes": "71844"
},
{
"name": "Shell",
"bytes": "3597"
}
],
"symlink_target": ""
}
|
'''
============================================
TGrep search implementation for NLTK trees
============================================
This module supports TGrep2 syntax for matching parts of NLTK Trees.
Note that many tgrep operators require the tree passed to be a
``ParentedTree``.
External links:
- `Tgrep tutorial <http://www.stanford.edu/dept/linguistics/corpora/cas-tut-tgrep.html>`_
- `Tgrep2 manual <http://tedlab.mit.edu/~dr/Tgrep2/tgrep2.pdf>`_
- `Tgrep2 source <http://tedlab.mit.edu/~dr/Tgrep2/>`_
Usage
=====
>>> from nltk.tree import ParentedTree
>>> from nltk.tgrep import tgrep_nodes, tgrep_positions
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> list(tgrep_nodes('NN', [tree]))
[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]
>>> list(tgrep_positions('NN', [tree]))
[[(0, 2), (2, 1)]]
>>> list(tgrep_nodes('DT', [tree]))
[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]]
>>> list(tgrep_nodes('DT $ JJ', [tree]))
[[ParentedTree('DT', ['the'])]]
This implementation adds syntax to select nodes based on their NLTK
tree position. This syntax is ``N`` plus a Python tuple representing
the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are
valid node selectors. Example:
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> tree[0,0]
ParentedTree('DT', ['the'])
>>> tree[0,0].treeposition()
(0, 0)
>>> list(tgrep_nodes('N(0,0)', [tree]))
[[ParentedTree('DT', ['the'])]]
Caveats:
========
- Link modifiers: "?" and "=" are not implemented.
- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are
not implemented.
- The "=" and "~" links are not implemented.
Known Issues:
=============
- There are some issues with link relations involving leaf nodes
(which are represented as bare strings in NLTK trees). For
instance, consider the tree::
(S (A x))
The search string ``* !>> S`` should select all nodes which are not
dominated in some way by an ``S`` node (i.e., all nodes which are
not descendants of an ``S``). Clearly, in this tree, the only node
which fulfills this criterion is the top node (since it is not
dominated by anything). However, the code here will find both the
top node and the leaf node ``x``. This is because we cannot recover
the parent of the leaf, since it is stored as a bare string.
A possible workaround, when performing this kind of search, would be
to filter out all leaf nodes.
Implementation notes
====================
This implementation is (somewhat awkwardly) based on lambda functions
which are predicates on a node. A predicate is a function which is
either True or False; using a predicate function, we can identify sets
of nodes with particular properties. A predicate function, could, for
instance, return True only if a particular node has a label matching a
particular regular expression, and has a daughter node which has no
sisters. Because tgrep2 search strings can do things statefully (such
as substituting in macros, and binding nodes with node labels), the
actual predicate function is declared with three arguments::
pred = lambda n, m, l: return True # some logic here
``n``
is a node in a tree; this argument must always be given
``m``
contains a dictionary, mapping macro names onto predicate functions
``l``
is a dictionary to map node labels onto nodes in the tree
``m`` and ``l`` are declared to default to ``None``, and so need not be
specified in a call to a predicate. Predicates which call other
predicates must always pass the value of these arguments on. The
top-level predicate (constructed by ``_tgrep_exprs_action``) binds the
macro definitions to ``m`` and initialises ``l`` to an empty dictionary.
'''
from __future__ import absolute_import, print_function, unicode_literals
from nltk.compat import binary_type, text_type
import functools
import nltk.tree
try:
import pyparsing
except ImportError:
print('Warning: nltk.tgrep will not work without the `pyparsing` package')
print('installed.')
import re
class TgrepException(Exception):
    '''Raised for errors encountered while parsing or evaluating a
    tgrep search expression.'''
def ancestors(node):
    '''
    Returns a list of every node dominating ``node``, ordered from the
    immediate parent up to the root.

    Leaf nodes are stored as bare strings with no parent pointer, so an
    empty list is returned for them.
    '''
    chain = []
    try:
        parent = node.parent()
    except AttributeError:
        # if node is a leaf, we cannot retrieve its parent
        return chain
    while parent:
        chain.append(parent)
        parent = parent.parent()
    return chain
def unique_ancestors(node):
    '''
    Returns the nodes dominating ``node`` along a single path of
    descent, i.e. the chain of ancestors that each have exactly one
    child.
    '''
    chain = []
    try:
        parent = node.parent()
    except AttributeError:
        # if node is a leaf, we cannot retrieve its parent
        return chain
    while parent and len(parent) == 1:
        chain.append(parent)
        parent = parent.parent()
    return chain
def _descendants(node):
'''
Returns the list of all nodes which are descended from the given
tree node in some way.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:]]
def _leftmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
left branches from this node.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:] if all(y == 0 for y in x)]
def _rightmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
right branches from this node.
'''
try:
rightmost_leaf = max(node.treepositions())
except AttributeError:
return []
return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
def _istree(obj):
    '''Predicate to check whether `obj` is a nltk.tree.Tree, i.e. an
    internal node rather than a bare-string leaf.'''
    return isinstance(obj, nltk.tree.Tree)
def _unique_descendants(node):
'''
Returns the list of all nodes descended from the given node, where
there is only a single path of descent.
'''
results = []
current = node
while current and _istree(current) and len(current) == 1:
current = current[0]
results.append(current)
return results
def _before(node):
'''
Returns the set of all nodes that are before the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] < pos[:len(x)]]
def _immediately_before(node):
    '''
    Returns the set of all nodes that are immediately before the given
    node.

    Tree node A immediately precedes node B if the last terminal
    symbol (word) produced by A immediately precedes the first
    terminal symbol produced by B.
    '''
    try:
        pos = node.treeposition()
        tree = node.root()
    except AttributeError:
        # leaves lack treeposition()/root(); nothing to compute here
        return []
    # go "upwards" from pos until there is a place we can go to the left
    idx = len(pos) - 1
    while 0 <= idx and pos[idx] == 0:
        idx -= 1
    if idx < 0:
        # node sits on the leftmost spine of the tree: nothing before it
        return []
    # step to the immediately preceding sibling at that level
    pos = list(pos[:idx + 1])
    pos[-1] -= 1
    before = tree[pos]
    # that node, plus its rightmost spine, all end right where `node` begins
    return [before] + _rightmost_descendants(before)
def _after(node):
'''
Returns the set of all nodes that are after the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] > pos[:len(x)]]
def _immediately_after(node):
    '''
    Returns the set of all nodes that are immediately after the given
    node.

    Tree node A immediately follows node B if the first terminal
    symbol (word) produced by A immediately follows the last
    terminal symbol produced by B.
    '''
    try:
        pos = node.treeposition()
        tree = node.root()
        current = node.parent()
    except AttributeError:
        # leaves lack treeposition()/root()/parent(); nothing to compute
        return []
    # go "upwards" from pos until there is a place we can go to the
    # right; `current` tracks the ancestor whose child count bounds
    # the rightmost index at each level
    idx = len(pos) - 1
    while 0 <= idx and pos[idx] == len(current) - 1:
        idx -= 1
        current = current.parent()
    if idx < 0:
        # node sits on the rightmost spine of the tree: nothing after it
        return []
    # step to the immediately following sibling at that level
    pos = list(pos[:idx + 1])
    pos[-1] += 1
    after = tree[pos]
    # that node, plus its leftmost spine, all begin right where `node` ends
    return [after] + _leftmost_descendants(after)
def _tgrep_node_literal_value(node):
    '''
    Return the string used when comparing `node` against tgrep node
    literals: the label for tree nodes, the text itself for leaves.
    '''
    if _istree(node):
        return node.label()
    return text_type(node)
def _tgrep_macro_use_action(_s, _l, tokens):
'''
Builds a lambda function which looks up the macro name used.
'''
assert len(tokens) == 1
assert tokens[0][0] == '@'
macro_name = tokens[0][1:]
def macro_use(n, m=None, l=None):
if m is None or macro_name not in m:
raise TgrepException('macro {0} not defined'.format(macro_name))
return m[macro_name](n, m, l)
return macro_use
def _tgrep_node_action(_s, _l, tokens):
    '''
    Builds a lambda function representing a predicate on a tree node
    depending on the name of its node.

    Handles, in order: an optional leading apostrophe (tgrep2 print
    command), disjunctions of node names (A|B|...), already-built
    parenthetical predicates, the wildcards * and __, quoted string
    literals, /regex/ patterns, case-insensitive i@ variants, and
    plain node-name literals.
    '''
    # print 'node tokens: ', tokens
    if tokens[0] == "'":
        # strip initial apostrophe (tgrep2 print command)
        tokens = tokens[1:]
    if len(tokens) > 1:
        # disjunctive definition of a node name
        assert list(set(tokens[1::2])) == ['|']
        # recursively call self to interpret each node name definition
        tokens = [_tgrep_node_action(None, None, [node])
                  for node in tokens[::2]]
        # capture tokens and return the disjunction
        return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens)
    else:
        if hasattr(tokens[0], '__call__'):
            # this is a previously interpreted parenthetical node
            # definition (lambda function)
            return tokens[0]
        elif tokens[0] == '*' or tokens[0] == '__':
            # wildcard: matches any node
            return lambda n, m=None, l=None: True
        elif tokens[0].startswith('"'):
            # quoted literal: unescape \" and \\ before exact comparison
            assert tokens[0].endswith('"')
            node_lit = tokens[0][1:-1].replace('\\"', '"').replace('\\\\', '\\')
            return (lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s)(node_lit)
        elif tokens[0].startswith('/'):
            # regex literal: compiled once, searched against the node value
            assert tokens[0].endswith('/')
            node_lit = tokens[0][1:-1]
            return (lambda r: lambda n, m=None, l=None:
                    r.search(_tgrep_node_literal_value(n)))(re.compile(node_lit))
        elif tokens[0].startswith('i@'):
            # case-insensitive form: lowercase both pattern and node value
            node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()])
            return (lambda f: lambda n, m=None, l=None:
                    f(_tgrep_node_literal_value(n).lower()))(node_func)
        else:
            # plain node name: exact string comparison
            return (lambda s: lambda n, m=None, l=None:
                    _tgrep_node_literal_value(n) == s)(tokens[0])
def _tgrep_parens_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from a parenthetical notation.
'''
# print 'parenthetical tokens: ', tokens
assert len(tokens) == 3
assert tokens[0] == '('
assert tokens[2] == ')'
return tokens[1]
def _tgrep_nltk_tree_pos_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which returns true if the node is located at a specific tree
position.
'''
# recover the tuple from the parsed sting
node_tree_position = tuple(int(x) for x in tokens if x.isdigit())
# capture the node's tree position
return (lambda i: lambda n, m=None, l=None: (hasattr(n, 'treeposition') and
n.treeposition() == i))(node_tree_position)
def _tgrep_relation_action(_s, _l, tokens):
    '''
    Builds a lambda function representing a predicate on a tree node
    depending on its relation to other nodes in the tree.

    `tokens` is either a bracketed relation group ('[', predicate, ']')
    or an (operator, predicate) pair, optionally preceded by '!' which
    negates the result.  Every returned predicate has the signature
    pred(n, m=None, l=None): n is the node under test, m the macro
    dictionary, l the node-label dictionary.
    '''
    # print 'relation tokens: ', tokens
    # process negation first if needed
    negated = False
    if tokens[0] == '!':
        negated = True
        tokens = tokens[1:]
    if tokens[0] == '[':
        # process square-bracketed relation expressions
        assert len(tokens) == 3
        assert tokens[2] == ']'
        retval = tokens[1]
    else:
        # process operator-node relation expressions
        assert len(tokens) == 2
        operator, predicate = tokens
        # A < B       A is the parent of (immediately dominates) B.
        if operator == '<':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l) for x in n))
        # A > B       A is the child of B.
        elif operator == '>':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                predicate(n.parent(), m, l))
        # A <, B      Synonymous with A <1 B.
        elif operator == '<,' or operator == '<1':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                bool(list(n)) and
                                                predicate(n[0], m, l))
        # A >, B      Synonymous with A >1 B.
        elif operator == '>,' or operator == '>1':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                (n is n.parent()[0]) and
                                                predicate(n.parent(), m, l))
        # A <N B      B is the Nth child of A (the first child is <1).
        elif operator[0] == '<' and operator[1:].isdigit():
            idx = int(operator[1:])
            # capture the index parameter (converted to 0-based)
            retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
                                                           bool(list(n)) and
                                                           0 <= i < len(n) and
                                                           predicate(n[i], m, l)))(idx - 1)
        # A >N B      A is the Nth child of B (the first child is >1).
        elif operator[0] == '>' and operator[1:].isdigit():
            idx = int(operator[1:])
            # capture the index parameter (converted to 0-based)
            retval = (lambda i: lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                           bool(n.parent()) and
                                                           0 <= i < len(n.parent()) and
                                                           (n is n.parent()[i]) and
                                                           predicate(n.parent(), m, l)))(idx - 1)
        # A <' B      B is the last child of A (also synonymous with A <-1 B).
        # A <- B      B is the last child of A (synonymous with A <-1 B).
        elif operator == '<\'' or operator == '<-' or operator == '<-1':
            retval = lambda n, m=None, l=None: (_istree(n) and bool(list(n))
                                                and predicate(n[-1], m, l))
        # A >' B      A is the last child of B (also synonymous with A >-1 B).
        # A >- B      A is the last child of B (synonymous with A >-1 B).
        elif operator == '>\'' or operator == '>-' or operator == '>-1':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                (n is n.parent()[-1]) and
                                                predicate(n.parent(), m, l))
        # A <-N B     B is the N th-to-last child of A (the last child is <-1).
        elif operator[:2] == '<-' and operator[2:].isdigit():
            idx = -int(operator[2:])
            # capture the (negative) index parameter
            retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
                                                           bool(list(n)) and
                                                           0 <= (i + len(n)) < len(n) and
                                                           predicate(n[i + len(n)], m, l)))(idx)
        # A >-N B     A is the N th-to-last child of B (the last child is >-1).
        elif operator[:2] == '>-' and operator[2:].isdigit():
            idx = -int(operator[2:])
            # capture the (negative) index parameter
            retval = (lambda i: lambda n, m=None, l=None:
                      (hasattr(n, 'parent') and
                       bool(n.parent()) and
                       0 <= (i + len(n.parent())) < len(n.parent()) and
                       (n is n.parent()[i + len(n.parent())]) and
                       predicate(n.parent(), m, l)))(idx)
        # A <: B      B is the only child of A
        elif operator == '<:':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                len(n) == 1 and
                                                predicate(n[0], m, l))
        # A >: B      A is the only child of B.
        elif operator == '>:':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                len(n.parent()) == 1 and
                                                predicate(n.parent(), m, l))
        # A << B      A dominates B (A is an ancestor of B).
        elif operator == '<<':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l) for x in _descendants(n)))
        # A >> B      A is dominated by B (A is a descendant of B).
        elif operator == '>>':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in ancestors(n))
        # A <<, B     B is a left-most descendant of A.
        elif operator == '<<,' or operator == '<<1':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _leftmost_descendants(n)))
        # A >>, B     A is a left-most descendant of B.
        elif operator == '>>,':
            retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
                                                    n in _leftmost_descendants(x))
                                                   for x in ancestors(n))
        # A <<' B     B is a right-most descendant of A.
        elif operator == '<<\'':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _rightmost_descendants(n)))
        # A >>' B     A is a right-most descendant of B.
        elif operator == '>>\'':
            retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
                                                    n in _rightmost_descendants(x))
                                                   for x in ancestors(n))
        # A <<: B     There is a single path of descent from A and B is on it.
        elif operator == '<<:':
            retval = lambda n, m=None, l=None: (_istree(n) and
                                                any(predicate(x, m, l)
                                                    for x in _unique_descendants(n)))
        # A >>: B     There is a single path of descent from B and A is on it.
        elif operator == '>>:':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in unique_ancestors(n))
        # A . B       A immediately precedes B.
        elif operator == '.':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l)
                                                   for x in _immediately_after(n))
        # A , B       A immediately follows B.
        elif operator == ',':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l)
                                                   for x in _immediately_before(n))
        # A .. B      A precedes B.
        elif operator == '..':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _after(n))
        # A ,, B      A follows B.
        elif operator == ',,':
            retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _before(n))
        # A $ B       A is a sister of B (and A != B).
        elif operator == '$' or operator == '%':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l)
                                                    for x in n.parent() if x is not n))
        # A $. B      A is a sister of and immediately precedes B.
        elif operator == '$.' or operator == '%.':
            retval = lambda n, m=None, l=None: (hasattr(n, 'right_sibling') and
                                                bool(n.right_sibling()) and
                                                predicate(n.right_sibling(), m, l))
        # A $, B      A is a sister of and immediately follows B.
        elif operator == '$,' or operator == '%,':
            retval = lambda n, m=None, l=None: (hasattr(n, 'left_sibling') and
                                                bool(n.left_sibling()) and
                                                predicate(n.left_sibling(), m, l))
        # A $.. B     A is a sister of and precedes B.
        elif operator == '$..' or operator == '%..':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                hasattr(n, 'parent_index') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l) for x in
                                                    n.parent()[n.parent_index() + 1:]))
        # A $,, B     A is a sister of and follows B.
        elif operator == '$,,' or operator == '%,,':
            retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
                                                hasattr(n, 'parent_index') and
                                                bool(n.parent()) and
                                                any(predicate(x, m, l) for x in
                                                    n.parent()[:n.parent_index()]))
        else:
            raise TgrepException(
                'cannot interpret tgrep operator "{0}"'.format(operator))
    # now return the built function
    if negated:
        # capture retval and wrap it in a negation
        return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval)
    else:
        return retval
def _tgrep_conjunction_action(_s, _l, tokens, join_char = '&'):
'''
Builds a lambda function representing a predicate on a tree node
from the conjunction of several other such lambda functions.
This is prototypically called for expressions like
(`tgrep_rel_conjunction`)::
< NP & < AP < VP
where tokens is a list of predicates representing the relations
(`< NP`, `< AP`, and `< VP`), possibly with the character `&`
included (as in the example here).
This is also called for expressions like (`tgrep_node_expr2`)::
NP < NN
S=s < /NP/=n : s < /VP/=v : n .. v
tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional)
list of segmented patterns (`tgrep_expr_labeled`, processed by
`_tgrep_segmented_pattern_action`).
'''
# filter out the ampersand
tokens = [x for x in tokens if x != join_char]
# print 'relation conjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
else:
return (lambda ts: lambda n, m=None, l=None: all(predicate(n, m, l)
for predicate in ts))(tokens)
def _tgrep_segmented_pattern_action(_s, _l, tokens):
'''
Builds a lambda function representing a segmented pattern.
Called for expressions like (`tgrep_expr_labeled`)::
=s .. =v < =n
This is a segmented pattern, a tgrep2 expression which begins with
a node label.
The problem is that for segemented_pattern_action (': =v < =s'),
the first element (in this case, =v) is specifically selected by
virtue of matching a particular node in the tree; to retrieve
the node, we need the label, not a lambda function. For node
labels inside a tgrep_node_expr, we need a lambda function which
returns true if the node visited is the same as =v.
We solve this by creating two copies of a node_label_use in the
grammar; the label use inside a tgrep_expr_labeled has a separate
parse action to the pred use inside a node_expr. See
`_tgrep_node_label_use_action` and
`_tgrep_node_label_pred_use_action`.
'''
# tokens[0] is a string containing the node label
node_label = tokens[0]
# tokens[1:] is an (optional) list of predicates which must all
# hold of the bound node
reln_preds = tokens[1:]
def pattern_segment_pred(n, m=None, l=None):
'''This predicate function ignores its node argument.'''
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# match the relation predicates against the node
return all(pred(node, m, l) for pred in reln_preds)
return pattern_segment_pred
def _tgrep_node_label_use_action(_s, _l, tokens):
'''
Returns the node label used to begin a tgrep_expr_labeled. See
`_tgrep_segmented_pattern_action`.
Called for expressions like (`tgrep_node_label_use`)::
=s
when they appear as the first element of a `tgrep_expr_labeled`
expression (see `_tgrep_segmented_pattern_action`).
It returns the node label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
return tokens[0][1:]
def _tgrep_node_label_pred_use_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which describes the use of a previously bound node label.
Called for expressions like (`tgrep_node_label_use_pred`)::
=s
when they appear inside a tgrep_node_expr (for example, inside a
relation). The predicate returns true if and only if its node
argument is identical the the node looked up in the node label
dictionary using the node's label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
node_label = tokens[0][1:]
def node_label_use_pred(n, m=None, l=None):
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# truth means the given node is this node
return n is node
return node_label_use_pred
def _tgrep_bind_node_label_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which can optionally bind a matching node into the tgrep2 string's
label_dict.
Called for expressions like (`tgrep_node_expr2`)::
/NP/
@NP=n
'''
# tokens[0] is a tgrep_node_expr
if len(tokens) == 1:
return tokens[0]
else:
# if present, tokens[1] is the character '=', and tokens[2] is
# a tgrep_node_label, a string value containing the node label
assert len(tokens) == 3
assert tokens[1] == '='
node_pred = tokens[0]
node_label = tokens[2]
def node_label_bind_pred(n, m=None, l=None):
if node_pred(n, m, l):
# bind `n` into the dictionary `l`
if l is None:
raise TgrepException(
'cannot bind node_label {0}: label_dict is None'.format(
node_label))
l[node_label] = n
return True
else:
return False
return node_label_bind_pred
def _tgrep_rel_disjunction_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from the disjunction of several other such lambda functions.
'''
# filter out the pipe
tokens = [x for x in tokens if x != '|']
# print 'relation disjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
elif len(tokens) == 2:
return (lambda a, b: lambda n, m=None, l=None:
a(n, m, l) or b(n, m, l))(tokens[0], tokens[1])
def _macro_defn_action(_s, _l, tokens):
'''
Builds a dictionary structure which defines the given macro.
'''
assert len(tokens) == 3
assert tokens[0] == '@'
return {tokens[1]: tokens[2]}
def _tgrep_exprs_action(_s, _l, tokens):
'''
This is the top-lebel node in a tgrep2 search string; the
predicate function it returns binds together all the state of a
tgrep2 search string.
Builds a lambda function representing a predicate on a tree node
from the disjunction of several tgrep expressions. Also handles
macro definitions and macro name binding, and node label
definitions and node label binding.
'''
if len(tokens) == 1:
return lambda n, m=None, l=None: tokens[0](n, None, {})
# filter out all the semicolons
tokens = [x for x in tokens if x != ';']
# collect all macro definitions
macro_dict = {}
macro_defs = [tok for tok in tokens if isinstance(tok, dict)]
for macro_def in macro_defs:
macro_dict.update(macro_def)
# collect all tgrep expressions
tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)]
# create a new scope for the node label dictionary
def top_level_pred(n, m=macro_dict, l=None):
label_dict = {}
# bind macro definitions and OR together all tgrep_exprs
return any(predicate(n, m, label_dict) for predicate in tgrep_exprs)
return top_level_pred
def _build_tgrep_parser(set_parse_actions = True):
    '''
    Builds a pyparsing-based parser object for tokenizing and
    interpreting tgrep search strings.

    When `set_parse_actions` is True, the grammar elements are wired
    to the _tgrep_*_action functions so that parsing a search string
    yields an executable predicate; when False the parser only
    tokenizes (used by tgrep_tokenize()).
    '''
    # relation operator, optionally negated with '!'
    tgrep_op = (pyparsing.Optional('!') +
                pyparsing.Regex('[$%,.<>][%,.<>0-9-\':]*'))
    # double-quoted node literal (kept quoted; unescaped later)
    tgrep_qstring = pyparsing.QuotedString(quoteChar='"', escChar='\\',
                                           unquoteResults=False)
    # /regex/ node pattern (kept delimited; compiled later)
    tgrep_node_regex = pyparsing.QuotedString(quoteChar='/', escChar='\\',
                                              unquoteResults=False)
    # case-insensitive i@"..." and i@/.../ variants
    tgrep_qstring_icase = pyparsing.Regex(
        'i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"')
    tgrep_node_regex_icase = pyparsing.Regex(
        'i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/')
    # bare node name: any run of characters with no special meaning
    tgrep_node_literal = pyparsing.Regex('[^][ \r\t\n;:.,&|<>()$!@%\'^=]+')
    tgrep_expr = pyparsing.Forward()
    tgrep_relations = pyparsing.Forward()
    tgrep_parens = pyparsing.Literal('(') + tgrep_expr + ')'
    # explicit tree position, e.g. N(0,1)
    tgrep_nltk_tree_pos = (
        pyparsing.Literal('N(') +
        pyparsing.Optional(pyparsing.Word(pyparsing.nums) + ',' +
                           pyparsing.Optional(pyparsing.delimitedList(
                               pyparsing.Word(pyparsing.nums), delim=',') +
                                              pyparsing.Optional(','))) + ')')
    tgrep_node_label = pyparsing.Regex('[A-Za-z0-9]+')
    tgrep_node_label_use = pyparsing.Combine('=' + tgrep_node_label)
    # see _tgrep_segmented_pattern_action
    tgrep_node_label_use_pred = tgrep_node_label_use.copy()
    macro_name = pyparsing.Regex('[^];:.,&|<>()[$!@%\'^=\r\t\n ]+')
    macro_name.setWhitespaceChars('')
    macro_use = pyparsing.Combine('@' + macro_name)
    # alternatives ordered from most to least specific
    tgrep_node_expr = (tgrep_node_label_use_pred |
                       macro_use |
                       tgrep_nltk_tree_pos |
                       tgrep_qstring_icase |
                       tgrep_node_regex_icase |
                       tgrep_qstring |
                       tgrep_node_regex |
                       '*' |
                       tgrep_node_literal)
    # node expression optionally followed by '=label' (binds the node)
    tgrep_node_expr2 = ((tgrep_node_expr +
                         pyparsing.Literal('=').setWhitespaceChars('') +
                         tgrep_node_label.copy().setWhitespaceChars('')) |
                        tgrep_node_expr)
    tgrep_node = (tgrep_parens |
                  (pyparsing.Optional("'") +
                   tgrep_node_expr2 +
                   pyparsing.ZeroOrMore("|" + tgrep_node_expr)))
    tgrep_brackets = pyparsing.Optional('!') + '[' + tgrep_relations + ']'
    tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node)
    tgrep_rel_conjunction = pyparsing.Forward()
    tgrep_rel_conjunction << (tgrep_relation +
                              pyparsing.ZeroOrMore(pyparsing.Optional('&') +
                                                   tgrep_rel_conjunction))
    tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore(
        "|" + tgrep_relations)
    tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations)
    # segmented pattern beginning with a node label, e.g. ': =v < =s'
    tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations)
    tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(':' + tgrep_expr_labeled)
    macro_defn = (pyparsing.Literal('@') +
                  pyparsing.White().suppress() +
                  macro_name +
                  tgrep_expr2)
    # a full search string: optional macro definitions, then one or
    # more expressions separated by ';'
    tgrep_exprs = (pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(';' + macro_defn) + ';') +
                   tgrep_expr2 +
                   pyparsing.ZeroOrMore(';' + (macro_defn | tgrep_expr2)) +
                   pyparsing.ZeroOrMore(';').suppress())
    if set_parse_actions:
        tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action)
        tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action)
        macro_use.setParseAction(_tgrep_macro_use_action)
        tgrep_node.setParseAction(_tgrep_node_action)
        tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action)
        tgrep_parens.setParseAction(_tgrep_parens_action)
        tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action)
        tgrep_relation.setParseAction(_tgrep_relation_action)
        tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action)
        tgrep_relations.setParseAction(_tgrep_rel_disjunction_action)
        macro_defn.setParseAction(_macro_defn_action)
        # the whole expression is also the conjunction of two
        # predicates: the first node predicate, and the remaining
        # relation predicates
        tgrep_expr.setParseAction(_tgrep_conjunction_action)
        tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action)
        tgrep_expr2.setParseAction(functools.partial(_tgrep_conjunction_action,
                                                     join_char = ':'))
        tgrep_exprs.setParseAction(_tgrep_exprs_action)
    # '#' starts a comment which runs to the end of the line
    return tgrep_exprs.ignore('#' + pyparsing.restOfLine)
def tgrep_tokenize(tgrep_string):
    '''
    Tokenize a TGrep search string into a list of separate tokens,
    without interpreting them (parse actions are disabled).
    '''
    if isinstance(tgrep_string, binary_type):
        tgrep_string = tgrep_string.decode()
    parser = _build_tgrep_parser(False)
    return list(parser.parseString(tgrep_string))
def tgrep_compile(tgrep_string):
    '''
    Parse (and tokenize, if necessary) a TGrep search string into a
    single predicate (lambda) function.
    '''
    if isinstance(tgrep_string, binary_type):
        tgrep_string = tgrep_string.decode()
    parser = _build_tgrep_parser(True)
    results = parser.parseString(tgrep_string, parseAll=True)
    return list(results)[0]
def treepositions_no_leaves(tree):
    '''
    Return the treepositions of all non-leaf nodes in `tree`.

    A position denotes a leaf exactly when it is not a proper prefix
    of any other position, so collect every proper prefix and keep
    only the positions found in that set.
    '''
    positions = tree.treepositions()
    internal = set()
    for pos in positions:
        for cut in range(len(pos)):
            internal.add(pos[:cut])
    return [pos for pos in positions if pos in internal]
def tgrep_positions(pattern, trees, search_leaves=True):
    """
    Return the tree positions in the trees which match the given pattern.

    :param pattern: a tgrep search pattern
    :type pattern: str or output of tgrep_compile()
    :param trees: a sequence of NLTK trees (usually ParentedTrees)
    :type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
    :type search_leaves: bool
    :rtype: iter(tree positions)
    """
    if isinstance(pattern, (binary_type, text_type)):
        pattern = tgrep_compile(pattern)
    for tree in trees:
        try:
            if search_leaves:
                candidates = tree.treepositions()
            else:
                candidates = treepositions_no_leaves(tree)
            yield [pos for pos in candidates if pattern(tree[pos])]
        except AttributeError:
            # non-tree inputs produce no matches
            yield []
def tgrep_nodes(pattern, trees, search_leaves=True):
    """
    Return the tree nodes in the trees which match the given pattern.

    :param pattern: a tgrep search pattern
    :type pattern: str or output of tgrep_compile()
    :param trees: a sequence of NLTK trees (usually ParentedTrees)
    :type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
    :type search_leaves: bool
    :rtype: iter(tree nodes)
    """
    if isinstance(pattern, (binary_type, text_type)):
        pattern = tgrep_compile(pattern)
    for tree in trees:
        try:
            if search_leaves:
                candidates = tree.treepositions()
            else:
                candidates = treepositions_no_leaves(tree)
            yield [tree[pos] for pos in candidates if pattern(tree[pos])]
        except AttributeError:
            # non-tree inputs produce no matches
            yield []
|
{
"content_hash": "a1b5708a97b1060767d5c53d0771ca0f",
"timestamp": "",
"source": "github",
"line_count": 927,
"max_line_length": 103,
"avg_line_length": 41.66019417475728,
"alnum_prop": 0.5510241073046945,
"repo_name": "nkcr/WebIndex",
"id": "11fa8cfa96f587a92e92d84fa9aad740471c7dc3",
"size": "38866",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "app/venv/lib/python3.5/site-packages/nltk/tgrep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46110"
},
{
"name": "CSS",
"bytes": "7900"
},
{
"name": "HTML",
"bytes": "8080"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Python",
"bytes": "7933562"
},
{
"name": "R",
"bytes": "1565"
},
{
"name": "Shell",
"bytes": "3663"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatTag(BaseWeChatAPI):
    """Enterprise WeChat contact-tag management API."""

    def create(self, name):
        """Create a new tag with the given display name."""
        return self._post('tag/create', data={'tagname': name})

    def update(self, tag_id, name):
        """Rename the tag identified by `tag_id`."""
        return self._post('tag/update',
                          data={'tagid': tag_id, 'tagname': name})

    def delete(self, tag_id):
        """Delete the tag identified by `tag_id`."""
        return self._get('tag/delete', params={'tagid': tag_id})

    def get_users(self, tag_id):
        """Fetch the members currently attached to the tag."""
        return self._get('tag/get', params={'tagid': tag_id})

    def add_users(self, tag_id, user_ids):
        """Attach the given user ids to the tag."""
        return self._post('tag/addtagusers',
                          data={'tagid': tag_id, 'userlist': user_ids})

    def delete_users(self, tag_id, user_ids):
        """Detach the given user ids from the tag."""
        return self._post('tag/deltagusers',
                          data={'tagid': tag_id, 'userlist': user_ids})

    def list(self):
        """Return all tags (the 'taglist' field of the API response)."""
        return self._get('tag/list')['taglist']
|
{
"content_hash": "ecf3cdd88e2fc043c8e6c9f453d6386a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 56,
"avg_line_length": 22.283333333333335,
"alnum_prop": 0.42408376963350786,
"repo_name": "chenjiancan/wechatpy",
"id": "c6be27338c3a063cb7cbe885db2ec7c2807d6be3",
"size": "1361",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "wechatpy/enterprise/client/api/tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "345507"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
'''
@author: FangSun
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import apibinding.inventory as inventory
import random
# Woodpecker runner configuration: two-hour timeout, serial execution.
_config_ = {
        'timeout' : 7200,
        'noparallel' : True
        }

# Project-specific helper module resolved by the test framework.
test_stub = test_lib.lib_get_test_stub()
# Registry of created VMs so env_recover() can clean them up on failure.
test_obj_dict = test_state.TestStateDict()
# Primary storages added during test(); detached/deleted in env_recover().
new_ps_list = []
# Number of VMs created per primary storage.
VM_COUNT = 1
# Number of data volumes attached to each created VM.
DATA_VOLUME_NUMBER = 10
def test():
    '''
    Multi-primary-storage scenario: create VMs with data volumes on a
    first primary storage, ensure a second primary storage exists
    (adding one if the zone has only a single PS), create VMs with
    data volumes there too, then create one more VM back on the first
    storage.  Capacity verification is still a TODO; the test passes
    once all VMs are created.
    '''
    test_util.test_dsc("Create {} vm each with {} data volume in the first primaryStorage".format(VM_COUNT, DATA_VOLUME_NUMBER))
    ps_env = test_stub.PSEnvChecker()
    ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
    if ps_env.is_sb_ceph_env:
        # SharedBlock+Ceph mixed env: the first PS must be the Ceph one
        first_ps = random.choice([ps for ps in ps_list if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE])
        vm_list = test_stub.create_multi_vms(name_prefix='vm_in_fist_ps', count=VM_COUNT, ps_uuid=first_ps.uuid,
                                             data_volume_number=DATA_VOLUME_NUMBER, ps_uuid_for_data_vol=first_ps.uuid, bs_type='Ceph')
    else:
        first_ps = random.choice(ps_list)
        vm_list = test_stub.create_multi_vms(name_prefix='vm_in_fist_ps', count=VM_COUNT, ps_uuid=first_ps.uuid,
                                             data_volume_number=DATA_VOLUME_NUMBER, ps_uuid_for_data_vol=first_ps.uuid)
    # register created VMs for cleanup in env_recover()
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    if len(ps_list) == 1:
        # only one PS in the zone: add a second one and remember it so
        # env_recover() can remove it again
        test_util.test_dsc("Add Another primaryStorage")
        second_ps = test_stub.add_primaryStorage(first_ps=first_ps)
        new_ps_list.append(second_ps)
    else:
        second_ps = random.choice([ps for ps in ps_list if ps.uuid != first_ps.uuid])
    test_util.test_dsc("Create {} vm each with {} data volume in the second primaryStorage".format(VM_COUNT, DATA_VOLUME_NUMBER))
    if ps_env.is_sb_ceph_env:
        vm_list = test_stub.create_multi_vms(name_prefix='vm_in_second_ps', count=VM_COUNT, ps_uuid=second_ps.uuid,
                                             data_volume_number=DATA_VOLUME_NUMBER, ps_uuid_for_data_vol=second_ps.uuid, bs_type='ImageStoreBackupStorage')
    else:
        vm_list = test_stub.create_multi_vms(name_prefix='vm_in_second_ps', count=VM_COUNT, ps_uuid=second_ps.uuid,
                                             data_volume_number=DATA_VOLUME_NUMBER, ps_uuid_for_data_vol=second_ps.uuid)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    test_util.test_dsc("Create one more vm in the first primaryStorage")
    vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=first_ps.uuid)[0]
    test_obj_dict.add_vm(vm)
    test_util.test_dsc("Check the capacity")
    #To do
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
    '''Tear down created VMs and remove any primary storage added by test().'''
    test_util.test_dsc("Destroy test object")
    test_lib.lib_error_cleanup(test_obj_dict)
    # detach and delete the primary storages test() added (no-op if none)
    for added_ps in new_ps_list:
        ps_ops.detach_primary_storage(added_ps.uuid, added_ps.attachedClusterUuids[0])
        ps_ops.delete_primary_storage(added_ps.uuid)
|
{
"content_hash": "b7fcc9f357dc7ab73030608cff28383f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 155,
"avg_line_length": 41.98684210526316,
"alnum_prop": 0.6530868066436853,
"repo_name": "zstackio/zstack-woodpecker",
"id": "21e4f76055d793899e5f27b17dab6eccb94ee6c4",
"size": "3191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/multiPrimaryStorage/test_create_multi_vm_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
"""Archives a parser given the config ID."""
from typing import AnyStr
import click
from common import api_utility
from common import chronicle_auth
from common import exception_handler
from common import options
from common.constants import key_constants as common_constants
from common.constants import status
from parser import parser_templates
from parser import url
from parser.constants import key_constants as parser_constants
@click.command(name="archive", help="Archives a parser given the config ID.")
@options.env_option
@options.region_option
@options.verbose_option
@options.credential_file_option
@exception_handler.catch_exception()
def archive(credential_file: AnyStr, verbose: bool, region: str,
            env: str) -> None:
  """Archive the parser identified by a user-supplied config ID.

  Args:
    credential_file (AnyStr): Path of Service Account JSON.
    verbose (bool): Option for printing verbose output to console.
    region (str): Option for selecting regions. Available options - US, EUROPE,
      ASIA_SOUTHEAST1.
    env (str): Option for selection environment. Available options - prod, test.

  Raises:
    OSError: Failed to read the given file, e.g. not found, no read access.
    ValueError: Invalid file contents.
    KeyError: Required key is not present in dictionary.
    TypeError: If response data is not JSON.
  """
  config_id = click.prompt("Enter Config ID", show_default=False, default="")
  if not config_id:
    click.echo("Config ID not provided. Please enter Config ID.")
    return

  click.echo("Archiving parser...")
  parser_url = f"{url.get_url(region, 'list', env)}/{config_id}:archive"
  http_client = chronicle_auth.initialize_http_session(credential_file)
  http_method = "POST"
  api_response = http_client.request(
      http_method, parser_url, timeout=url.HTTP_REQUEST_TIMEOUT_IN_SECS)
  body = api_utility.check_content_type(api_response.text)

  # Anything other than 200 is surfaced to the user with the server message.
  if api_response.status_code != status.STATUS_OK:
    click.echo(
        f"Error while archiving parser.\nResponse Code: {api_response.status_code}"
        f"\nError: {body[common_constants.KEY_ERROR][common_constants.KEY_MESSAGE]}"
    )
    return

  click.echo("\nParser archived successfully.")
  details = parser_templates.parser_details_template.substitute(
      config_id=f"{body[parser_constants.KEY_CONFIG_ID]}",
      log_type=f"{body[common_constants.KEY_LOG_TYPE]}",
      state=f"{body[parser_constants.KEY_STATE]}",
      sha256=f"{body[parser_constants.KEY_SHA256]}",
      author=f"{body[parser_constants.KEY_AUTHOR]}",
      submit_time=f"{body[parser_constants.KEY_SUBMIT_TIME]}",
      last_live_time=f"{body[parser_constants.KEY_LAST_LIVE_TIME]}",
      state_last_changed_time=f"{body[parser_constants.KEY_STATE_LAST_CHANGED_TIME]}"
  )
  click.echo(details)

  if verbose:
    api_utility.print_request_details(parser_url, http_method, None, body)
|
{
"content_hash": "9ca7730026ec41f5e089e3143147b30f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 96,
"avg_line_length": 38.7625,
"alnum_prop": 0.7226701064172848,
"repo_name": "chronicle/cli",
"id": "c61a309064ca4849ed076a74da9048729f7074e7",
"size": "3677",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parser/commands/archive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303761"
}
],
"symlink_target": ""
}
|
class HTTPError(Exception):
    """Base application error carrying an HTTP status code and optional payload.

    Attributes:
        status_code: HTTP status to report (class default 400; can be
            overridden per instance via the constructor).
        payload: optional mapping merged into the dict representation.
    """
    status_code = 400
    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self, message)
        # BUG FIX: ``message`` was never stored on the instance, so
        # ``to_dict`` raised AttributeError on Python 3 (the implicit
        # ``Exception.message`` attribute only existed in Python 2).
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload
    def to_dict(self):
        """Return a JSON-serializable dict: payload items plus the message."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return rv
# Domain-specific errors, all reported as HTTP 500.
# NOTE(review): the names suggest geocoding / route-finding / location-search
# failures respectively; confirm against the call sites.
class AddressNotFoundError(HTTPError):
    status_code = 500
class PathFinderError(HTTPError):
    status_code = 500
class TBellSearchError(HTTPError):
    status_code = 500
|
{
"content_hash": "1239010dbeb6383f3d271e855f2bdbe6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 25.136363636363637,
"alnum_prop": 0.6347197106690777,
"repo_name": "jisantuc/tour_de_taco_bell_api",
"id": "fc8949b0f0a36b09e55119f45b96d793ed8733d3",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "1103"
},
{
"name": "Python",
"bytes": "12804"
}
],
"symlink_target": ""
}
|
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
class HiddenLayer(object):
    """Fully-connected Theano layer with inverted-at-test-time dropout.

    At training time (training_mode == 1) the input is masked by a binary
    dropout mask; at test time the weights are scaled by (1 - dropout_prob)
    instead, so expected pre-activations match between the two modes.
    """
    def __init__(self, rng, input, n_in, n_out, training_mode, dropout_prob, activation, weights_variance):
        self.input = input
        if activation == 'tanh':
            activation_function = lambda x: T.tanh(x)
            # Glorot/Xavier-style uniform init scaled by fan-in + fan-out.
            W_values = np.asarray(rng.uniform(
                low=-np.sqrt(6. / (n_in + n_out)),
                high=np.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)), dtype='float32')
            b_values = np.zeros((n_out,), dtype='float32')
        elif activation == 'relu':
            activation_function = lambda x: T.maximum(0.0, x)
            # NOTE(review): ``weights_variance`` is passed as the *scale*
            # (std-dev) argument of rng.normal here — confirm intended units.
            W_values = np.asarray(rng.normal(0.0, weights_variance, size=(n_in, n_out)), dtype='float32')
            # Small positive bias keeps ReLU units initially active.
            b_values = np.ones((n_out,), dtype='float32') / 10.0
        else:
            raise ValueError('unknown activation function')
        self.W = theano.shared(value=W_values, name='W', borrow=True)
        self.b = theano.shared(value=b_values, name='b', borrow=True)
        inv_dropout_prob = np.float32(1.0 - dropout_prob)
        # Train branch: drop inputs, use full weights.
        # Test branch:  use all inputs, weights scaled by (1 - p).
        lin_output = ifelse(T.eq(training_mode, 1),
                            T.dot(self._dropout(rng, input, dropout_prob), self.W) + self.b,
                            T.dot(input, inv_dropout_prob * self.W) + self.b)
        self.output = activation_function(lin_output)
        self.weights = [self.W, self.b]
    def _dropout(self, rng, layer, p):
        # Zero each element of ``layer`` independently with probability ``p``.
        srng = T.shared_randomstreams.RandomStreams(rng.randint(777777))
        mask = srng.binomial(n=1, p=1 - p, size=layer.shape)
        output = layer * T.cast(mask, 'float32')
        return output
|
{
"content_hash": "d99aca36825efe4e80c5163bf65dadb1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 107,
"avg_line_length": 42.775,
"alnum_prop": 0.5739333722969024,
"repo_name": "IraKorshunova/kaggle-seizure-prediction",
"id": "ec89e442ab9522c23afe79ea118c8ba27a624efa",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layers/hidden_layer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99531"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
# chdir to ~/.nefi2 before local imports
# uncomment this for compiling builds
# NOTE(review): on non-Windows platforms the working directory is switched to
# this file's own directory *before* importing Main — presumably so relative
# resource paths resolve; confirm why Windows builds skip this.
if not (sys.platform == 'win32' or sys.platform == 'win64'):
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
from nefi2.main import Main
def runner(args):
    """
    Dispatch NEFI2 into batch or GUI mode.

    Batch mode is chosen when either a directory or a single file was
    supplied on the command line; otherwise the GUI is started.

    Args:
        | *args* : a Namespace object of supplied command-line arguments
    """
    wants_batch = bool(args.dir or args.file)
    if wants_batch:
        Main.batch_mode(args)
    else:
        Main.gui_mode()
if __name__ == '__main__':
    # Build the command-line interface and hand the parsed namespace to runner().
    arg_parser = argparse.ArgumentParser(description="""NEFI2 is a tool created to
    extract networks from images. Given a suitable 2D image of a network as
    input, NEFI2 outputs a mathematical representation of the structure of the
    depicted network as a weighted undirected planar graph.""")
    arg_parser.add_argument('-p', '--pipeline',
                            help='Specify a saved pipeline xml file.',
                            required=False)
    arg_parser.add_argument('-d', '--dir',
                            help='Specify a directory with images for batch processing.',
                            required=False)
    arg_parser.add_argument('-f', '--file',
                            help='Specify an image file to process.',
                            required=False)
    arg_parser.add_argument('-o', '--out',
                            help='Specify output directory.',
                            required=False)
    runner(arg_parser.parse_args())
|
{
"content_hash": "5def31f227c3b5f00729a7b65a15d2d3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 35.25581395348837,
"alnum_prop": 0.5956464379947229,
"repo_name": "LumPenPacK/NetworkExtractionFromImages",
"id": "d5d9d8210c85565d8356a388e91c37020a9c9755",
"size": "1539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nefi2.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1577"
},
{
"name": "C",
"bytes": "3035840"
},
{
"name": "C++",
"bytes": "147394619"
},
{
"name": "CMake",
"bytes": "603"
},
{
"name": "CSS",
"bytes": "4298"
},
{
"name": "FORTRAN",
"bytes": "14321"
},
{
"name": "HTML",
"bytes": "41126"
},
{
"name": "Lex",
"bytes": "20920"
},
{
"name": "Makefile",
"bytes": "350419"
},
{
"name": "Python",
"bytes": "25507066"
},
{
"name": "QMake",
"bytes": "22941"
},
{
"name": "Shell",
"bytes": "19080"
},
{
"name": "Yacc",
"bytes": "248826"
}
],
"symlink_target": ""
}
|
"""
byceps.blueprints.admin.attendance.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort
from ....services.brand import service as brand_service
from ....services.party import service as party_service
from ....services.ticketing import attendance_service
from ....services.user import service as user_service
from ....util.framework.blueprint import create_blueprint
from ....util.framework.templating import templated
blueprint = create_blueprint('attendance_admin', __name__)
@blueprint.route('/brands/<brand_id>')
@templated
def view_for_brand(brand_id):
    """Show most frequent attendees for parties of this brand."""
    brand = brand_service.find_brand(brand_id)
    if brand is None:
        abort(404)
    party_count = party_service.count_parties_for_brand(brand.id)
    attendee_stats = attendance_service.get_top_attendees_for_brand(brand.id)
    attendee_ids = {uid for uid, _ in attendee_stats}
    attendees = user_service.find_users(attendee_ids, include_avatars=False)
    attendees_by_id = user_service.index_users_by_id(attendees)
    ranked = [
        (attendees_by_id[uid], count)
        for uid, count in attendee_stats
    ]
    # Order: most attendances first; ties broken alphabetically by screen name.
    ranked.sort(key=lambda entry: (-entry[1], entry[0].screen_name))
    return {
        'brand': brand,
        'brand_party_total': party_count,
        'top_attendees': ranked,
    }
|
{
"content_hash": "94aa487d88e42f3ce43874e6cdc4973a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 32,
"alnum_prop": 0.690625,
"repo_name": "m-ober/byceps",
"id": "ac144d7c53e0cc1d4073d60379841ecd44c5b644",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byceps/blueprints/admin/attendance/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
}
|
from .csv2LaTeX import *
import doctest
import sys
if __name__ == "__main__":
    cli_args = sys.argv
    if len(cli_args) > 1:
        # Only the 'test' sub-command is recognized (case-insensitive).
        if cli_args[1].upper() == "TEST":
            doctest.testfile("test.py", verbose=True)
        else:
            print("Bad Input")
    else:
        check_update()
        print(signature())
        # List the directory before make_dir() so the output set is unchanged.
        entries = os.listdir()
        make_dir()
        csv_files = [name for name in entries if name.find(".csv") != -1]
        if len(csv_files) == 0:
            print("There is no csv file in this folder (Please copy your csv files here)")
            print("Current Directory : " + str(os.getcwd()))
            sys.exit()
        else:
            print(str(len(csv_files)) + " CSV file found in this folder! ;-) ")
            for item in csv_files:
                create_latex(item)
|
{
"content_hash": "60622027d7763910c82cce9a9de5ef76",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 90,
"avg_line_length": 31.48148148148148,
"alnum_prop": 0.5117647058823529,
"repo_name": "sepandhaghighi/csv2latex",
"id": "887f7d4fd8eb2f756c12319fad6d477bdcf25cd6",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv2latex/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7497"
}
],
"symlink_target": ""
}
|
import os
from traits.api import HasTraits, Button, Str, Any, List
from traitsui.api import View, UItem, HGroup, VGroup, Controller, TabularEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.editors.api import TextEditor
from traitsui.tabular_adapter import TabularAdapter
class Conflict(HasTraits):
    # Repository-relative path of a file with merge conflicts.
    path = Str
class MergeModel(HasTraits):
    """Model of an in-progress git merge: conflicted paths and both sides' text.

    NOTE(review): ``self.repo`` is read throughout but never assigned in this
    class; it is presumably injected by the caller after construction — confirm.
    """
    # One Conflict instance per conflicted file.
    conflicts = List
    # Text of "our" / "their" side of the currently selected conflict.
    our_text = Str
    their_text = Str
    branch = Str
    remote = Str
    def __init__(self, paths, *args, **kw):
        """Create one Conflict per conflicted repository path."""
        super(MergeModel, self).__init__(*args, **kw)
        self.conflicts = [Conflict(path=p) for p in paths]
    def set_conflict(self, c):
        """Split git conflict markers in *c*'s file into our/their text panes."""
        ourtext = []
        theirtext = []
        with open(os.path.join(self.repo.path, c.path), "r") as rfile:
            # Lines between '<<<<<<<' and '=======' are ours; lines between
            # '=======' and '>>>>>>>' are theirs.  Marker lines are dropped.
            oflag = False
            tflag = False
            for line in rfile:
                if line.startswith("<<<<<<<"):
                    oflag = True
                elif line.strip() == "=======":
                    oflag = False
                    tflag = True
                elif line.startswith(">>>>>>>"):
                    tflag = False
                elif oflag:
                    ourtext.append(line)
                elif tflag:
                    theirtext.append(line)
        self.our_text = "".join(ourtext)
        self.their_text = "".join(theirtext)
    def accept_their(self, fl=None):
        """Resolve the given conflicts (default: all) using 'theirs'."""
        self._merge_accept(fl, "theirs")
    def accept_our(self, fl=None):
        """Resolve the given conflicts (default: all) using 'ours'."""
        self._merge_accept(fl, "ours")
    def commit(self):
        """Record the merge commit for this remote branch."""
        self.repo.commit(
            "merged {}/{} with local/{}".format(self.remote, self.branch, self.branch)
        )
    def _merge_accept(self, fl, strategy):
        # Check out each conflicted path with the chosen strategy, stage it,
        # remove it from the conflict list, then commit the resolution.
        if fl is None:
            fl = self.conflicts
        repo = self.repo.active_repo
        for fi in fl:
            repo.git.checkout("--{}".format(strategy), fi.path)
            self.repo.add(fi.path, commit=False)
            self.conflicts.remove(fi)
        self.repo.commit(
            "merged {} with local/{}. strategy={}".format(
                self.remote, self.branch, strategy
            )
        )
class ConflictAdapter(TabularAdapter):
    # Single-column table showing the conflicted file path.
    columns = [("Name", "path")]
class MergeView(Controller):
    """TraitsUI controller: select conflicts, view both sides, accept one."""
    # Fired on double-click of a conflict row / the selected rows.
    dclicked = Any
    selected = List
    accept_their_button = Button("Accept Their")
    accept_our_button = Button("Accept Our")
    def closed(self, info, is_ok):
        # Commit the merge whenever the window closes (regardless of is_ok).
        self.model.commit()
    def controller_accept_their_button_changed(self, info):
        # Resolve the currently selected conflicts with 'theirs'.
        if self.selected:
            self.model.accept_their(self.selected)
    def controller_accept_our_button_changed(self, info):
        # Resolve the currently selected conflicts with 'ours'.
        if self.selected:
            self.model.accept_our(self.selected)
    def controller_dclicked_changed(self, info):
        # Double-click loads the first selected conflict into the text panes.
        if self.selected:
            self.model.set_conflict(self.selected[0])
    def traits_view(self):
        """Assemble the merge dialog: conflict table, buttons, two text panes."""
        cgrp = VGroup(
            UItem(
                "conflicts",
                editor=TabularEditor(
                    adapter=ConflictAdapter(),
                    operations=[],
                    multi_select=True,
                    selected="controller.selected",
                    dclicked="controller.dclicked",
                ),
            )
        )
        bgrp = VGroup(
            UItem("controller.accept_our_button"),
            UItem("controller.accept_their_button"),
            enabled_when="controller.selected",
        )
        tgrp = HGroup(
            VGroup(
                UItem("our_text", style="custom", editor=TextEditor(read_only=True)),
                show_border=True,
                label="Our",
            ),
            VGroup(
                UItem("their_text", style="custom", editor=TextEditor(read_only=True)),
                show_border=True,
                label="Their",
            ),
        )
        v = View(
            VGroup(HGroup(cgrp, bgrp), tgrp),
            buttons=["OK"],
            title="Merge",
            resizable=True,
        )
        return v
# ============= EOF =============================================
|
{
"content_hash": "360b786afca3a37e4021f112553e7db5",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 87,
"avg_line_length": 28.472602739726028,
"alnum_prop": 0.5056531152273274,
"repo_name": "USGSDenverPychron/pychron",
"id": "c3a4855e6478a67588ebe3693b6aac02f2b58219",
"size": "4957",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/git_archive/merge_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
# Configuration options registered under the [ml2_sriov] group.
sriov_opts = [
    cfg.ListOpt('supported_pci_vendor_devs',
                default=['15b3:1004', '8086:10ca'],
                help=_("Supported PCI vendor devices, defined by "
                       "vendor_id:product_id according to the PCI ID "
                       "Repository. Default enables support for Intel "
                       "and Mellanox SR-IOV capable NICs")),
    cfg.BoolOpt('agent_required',
                default=False,
                help=_("SRIOV neutron agent is required for port binding")),
]
cfg.CONF.register_opts(sriov_opts, "ml2_sriov")
class SriovNicSwitchMechanismDriver(api.MechanismDriver):
    """Mechanism Driver for SR-IOV capable NIC based switching.
    The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the
    sriovNicSwitch L2 agent depending on configuration option.
    Port binding with this driver may require the sriovNicSwitch agent
    to be running on the port's host, and that agent to have connectivity
    to at least one segment of the port's network.
    L2 agent is not essential for port binding; port binding is handled by
    VIF Driver via libvirt domain XML.
    L2 Agent presents in order to manage port update events.
    If vendor NIC does not support updates, setting agent_required = False
    will allow to use Mechanism Driver without L2 agent.
    """
    def __init__(self,
                 agent_type=constants.AGENT_TYPE_NIC_SWITCH,
                 vif_type=portbindings.VIF_TYPE_HW_VEB,
                 vif_details=None,
                 supported_vnic_types=None,
                 supported_pci_vendor_info=None):
        """Initialize base class for SriovNicSwitch L2 agent type.
        :param agent_type: Constant identifying agent type in agents_db
        :param vif_type: Value for binding:vif_type when bound
        :param vif_details: Dictionary with details for VIF driver when bound
            (defaults to {portbindings.CAP_PORT_FILTER: False})
        :param supported_vnic_types: The binding:vnic_type values we can bind
            (defaults to [VNIC_DIRECT, VNIC_MACVTAP])
        :param supported_pci_vendor_info: The pci_vendor_info values to bind
        """
        self.agent_type = agent_type
        self.vif_type = vif_type
        # BUG FIX: the old defaults were mutable objects shared by every
        # driver instance; build fresh ones per instance instead.
        if vif_details is None:
            vif_details = {portbindings.CAP_PORT_FILTER: False}
        self.vif_details = vif_details
        if supported_vnic_types is None:
            supported_vnic_types = [portbindings.VNIC_DIRECT,
                                    portbindings.VNIC_MACVTAP]
        self.supported_vnic_types = supported_vnic_types
        # BUG FIX: this constructor argument was previously accepted but
        # silently ignored.  initialize() overwrites these from config.
        self.pci_vendor_info = supported_pci_vendor_info
        self.agent_required = False
    def initialize(self):
        """Load and validate [ml2_sriov] configuration options."""
        try:
            self.pci_vendor_info = self._parse_pci_vendor_config(
                cfg.CONF.ml2_sriov.supported_pci_vendor_devs)
            self.agent_required = cfg.CONF.ml2_sriov.agent_required
        except ValueError:
            LOG.exception(_LE("Failed to parse supported PCI vendor devices"))
            raise cfg.Error(_("Parsing supported pci_vendor_devs failed"))
    def bind_port(self, context):
        """Attempt to bind the port, optionally requiring a live L2 agent."""
        LOG.debug("Attempting to bind port %(port)s on "
                  "network %(network)s",
                  {'port': context.current['id'],
                   'network': context.network.current['id']})
        vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                        portbindings.VNIC_NORMAL)
        if vnic_type not in self.supported_vnic_types:
            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                      vnic_type)
            return
        if not self._check_supported_pci_vendor_device(context):
            LOG.debug("Refusing to bind due to unsupported pci_vendor device")
            return
        if self.agent_required:
            # Bind only via a live agent on the port's host.
            for agent in context.host_agents(self.agent_type):
                LOG.debug("Checking agent: %s", agent)
                if agent['alive']:
                    if self.try_to_bind(context, agent):
                        return
                else:
                    LOG.warning(_LW("Attempting to bind with dead agent: %s"),
                                agent)
        else:
            self.try_to_bind(context)
    def try_to_bind(self, context, agent=None):
        """Bind to the first bindable segment; return True on success."""
        for segment in context.network.network_segments:
            if self.check_segment(segment, agent):
                context.set_binding(segment[api.ID],
                                    self.vif_type,
                                    self.get_vif_details(context, segment),
                                    constants.PORT_STATUS_ACTIVE)
                LOG.debug("Bound using segment: %s", segment)
                return True
        return False
    def check_segment(self, segment, agent=None):
        """Check if segment can be bound.
        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind or None
        :returns: True if segment can be bound for agent
        """
        network_type = segment[api.NETWORK_TYPE]
        if network_type == p_const.TYPE_VLAN:
            if agent:
                # Agent must have a device mapping for the physical network.
                mappings = agent['configurations'].get('device_mappings', {})
                LOG.debug("Checking segment: %(segment)s "
                          "for mappings: %(mappings)s ",
                          {'segment': segment, 'mappings': mappings})
                return segment[api.PHYSICAL_NETWORK] in mappings
            return True
        return False
    def _check_supported_pci_vendor_device(self, context):
        """Return True when the port's pci_vendor_info is in the allow-list."""
        if self.pci_vendor_info:
            profile = context.current.get(portbindings.PROFILE, {})
            if not profile:
                LOG.debug("Missing profile in port binding")
                return False
            pci_vendor_info = profile.get('pci_vendor_info')
            if not pci_vendor_info:
                LOG.debug("Missing pci vendor info in profile")
                return False
            if pci_vendor_info not in self.pci_vendor_info:
                LOG.debug("Unsupported pci_vendor %s", pci_vendor_info)
                return False
            return True
        return False
    def get_vif_details(self, context, segment):
        """Return VIF details for this binding, adding the VLAN id if any.
        BUG FIX: a copy is returned instead of mutating self.vif_details,
        so a VLAN id from one binding no longer leaks into subsequent
        (non-VLAN) bindings made by the same driver instance.
        """
        vif_details = dict(self.vif_details)
        if segment[api.NETWORK_TYPE] == p_const.TYPE_VLAN:
            vif_details[portbindings.VIF_DETAILS_VLAN] = str(
                segment[api.SEGMENTATION_ID])
        return vif_details
    def _parse_pci_vendor_config(self, pci_vendor_list):
        """Validate 'vendor_id:product_id' entries; raise ValueError on bad ones."""
        parsed_list = []
        for elem in pci_vendor_list:
            elem = elem.strip()
            if not elem:
                continue
            split_result = elem.split(':')
            if len(split_result) != 2:
                raise ValueError(_("Invalid pci_vendor_info: '%s'") % elem)
            vendor_id = split_result[0].strip()
            if not vendor_id:
                raise ValueError(_("Missing vendor_id in: '%s'") % elem)
            product_id = split_result[1].strip()
            if not product_id:
                raise ValueError(_("Missing product_id in: '%s'") % elem)
            parsed_list.append(elem)
        return parsed_list
|
{
"content_hash": "c5f0b4eb514ae9abaf3457852dd96de8",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 43,
"alnum_prop": 0.5871749002339343,
"repo_name": "vveerava/Openstack",
"id": "577ba57bda8f2c5e489826d6478d3d68a8eebf54",
"size": "7860",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "8569441"
},
{
"name": "Shell",
"bytes": "15503"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from . import *
Build = Method("Build")
@Build.implementation(Months)
def build(month_filter, dsl_aggregation_node):
    # Record the month filter on the aggregation node, sorted ascending.
    month_numbers = list(month_filter.month_numbers)
    month_numbers.sort()
    dsl_aggregation_node.set_months(month_numbers)
@Build.implementation(FromDate)
def build(from_date, dsl_aggregation_node):
    # Record the lower date bound on the aggregation node.
    dsl_aggregation_node.set_from_date(from_date.date)
@Build.implementation(ToDate)
def build(to_date, dsl_aggregation_node):
    # Record the upper date bound on the aggregation node.
    dsl_aggregation_node.set_to_date(to_date.date)
@Build.implementation(*aggregations)
def apply_context(aggregation):
    # Resolve the sample table from the dataset name, reset the filter
    # slots, then build every child specification against this aggregation.
    aggregation.sample_table = SampleTable.with_name(aggregation.dataset_name)
    aggregation.from_date = None
    aggregation.to_date = None
    aggregation.month_numbers = None
    for specification in aggregation.specification:
        Build(specification, aggregation)
@Build.implementation(Addition, Subtraction, Multiplication, Division)
def binop_build(binop):
    # Recurse into both operands of arithmetic binary operators.
    Build(binop.left)
    Build(binop.right)
@Build.implementation(Pow)
def binop_build(binop):
    # Only the base is built; presumably the exponent is a plain Number,
    # which needs no build step (see Number below) -- TODO confirm.
    Build(binop.left)
def set_months(node, month_numbers):
    """Attach a month filter to *node*; specifying it twice is an error."""
    assert node.month_numbers is None, "Months were specified twice."
    node.month_numbers = month_numbers
AggregationNode.set_months = set_months
def set_to_date(node, to_date):
    """Attach an upper date bound to *node*; specifying it twice is an error."""
    assert node.to_date is None, "ToDate was specified twice."
    node.to_date = to_date
AggregationNode.set_to_date = set_to_date
def set_from_date(node, from_date):
    """Attach a lower date bound to *node*; specifying it twice is an error."""
    assert node.from_date is None, "FromDate was specified twice."
    node.from_date = from_date
AggregationNode.set_from_date = set_from_date
@Build.implementation(Number)
def Number_build(positive_number):
pass
|
{
"content_hash": "b3da73622521a8f619173ae045f9f36f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.7554786620530565,
"repo_name": "flavour/helios",
"id": "7fd2043ba076153bb32724d9c1956ddd103457d9",
"size": "1736",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/ClimateDataPortal/DSL/Build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "13065177"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "21048713"
},
{
"name": "Shell",
"bytes": "1645"
}
],
"symlink_target": ""
}
|
from raggregate.models import DBSession
from raggregate.models.motd import MOTD
from raggregate.queries import motd as motd_queries
from raggregate.queries import general
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPNotFound
@view_config(renderer='motd.mak', route_name='motd')
def motd(request):
    """Admin-only view: list MOTDs and handle the add-message form.

    Returns a template dict with the list of messages; non-admins get 404.
    """
    session = request.session
    post = session['safe_post']
    dbsession = DBSession()
    # Only allow admins to add a message at this time
    #@TODO: unify admin-only page handling so that we can easily change this
    # some day if we want.
    if 'logged_in_admin' not in session or session['logged_in_admin'] == False:
        return HTTPNotFound()
    # Handle the form input
    if post:
        if 'add_motd_button' in post:
            message_to_add = general.get_from_post(post, 'message_to_add')
            author = general.get_from_post(post, 'author')
            source = general.get_from_post(post, 'source')
            link = general.get_from_post(post, 'link')
            datestring = general.get_from_post(post, 'datestring')
            added_by = session['users.id']
            if message_to_add:
                if not author:
                    author = 'Unknown'
                if not source:
                    source = 'Source unknown'
                if not datestring:
                    # BUG FIX: previously assigned to an unused local
                    # ``date``, so the default never reached create_motd.
                    datestring = 'Date unknown'
                try:
                    new_motd = motd_queries.create_motd(message = message_to_add,
                                    author = author, source = source, link = link,
                                    added_by = added_by, datestring = datestring)
                    session['message'] = "Message of the Day Added!"
                # ``except Exception, ex`` replaced with the Python 2.6+/3
                # compatible ``as`` form; print() works in both as well.
                except Exception as ex:
                    print(str(ex))
                    session['message'] = 'There was a problem adding your message.'
                    return {'motds': [], 'success': False, 'code': 'EBADPOST'}
            else:
                session['message'] = 'Please enter a message'
    motds = motd_queries.get_all_messages()
    return {'motds': motds}
|
{
"content_hash": "6f1d4ae13232fd014f1144c995a8eb43",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 39.611111111111114,
"alnum_prop": 0.5820476858345021,
"repo_name": "sjuxax/raggregate",
"id": "82b6919facab87c114eea454daa17d533a667578",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raggregate/views/motd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "68898"
},
{
"name": "Python",
"bytes": "158538"
}
],
"symlink_target": ""
}
|
"""Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
# Install path templates, one scheme per platform/layout.  Placeholders
# ({base}, {userbase}, {py_version_short}, ...) are expanded by
# _expand_vars()/_subst_vars() against the config variables.
_INSTALL_SCHEMES = {
    # Standard POSIX install under a prefix (e.g. /usr or a virtualenv).
    'posix_prefix': {
        'stdlib': '{base}/lib/python{py_version_short}',
        'platstdlib': '{platbase}/lib/python{py_version_short}',
        'purelib': '{base}/lib/python{py_version_short}/site-packages',
        'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
        'include': '{base}/include/python{py_version_short}',
        'platinclude': '{platbase}/include/python{py_version_short}',
        'scripts': '{base}/bin',
        'data': '{base}',
        },
    # "Home" install layout (python setup.py install --home=...).
    'posix_home': {
        'stdlib': '{base}/lib/python',
        'platstdlib': '{base}/lib/python',
        'purelib': '{base}/lib/python',
        'platlib': '{base}/lib/python',
        'include': '{base}/include/python',
        'platinclude': '{base}/include/python',
        'scripts': '{base}/bin',
        'data' : '{base}',
        },
    # Windows layout.
    'nt': {
        'stdlib': '{base}/Lib',
        'platstdlib': '{base}/Lib',
        'purelib': '{base}/Lib/site-packages',
        'platlib': '{base}/Lib/site-packages',
        'include': '{base}/Include',
        'platinclude': '{base}/Include',
        'scripts': '{base}/Scripts',
        'data' : '{base}',
        },
    # OS/2 layouts (historical).
    'os2': {
        'stdlib': '{base}/Lib',
        'platstdlib': '{base}/Lib',
        'purelib': '{base}/Lib/site-packages',
        'platlib': '{base}/Lib/site-packages',
        'include': '{base}/Include',
        'platinclude': '{base}/Include',
        'scripts': '{base}/Scripts',
        'data' : '{base}',
        },
    'os2_home': {
        'stdlib': '{userbase}/lib/python{py_version_short}',
        'platstdlib': '{userbase}/lib/python{py_version_short}',
        'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
        'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
        'include': '{userbase}/include/python{py_version_short}',
        'scripts': '{userbase}/bin',
        'data' : '{userbase}',
        },
    # Per-user site directories (PEP 370).
    'nt_user': {
        'stdlib': '{userbase}/Python{py_version_nodot}',
        'platstdlib': '{userbase}/Python{py_version_nodot}',
        'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
        'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
        'include': '{userbase}/Python{py_version_nodot}/Include',
        'scripts': '{userbase}/Scripts',
        'data'   : '{userbase}',
        },
    'posix_user': {
        'stdlib': '{userbase}/lib/python{py_version_short}',
        'platstdlib': '{userbase}/lib/python{py_version_short}',
        'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
        'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
        'include': '{userbase}/include/python{py_version_short}',
        'scripts': '{userbase}/bin',
        'data'   : '{userbase}',
        },
    'osx_framework_user': {
        'stdlib': '{userbase}/lib/python',
        'platstdlib': '{userbase}/lib/python',
        'purelib': '{userbase}/lib/python/site-packages',
        'platlib': '{userbase}/lib/python/site-packages',
        'include': '{userbase}/include',
        'scripts': '{userbase}/bin',
        'data'   : '{userbase}',
        },
    }
# Path keys every install scheme must define.
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
                'scripts', 'data')
# Full version string, e.g. '2.7.6'.
_PY_VERSION = sys.version.split()[0]
# e.g. '2.7'.  NOTE(review): slicing sys.version breaks for versions with
# two-digit components (3.10+); acceptable for the Python 2 era this targets.
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Lazily-populated module-level caches (filled elsewhere in this module).
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
# Determine the directory this Python was built/run from.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())
# Windows build trees: climb out of PCbuild / PC/VS7.1 / PC/AMD64 so
# _PROJECT_BASE points at the source checkout root.
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
    # the build directory for posix builds
    _PROJECT_BASE = os.path.normpath(os.path.abspath("."))
def is_python_build():
    """Return True when running from a Python source checkout/build tree."""
    return any(
        os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn))
        for fn in ("Setup.dist", "Setup.local"))
_PYTHON_BUILD = is_python_build()
# When running from a source build, C headers live in the checkout rather
# than in an installed include directory; patch the POSIX schemes.
if _PYTHON_BUILD:
    for scheme in ('posix_prefix', 'posix_home'):
        _INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
        _INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
    """Return the install paths of *scheme* with config variables substituted.

    *vars* (may be None) is extended with the global config vars before
    substitution; paths are expanduser'd on posix/nt and normalized.
    """
    if vars is None:
        vars = {}
    _extend_dict(vars, get_config_vars())
    expanded = {}
    for key, template in _INSTALL_SCHEMES[scheme].items():
        if os.name in ('posix', 'nt'):
            template = os.path.expanduser(template)
        expanded[key] = os.path.normpath(_subst_vars(template, vars))
    return expanded
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return env_base if env_base else joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
return env_base if env_base else \
joinuser("~", "Library", framework, "%d.%d"
% (sys.version_info[:2]))
return env_base if env_base else joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.
    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    import re
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).
    _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
    if vars is None:
        vars = {}
    # 'done' holds fully-resolved values; 'notdone' holds values that
    # still contain $(VAR)/${VAR} references to resolve below.
    done = {}
    notdone = {}
    with open(filename) as f:
        lines = f.readlines()
    for line in lines:
        # Skip comments and blank lines.
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                # Values that look like integers are stored as ints.
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # do variable interpolation here
    # NOTE(review): iterating notdone.keys() while deleting entries is
    # safe on Python 2 because keys() snapshots into a list.
    while notdone:
        for name in notdone.keys():
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                else:
                    # Unknown variables expand to the empty string.
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        # More references remain; keep it pending.
                        notdone[name] = value
                    else:
                        try: value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    vars.update(done)
    return vars
def _get_makefile_filename():
    """Return the path of the Makefile for this Python build."""
    if _PYTHON_BUILD:
        # Source checkout: the Makefile sits at the project root.
        base, parts = _PROJECT_BASE, ("Makefile",)
    else:
        # Installed layout: <platstdlib>/config/Makefile
        base, parts = get_path('platstdlib'), ("config", "Makefile")
    return os.path.join(base, *parts)
def _generate_posix_vars():
    """Generate the Python module containing build-time variables.

    Parses the installed Makefile and pyconfig.h into a dict and writes
    it out as ``_sysconfigdata.py`` inside the build directory; also
    records the build directory name in ``pybuilddir.txt``.

    Raises IOError with a descriptive message when either input file
    cannot be read.
    """
    import pprint
    vars = {}
    # load the installed Makefile:
    makefile = _get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    # 'except ... as' (PEP 3110) works on Python 2.6+ and 3.x, unlike
    # the old 'except IOError, e' form.
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % makefile
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % config_h
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
    # There's a chicken-and-egg situation on OS X with regards to the
    # _sysconfigdata module after the changes introduced by #15298:
    # get_config_vars() is called by get_platform() as part of the
    # `make pybuilddir.txt` target -- which is a precursor to the
    # _sysconfigdata.py module being constructed. Unfortunately,
    # get_config_vars() eventually calls _init_posix(), which attempts
    # to import _sysconfigdata, which we won't have built yet. In order
    # for _init_posix() to work, if we're on Darwin, just mock up the
    # _sysconfigdata module manually and populate it with the build vars.
    # This is more than sufficient for ensuring the subsequent call to
    # get_platform() succeeds.
    name = '_sysconfigdata'
    if 'darwin' in sys.platform:
        import imp
        module = imp.new_module(name)
        module.build_time_vars = vars
        sys.modules[name] = module
    pybuilddir = 'build/lib.%s-%s' % (get_platform(), sys.version[:3])
    if hasattr(sys, "gettotalrefcount"):
        # Debug builds get a distinct build-directory suffix.
        pybuilddir += '-pydebug'
    try:
        os.makedirs(pybuilddir)
    except OSError:
        # Directory may already exist; that's fine.
        pass
    destfile = os.path.join(pybuilddir, name + '.py')
    with open(destfile, 'wb') as f:
        f.write('# system configuration generated and used by'
                ' the sysconfig module\n')
        f.write('build_time_vars = ')
        pprint.pprint(vars, stream=f)
    # Create file used for sys.path fixup -- see Modules/getpath.c
    with open('pybuilddir.txt', 'w') as f:
        f.write(pybuilddir)
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems.

    Updates *vars* in place with the build-time variables recorded at
    build time.
    """
    # _sysconfigdata is generated at build time, see _generate_posix_vars()
    from _sysconfigdata import build_time_vars
    vars.update(build_time_vars)
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT.

    Updates *vars* in place with the basic install directories plus the
    Windows-specific extension/executable suffixes.
    """
    # set basic install directories
    vars.update(
        LIBDEST=get_path('stdlib'),
        BINLIBDEST=get_path('platstdlib'),
        INCLUDEPY=get_path('include'),
        SO='.pyd',
        EXE='.exe',
        VERSION=_PY_VERSION_SHORT_NO_DOT,
        BINDIR=os.path.dirname(_safe_realpath(sys.executable)),
    )
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.
    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    import re
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    # readline() returns '' at EOF, which terminates the iterator.
    for line in iter(fp.readline, ''):
        match = define_rx.match(line)
        if match:
            name, value = match.group(1, 2)
            # Integer-looking values are stored as ints.
            try:
                value = int(value)
            except ValueError:
                pass
            vars[name] = value
            continue
        match = undef_rx.match(line)
        if match:
            # A commented-out '#undef NAME' records the symbol as 0.
            vars[match.group(1)] = 0
    return vars
def get_config_h_filename():
    """Returns the path of pyconfig.h."""
    if not _PYTHON_BUILD:
        # Installed Python: header lives in the platform include dir.
        inc_dir = get_path('platinclude')
    elif os.name == "nt":
        # Windows source builds keep pyconfig.h under PC/.
        inc_dir = os.path.join(_PROJECT_BASE, "PC")
    else:
        inc_dir = _PROJECT_BASE
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Return a sorted tuple containing the install scheme names."""
    # Use sorted() instead of list.sort() on dict.keys(): on Python 3
    # keys() returns a view with no sort() method, so the old form
    # would raise AttributeError.  Result is identical on Python 2.
    return tuple(sorted(_INSTALL_SCHEMES))
def get_path_names():
    """Returns a tuple containing the paths names."""
    # _SCHEME_KEYS is the module-level tuple of path keys; returned
    # as-is (callers must not mutate it).
    return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.  With
    ``expand`` false the raw (unsubstituted) scheme template dict is
    returned instead.
    """
    # NOTE: the default for ``scheme`` is evaluated once at import
    # time, matching the historical sysconfig behaviour.
    if not expand:
        return _INSTALL_SCHEMES[scheme]
    return _expand_vars(scheme, vars)
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a path corresponding to the scheme.
    ``scheme`` is the install scheme name.
    """
    # Convenience wrapper: single-key lookup in get_paths(); raises
    # KeyError when ``name`` is not a known path key.
    return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.
    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.
    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    import re  # NOTE(review): unused here; kept for compatibility
    global _CONFIG_VARS
    # The dict is built lazily on first call and cached in the module
    # global; later calls return the same dict object.
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # Distutils.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        _CONFIG_VARS['userbase'] = _getuserbase()
        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                # cwd may have been deleted; treat as unknown.
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
        # OS X platforms require special customization to handle
        # multi-architecture, multi-os-version installers
        if sys.platform == 'darwin':
            import _osx_support
            _osx_support.customize_config_vars(_CONFIG_VARS)
    if args:
        # Positional lookup: missing names yield None, not KeyError.
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Return the value of a single variable using the dictionary returned by
    'get_config_vars()'.
    Equivalent to get_config_vars().get(name)
    """
    # Returns None for unknown names, mirroring dict.get().
    return get_config_vars().get(name)
def get_platform():
    """Return a string that identifies the current platform.
    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.
    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2
    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)
    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    import re
    if os.name == 'nt':
        # sniff sys.version for architecture.
        # The compiler banner embedded in sys.version contains e.g.
        # "64 bit (AMD64)".
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform
    # Set for cross builds explicitly
    if "_PYTHON_HOST_PLATFORM" in os.environ:
        return os.environ["_PYTHON_HOST_PLATFORM"]
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because a
            # bootstrap problem. We use a dict to get an error
            # if some suspicious happens.
            bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
            machine += ".%s" % bitness[sys.maxint]
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Keep only the leading dotted-numeric part of the release.
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        import _osx_support
        osname, release, machine = _osx_support.get_platform_osx(
                                                    get_config_vars(),
                                                    osname, release, machine)
    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short Python version string, e.g. '2.7'."""
    return _PY_VERSION_SHORT
def _print_dict(title, data):
    """Pretty-print *data* as tab-indented 'key = "value"' lines under a
    *title* header; prints nothing for an empty mapping."""
    for index, (key, value) in enumerate(sorted(data.items())):
        if index == 0:
            # Emit the section header once, before the first item.
            print '%s: ' % (title)
        print '\t%s = "%s"' % (key, value)
def _main():
    """Display all sysconfig information, or regenerate the posix
    build-time variables module when --generate-posix-vars is given."""
    if '--generate-posix-vars' in sys.argv:
        _generate_posix_vars()
        return
    print 'Platform: "%s"' % get_platform()
    print 'Python version: "%s"' % get_python_version()
    print 'Current installation scheme: "%s"' % _get_default_scheme()
    print
    _print_dict('Paths', get_paths())
    print
    _print_dict('Variables', get_config_vars())
if __name__ == '__main__':
    _main()
|
{
"content_hash": "2881845c07a5cce647a7e3ee5ab1c996",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 79,
"avg_line_length": 36.19496855345912,
"alnum_prop": 0.5566029539530842,
"repo_name": "kleientertainment/ds_mod_tools",
"id": "e55c834213e31bdeaf4c3e2cb79fceead723bfbc",
"size": "23020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pkg/win32/Python27/Lib/sysconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C",
"bytes": "10931"
},
{
"name": "C++",
"bytes": "437813"
},
{
"name": "Lua",
"bytes": "9787"
},
{
"name": "Python",
"bytes": "8021665"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
"""
demo_sample_uf_faculty.py -- Generate a random sample of current UF faculty
Version 0.1 MC 2013-12-27
-- Initial version
Version 0.2 MC 2014-08-30
-- Updated for vivofoundation, code formatting for PEP 8
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivofoundation import vivo_sparql_query
import random
from datetime import datetime
query = """
SELECT ?uri WHERE
{
?uri a vivo:FacultyMember .
?uri a ufVivo:UFCurrentEntity .
}
"""
print datetime.now(), "Gathering Current UF Faculty from VIVO"
data = vivo_sparql_query(query)
print datetime.now(), "Current UF Faculty found = ", \
len(data["results"]["bindings"])
print datetime.now(), "Load data structure with results"
d = []
for item in data["results"]["bindings"]:
d.append(item["uri"]["value"])
print datetime.now(), "Select random sample"
random.shuffle(d)
print datetime.now(), "Show selected faculty by VIVO URI"
for i in range(100):
print d[i]
print datetime.now(), "Finished"
|
{
"content_hash": "3dc8f087e38e97cccf09105537ecc62a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 27.65,
"alnum_prop": 0.6808318264014467,
"repo_name": "mconlon17/vivo-1.5-improvement",
"id": "c89681c628231d6b7e50e89b808441a963f70fcc",
"size": "1106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/demo_sample_uf_faculty.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70774"
},
{
"name": "TeX",
"bytes": "551"
}
],
"symlink_target": ""
}
|
import os
import re
def remove_duplicates_preserve_order(items):
    """Return a list of *items* with duplicates dropped, keeping the
    first occurrence of each element in its original position."""
    seen = set()
    result = []
    for element in items:
        if element in seen:
            continue
        seen.add(element)
        result.append(element)
    return result
def check_dir(element, final_list):
    """Append *element* to *final_list* when it names an existing
    directory, first un-escaping backslash-escaped spaces (os.path
    cannot test paths containing escaped spaces)."""
    unescaped, _count = re.subn(r'\\\s', ' ', element)
    if os.path.isdir(unescaped):
        final_list.append(unescaped)
def parse_gtmroutines():
    """Read the 'gtmroutines' environment variable and print the
    semicolon-joined list of existing source directories it names."""
    var = os.getenv('gtmroutines')
    final_list = extract_m_source_dirs(var)
    final_str = ';'.join(final_list)
    print final_str
def extract_m_source_dirs(var):
    """Extract existing source directories from a gtmroutines-style
    specification string.

    *var* is a space-separated list of entries where a backslash
    escapes a literal space inside a path, and an entry may take the
    form ``object_dir(source_dir)``.  Returns the unique directories
    that exist on disk, in order of first appearance.
    """
    # First, replace unescaped spaces with semicolons: every space
    # becomes a separator, then the escaped ones (now "\;") are
    # restored to "\ ".  Explicit double backslashes avoid the invalid
    # escape sequences ("\;", "\ ") the original relied on.
    tmp = var.replace(" ", ";").replace("\\;", "\\ ")
    final_list = []
    # Iterate entries directly instead of indexing with xrange, which
    # does not exist on Python 3.
    for element in tmp.split(";"):
        element = element.strip(")")
        # For "object(source)" entries only the part after the first
        # '(' names the source directory.
        paren = element.find("(")
        if paren == -1:
            check_dir(element, final_list)
        else:
            check_dir(element[paren + 1:], final_list)
    # Remove duplicates while preserving the original ordering.
    return remove_duplicates_preserve_order(final_list)
if __name__ == "__main__":
parse_gtmroutines()
|
{
"content_hash": "ec55747636e1271820813eedfa95d212",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 68,
"avg_line_length": 27.574468085106382,
"alnum_prop": 0.6550925925925926,
"repo_name": "apexdatasolutions/VistA",
"id": "8130556a9203141907529cc02ab1d7c117602094",
"size": "2083",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Testing/Python/ParseGTMRoutines.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "49247"
},
{
"name": "CSS",
"bytes": "9885"
},
{
"name": "Genshi",
"bytes": "71549015"
},
{
"name": "HTML",
"bytes": "6005"
},
{
"name": "JavaScript",
"bytes": "5277"
},
{
"name": "M",
"bytes": "435579"
},
{
"name": "Pascal",
"bytes": "11430263"
},
{
"name": "Python",
"bytes": "1178432"
},
{
"name": "Ruby",
"bytes": "11817"
},
{
"name": "Shell",
"bytes": "89944"
}
],
"symlink_target": ""
}
|
import collections
from supriya import CalculationRate
from supriya.ugens.PureUGen import PureUGen
class BufAllpassL(PureUGen):
    """
    A buffer-based linear-interpolating allpass delay line unit generator.

    ::

        >>> buffer_id = 0
        >>> source = supriya.ugens.In.ar(bus=0)
        >>> supriya.ugens.BufAllpassL.ar(
        ...     buffer_id=buffer_id,
        ...     source=source,
        ...     )
        BufAllpassL.ar()

    """
    ### CLASS VARIABLES ###
    __documentation_section__ = "Delay UGens"
    # Ordered mapping of input names for this UGen; the mapped values
    # are presumably per-input defaults (None meaning no default) --
    # confirm against the PureUGen base class.
    _ordered_input_names = collections.OrderedDict(
        [
            ("buffer_id", None),
            ("source", None),
            ("maximum_delay_time", 0.2),
            ("delay_time", 0.2),
            ("decay_time", 1.0),
        ]
    )
    # Restrict construction to audio- and control-rate variants.
    _valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
|
{
"content_hash": "926a57071e682ae25c27cd4b840fc09f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 23.37837837837838,
"alnum_prop": 0.5549132947976878,
"repo_name": "Pulgama/supriya",
"id": "d4a124ccfcd95dbf7a9660fd27db11556587c6fc",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/ugens/BufAllpassL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
}
|
"""Showcases *ATD (1995)* colour appearance model computations."""
import numpy as np
import colour
from colour.appearance.atd95 import CAM_ReferenceSpecification_ATD95
from colour.utilities import message_box
message_box('"ATD (1995)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_0 = np.array([95.05, 100.00, 108.88])
Y_0 = 318.31
k_1 = 0.0
k_2 = 50.0
message_box(
f'Converting to the "ATD (1995)" colour appearance model specification '
f"using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_0: {XYZ_0}\n"
f"\tY_0: {Y_0}\n"
f"\tk_1: {k_1}\n"
f"\tk_2: {k_2}"
)
specification = colour.XYZ_to_ATD95(XYZ, XYZ_0, Y_0, k_1, k_2)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "ATD (1995)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as closest as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_ATD95(*specification.values))
|
{
"content_hash": "403c2cdfd5f97f5d7d26c5eb90eb91fb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 31.85,
"alnum_prop": 0.7009419152276295,
"repo_name": "colour-science/colour",
"id": "8dd8f928bccd415562dc5ff3b723edc316c95680",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "colour/examples/appearance/examples_atd95.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7967270"
},
{
"name": "TeX",
"bytes": "163213"
},
{
"name": "Visual Basic 6.0",
"bytes": "1170"
}
],
"symlink_target": ""
}
|
from optparse import make_option
import django
from evostream.default import api
from evostream.management.base import BaseEvoStreamCommand
class Command(BaseEvoStreamCommand):
    """Management command wrapping the EvoStream removeConfig call."""
    help = 'Stop the stream and remove the corresponding configuration entry.'
    requires_system_checks = False
    # Result keys suppressed from the command's output.
    silent_keys = 'configId',
    # Django >= 1.8 parses arguments with argparse; older versions
    # still use the optparse-based option_list mechanism.
    if django.VERSION[:2] > (1, 7):
        def add_arguments(self, parser):
            parser.add_argument('idOrGroupName', type=str,
                                help='The configId of the configuration that '
                                     'needs to be removed or the name of the '
                                     'group that needs to be removed.')
            parser.add_argument('--remove_hls_hds_files', action='store',
                                type=int, choices=[0, 1], default=0,
                                dest='removeHlsHdsFiles',
                                help='Remove folder associated with HLS/HDS '
                                     'stream')
    else:
        args = '<idOrGroupName>'
        option_list = BaseEvoStreamCommand.option_list + (
            make_option('--remove_hls_hds_files', action='store',
                        type='choice', choices=['0', '1'], default='0',
                        dest='removeHlsHdsFiles',
                        help='Remove folder associated with HLS/HDS stream'),
        )
    def get_results(self, idOrGroupName, *args, **options):
        """Remove by numeric configId when the argument parses as an
        int, otherwise treat it as a group name."""
        try:
            return api.remove_config(id=int(idOrGroupName), **options)
        except ValueError:
            return api.remove_config(groupName=idOrGroupName, **options)
|
{
"content_hash": "cf994439ef16a0638255822db436c56e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 40.09756097560975,
"alnum_prop": 0.551094890510949,
"repo_name": "tomi77/django-evostream",
"id": "91c7de43437ba94979b0345b8cb8f4a4981698d5",
"size": "1644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evostream/management/commands/removeconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146473"
}
],
"symlink_target": ""
}
|
"""
The render job watch panel allows you to
1. setup filters to automatically load jobs. (defaults to loading your jobs)
2. individually add jobs you want to watch.
"""
import os
import logging
from functools import partial
import plow.client
from plow.client import JobState, TaskState
from plow.gui import constants
from plow.gui.manifest import QtCore, QtGui
from plow.gui.panels import Panel
from plow.gui.common.widgets import CheckableListBox, BooleanCheckBox, SpinSliderWidget, ManagedListWidget
from plow.gui.common.job import JobProgressBar, JobSelectionDialog, JobStateWidget, JobContextMenu
from plow.gui.util import formatMaxValue, formatDateTime, formatDuration, copyToClipboard
from plow.gui.event import EventManager
LOGGER = logging.getLogger(__name__)
JOBID_ROLE = QtCore.Qt.UserRole
JOB_ROLE = QtCore.Qt.UserRole + 1
class RenderJobWatchPanel(Panel):
    """Dockable panel hosting a RenderJobWatchWidget plus the actions,
    settings dialog and persistence glue around it."""
    def __init__(self, name="Render Watch", parent=None):
        Panel.__init__(self, name, "Render Watch", parent)
        # Setup the default configuration
        self.setAttr("loadMine", True)
        self.setAttr("projects", [])
        self.setAttr("allProjects", True)
        self.setAttr("refreshSeconds", 5)
        self.setAttr("users", [])
        self.setAttr("loadedJobs", [])
        self.setWidget(RenderJobWatchWidget(self.attrs, self))
        self.setWindowTitle(name)
    def init(self):
        """Install the title-bar actions (load jobs / sweep finished)."""
        # TODO
        # comment button (multi-select)
        #
        self.titleBarWidget().addAction(
            QtGui.QIcon(":/images/load.png"), "Load", self.openLoadDialog)
        self.titleBarWidget().addAction(
            QtGui.QIcon(":/images/sweep.png"), "Remove Finished Jobs", self.removeFinishedJobs)
    def openLoadDialog(self):
        """Let the user pick jobs to watch and add each selection."""
        dialog = JobSelectionDialog(parent=self)
        if dialog.exec_():
            widget = self.widget()
            for job in dialog.getSelectedJobs():
                widget.addJob(job)
    def _openPanelSettingsDialog(self):
        """Show the settings dialog and apply any accepted changes."""
        self.__fixAttrs()
        d = RenderJobWatchSettingsDialog(self.attrs)
        if d.exec_():
            attrs = d.getAttrs()
            # Stored as int so QSettings round-trips it reliably.
            attrs['allProjects'] = int(attrs['allProjects'])
            self.attrs.update(attrs)
            self.setRefreshTime(self.attrs["refreshSeconds"])
    def save(self, settings):
        """Persist the watched job ids along with the panel attrs."""
        widget = self.widget()
        ids = [j.id for j in widget.jobs()]
        self.setAttr("loadedJobs", ids)
        self.setAttr("allProjects", int(self.attrs["allProjects"]))
        super(RenderJobWatchPanel, self).save(settings)
    def restore(self, settings):
        """Restore panel attrs and re-load previously watched jobs."""
        super(RenderJobWatchPanel, self).restore(settings)
        self.__fixAttrs()
        jobIds = self.getAttr("loadedJobs")
        if jobIds:
            # Defer until the event loop is running.
            QtCore.QTimer.singleShot(0, partial(self.__loadJobList, jobIds))
    def removeFinishedJobs(self):
        """Drop all finished jobs from the watch list."""
        self.widget().removeFinishedJobs()
    def refresh(self):
        """Trigger a data refresh on the inner widget."""
        self.widget().refresh()
    def __fixAttrs(self):
        # Older PySide QSettings may serialize a single item
        # list to just a string
        for attr in ('users', 'projects', 'loadedJobs'):
            val = self.attrs[attr]
            if isinstance(val, (str, unicode)):
                self.attrs[attr] = [val]
        self.attrs['allProjects'] = int(self.attrs['allProjects'])
    def __loadJobList(self, jobIds):
        """Fetch the given job ids from plow and add them to the view."""
        widget = self.widget()
        jobs = plow.client.get_jobs(jobIds=jobIds)
        for job in jobs:
            widget.addJob(job)
class RenderJobWatchWidget(QtGui.QWidget):
    """Tree view listing watched render jobs, one row per job, with
    per-row progress bars and a job context menu."""
    # Column headers and their initial pixel widths (index-aligned).
    HEADER = ["Job", "State", "Run", "Pend.", "Min", "Max", "Max MB", "Duration", "Progress"]
    WIDTH = [400, 75, 60, 60, 60, 60, 65, 100, 125]
    # Row colors used to flag dead tasks and paused jobs.
    COLOR_DEAD = constants.COLOR_TASK_STATE[TaskState.DEAD]
    COLOR_PAUSED = constants.BLUE
    def __init__(self, attrs, parent=None):
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QVBoxLayout(self)
        layout.setContentsMargins(4,0,4,4)
        self.attrs = attrs
        # Maps job id -> its QTreeWidgetItem row.
        self.__jobs = {}
        self.__tree = tree = QtGui.QTreeWidget(self)
        tree.setHeaderLabels(self.HEADER)
        tree.setColumnCount(len(self.HEADER))
        tree.setUniformRowHeights(True)
        tree.viewport().setFocusPolicy(QtCore.Qt.NoFocus)
        tree.header().setStretchLastSection(True)
        tree.setSelectionMode(tree.ExtendedSelection)
        tree.setAlternatingRowColors(True)
        for i, v in enumerate(self.WIDTH):
            tree.setColumnWidth(i, v)
        layout.addWidget(tree)
        # connections
        # Single click copies the job name; double click broadcasts the
        # job id; right click opens the job context menu.
        tree.itemClicked.connect(lambda item: copyToClipboard(item.text(0)))
        tree.itemDoubleClicked.connect(self.__itemDoubleClicked)
        tree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        tree.customContextMenuRequested.connect(self.__showContextMenu)
    def refresh(self):
        """Update the rows already shown, then query for new jobs."""
        self.__updateExistingJobs()
        self.__findNewJobs()
    def addJob(self, job):
        """Add a row for *job*; returns False if it is already shown."""
        if self.__jobs.has_key(job.id):
            return False
        item = QtGui.QTreeWidgetItem([
            job.name,
            "",
            "%02d" % job.totals.running,
            "%02d" % (job.totals.waiting + job.totals.depend),
            "%02d" % job.minCores,
            formatMaxValue(job.maxCores),
            formatMaxValue(job.stats.highRam),
            formatDuration(job.startTime, job.stopTime)
        ])
        center = QtCore.Qt.AlignCenter
        # Center every numeric column (everything after name/state).
        for i in xrange(2, item.columnCount()):
            item.setTextAlignment(i, center)
        self.__jobs[job.id] = item
        item.setToolTip(0, job.name)
        item.setToolTip(6, "Started: %s\nStopped:%s" %
            (formatDateTime(job.startTime), formatDateTime(job.stopTime)))
        item.setData(0, JOBID_ROLE, job.id)
        item.setData(0, JOB_ROLE, job)
        self.__tree.addTopLevelItem(item)
        # The last column hosts a live progress-bar widget.
        progress = JobProgressBar(job.totals, self.__tree)
        self.__tree.setItemWidget(item, len(self.HEADER)-1, progress);
        self.__setJobStateAndColor(item)
        return True
    def updateJob(self, job):
        """Refresh an existing row with the latest *job* data."""
        item = self.__jobs[job.id]
        item.setData(0, JOB_ROLE, job)
        item.setText(2, "%02d" % job.totals.running)
        item.setText(3, "%02d" % job.totals.waiting)
        item.setText(4, "%02d" % job.minCores)
        item.setText(5, formatMaxValue(job.maxCores))
        item.setText(6, formatMaxValue(job.stats.highRam))
        item.setText(7, formatDuration(job.startTime, job.stopTime))
        item.setToolTip(7, "Started: %s\nStopped:%s" %
            (formatDateTime(job.startTime), formatDateTime(job.stopTime)))
        self.__tree.itemWidget(item, len(self.HEADER)-1).setTotals(job.totals)
        self.__setJobStateAndColor(item)
    def removeFinishedJobs(self):
        """Remove every row whose job has reached the FINISHED state."""
        finished = []
        for item in self.__jobs.itervalues():
            if item.data(0, JOB_ROLE).state == JobState.FINISHED:
                finished.append(item)
        for item in finished:
            self.removeJobItem(item)
    def removeJobItem(self, item):
        """Remove a single row and drop it from the id -> item map."""
        jobid = str(item.data(0, JOBID_ROLE))
        try:
            del self.__jobs[jobid]
        except Exception, e:
            LOGGER.error(e)
        idx = self.__tree.indexOfTopLevelItem(item)
        self.__tree.takeTopLevelItem(idx)
    def jobs(self):
        """Return the job objects for every row in the tree."""
        tree = self.__tree
        # MatchContains with "" matches every item.
        items = tree.findItems("", QtCore.Qt.MatchContains|QtCore.Qt.MatchRecursive)
        jobs = [item.data(0, JOB_ROLE) for item in items]
        return jobs
    def selectedJobs(self):
        """Return the job objects for the currently selected rows."""
        tree = self.__tree
        jobs = [item.data(0, JOB_ROLE) for item in tree.selectedItems()]
        return jobs
    def __updateJobs(self, jobs):
        """Add rows for unknown jobs; refresh rows for known ones."""
        for job in jobs:
            if not self.__jobs.has_key(job.id):
                self.addJob(job)
            else:
                self.updateJob(job)
    def __updateExistingJobs(self):
        """Re-query plow for the jobs currently displayed."""
        FINISHED = JobState.FINISHED
        now = QtCore.QDateTime.currentDateTimeUtc()
        toDateTime = QtCore.QDateTime.fromMSecsSinceEpoch
        req = {"matchingOnly": True}
        jobs = []
        for jobId, item in self.__jobs.iteritems():
            job = item.data(0, JOB_ROLE)
            # we will continue to pull updates on finished jobs for
            # 60 seconds longer
            if job.state == FINISHED:
                sec = toDateTime(job.stopTime).secsTo(now)
                if sec > 60:
                    continue
            jobs.append(jobId)
        req["jobIds"] = jobs
        self.__updateJobs(plow.client.get_jobs(**req))
    def __findNewJobs(self):
        """Query plow for running jobs matching the configured filters
        (own user, extra users, projects) and merge them in."""
        req = {}
        req["matchingOnly"] = True
        req["user"] = []
        req["states"] = [JobState.RUNNING]
        if self.attrs["loadMine"]:
            req["user"].append(os.environ["USER"])
        if self.attrs["users"]:
            req["user"].extend(self.attrs["users"])
        if self.attrs["projects"]:
            req["project"] = self.attrs["projects"]
        self.__updateJobs(plow.client.get_jobs(**req))
    def __showContextMenu(self, pos):
        """Pop up the job context menu for the row under *pos*."""
        tree = self.__tree
        item = tree.itemAt(pos)
        if not item:
            return
        jobs = self.selectedJobs()
        menu = JobContextMenu(jobs, parent=tree)
        menu.popup(tree.mapToGlobal(pos))
    def __setJobStateAndColor(self, item):
        """Set the State cell text and colors: paused -> blue, any dead
        tasks -> dead color, otherwise the state's standard color."""
        job = item.data(0, JOB_ROLE)
        totals = job.totals
        color = QtCore.Qt.black
        text = constants.JOB_STATES[job.state]
        if job.paused:
            bgcolor = self.COLOR_PAUSED
            color = QtCore.Qt.white
        elif totals.dead:
            bgcolor = self.COLOR_DEAD
            color = QtCore.Qt.white
        else:
            bgcolor = constants.COLOR_JOB_STATE[job.state]
        item.setText(1, text)
        item.setBackground(1, bgcolor)
        item.setForeground(1, color)
    def __itemDoubleClicked(self, item, col):
        """Broadcast the double-clicked job's id via the event bus."""
        uid = item.data(0, JOBID_ROLE)
        EventManager.JobOfInterest.emit(uid)
class RenderJobWatchSettingsDialog(QtGui.QDialog):
    """
    A dialog box that lets you configure how the render job widget
    auto-loads and filters jobs.
    """
    def __init__(self, attrs, parent=None):
        QtGui.QDialog.__init__(self, parent)
        layout = QtGui.QVBoxLayout(self)
        # Refresh interval slider: 1-60 seconds.
        self.sliderRefresh = SpinSliderWidget(1, 60, attrs["refreshSeconds"], self)
        self.sliderRefresh.slider.setTickInterval(5)
        self.sliderRefresh.slider.setTickPosition(QtGui.QSlider.TicksBelow)
        self.checkboxLoadMine = BooleanCheckBox(bool(attrs["loadMine"]))
        self.listUsers = ManagedListWidget(attrs["users"], "name", self)
        # NOTE(review): this checkbox is laid out but its value is not
        # read back in getAttrs() -- confirm intended.
        self.checkboxLoadErrors = QtGui.QCheckBox(self)
        projects = [project.code for project in plow.client.get_projects()]
        self.listProjects = CheckableListBox("Projects",
                                             projects,
                                             attrs["projects"],
                                             bool(attrs["allProjects"]),
                                             self)
        group_box1 = QtGui.QGroupBox("Auto Load Jobs", self)
        form_layout1 = QtGui.QFormLayout(group_box1)
        form_layout1.addRow("Refresh", self.sliderRefresh)
        form_layout1.addRow("Load Mine:", self.checkboxLoadMine)
        form_layout1.addRow("Load User:", self.listUsers)
        form_layout1.addRow("Load With Errors:", self.checkboxLoadErrors)
        # move to project multi-select widget
        group_box2 = QtGui.QGroupBox("Filters", self)
        form_layout2 = QtGui.QFormLayout(group_box2)
        form_layout2.addRow("For Projects:", self.listProjects)
        buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel);
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        layout.addWidget(group_box1)
        layout.addWidget(group_box2)
        layout.addWidget(buttons)
    def getAttrs(self):
        """Return the dialog's current values as a panel-attrs dict."""
        return {
            "refreshSeconds": self.sliderRefresh.value(),
            "loadMine": self.checkboxLoadMine.isChecked(),
            "users": self.listUsers.getValues(),
            "projects": self.listProjects.getCheckedOptions(),
            "allProjects": self.listProjects.isAllSelected()
        }
|
{
"content_hash": "b0e616b81b1e66669528be50f7a13efb",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 106,
"avg_line_length": 33.38648648648649,
"alnum_prop": 0.6039018861814943,
"repo_name": "Br3nda/plow",
"id": "132832d7f15529bf5e07d73b366839491502b0f9",
"size": "12353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/plow/gui/panels/watch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "85468"
},
{
"name": "Java",
"bytes": "646022"
},
{
"name": "JavaScript",
"bytes": "125402"
},
{
"name": "Python",
"bytes": "577774"
},
{
"name": "Scala",
"bytes": "577"
},
{
"name": "Shell",
"bytes": "7324"
}
],
"symlink_target": ""
}
|
from lxml import etree
import webob
from nova.compute import flavors
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
# Canned flavor fixtures, keyed by flavor name ("flavor <flavorid>").
FAKE_FLAVORS = {
    'flavor 1': {
        "flavorid": '1',
        "name": 'flavor 1',
        "memory_mb": '256',
        "root_gb": '10',
        "swap": '5',
        "disabled": False,
        "ephemeral_gb": '20',
        "rxtx_factor": '1.0',
        "vcpus": 1,
    },
    'flavor 2': {
        "flavorid": '2',
        "name": 'flavor 2',
        "memory_mb": '512',
        "root_gb": '10',
        "swap": '10',
        "ephemeral_gb": '25',
        "rxtx_factor": None,  # deliberately unset to exercise the API default
        "disabled": False,
        "vcpus": 1,
    },
}


def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
    """Return the canned flavor with the given id; ``ctxt`` is ignored."""
    key = 'flavor %s' % flavorid
    return FAKE_FLAVORS[key]


def fake_get_all_flavors_sorted_list(context=None, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Return both canned flavors in id order; all filter args are ignored."""
    return [fake_flavor_get_by_flavor_id(fid) for fid in (1, 2)]
class FlavorRxtxTestV21(test.NoDBTestCase):
    """Verify the os-flavor-rxtx extension exposes rxtx_factor on the
    v2.1 flavors API, both for a single flavor and for the detail list."""
    content_type = 'application/json'
    _prefix = '/v3'

    def setUp(self):
        super(FlavorRxtxTestV21, self).setUp()
        # Enable the flavor_rxtx extension and stub out networking plus the
        # flavor lookups so the canned FAKE_FLAVORS fixtures are served.
        extension = ('nova.api.openstack.compute.contrib'
                     '.flavor_rxtx.Flavor_rxtx')
        self.flags(osapi_compute_extension=[extension])
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(flavors, "get_all_flavors_sorted_list",
                       fake_get_all_flavors_sorted_list)
        self.stubs.Set(flavors, "get_flavor_by_flavor_id",
                       fake_flavor_get_by_flavor_id)

    def _make_request(self, url):
        # Issue a GET with the test's content type and return the response.
        request = webob.Request.blank(url)
        request.headers['Accept'] = self.content_type
        return request.get_response(self._get_app())

    def _get_app(self):
        return fakes.wsgi_app_v3(
            init_only=('servers', 'flavors', 'os-flavor-rxtx'))

    def _get_flavor(self, body):
        return jsonutils.loads(body).get('flavor')

    def _get_flavors(self, body):
        return jsonutils.loads(body).get('flavors')

    def assertFlavorRxtx(self, flavor, rxtx):
        # Compare as strings so JSON and XML responses check identically.
        self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)

    def test_show(self):
        res = self._make_request(self._prefix + '/flavors/1')
        self.assertEqual(res.status_int, 200)
        self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')

    def test_detail(self):
        res = self._make_request(self._prefix + '/flavors/detail')
        self.assertEqual(res.status_int, 200)
        listed = self._get_flavors(res.body)
        self.assertFlavorRxtx(listed[0], '1.0')
        self.assertFlavorRxtx(listed[1], '')
class FlavorRxtxTestV20(FlavorRxtxTestV21):
    """Re-run the rxtx assertions against the legacy v2 API."""
    _prefix = "/v2/fake"
    def _get_app(self):
        # v2 is served by the default WSGI app rather than the v3 app.
        return fakes.wsgi_app()
class FlavorRxtxXmlTest(FlavorRxtxTestV20):
    """Re-run the rxtx assertions with XML request/response bodies."""
    content_type = 'application/xml'

    def _get_flavor(self, body):
        """Parse a single-flavor XML response body into an Element."""
        return etree.XML(body)

    def _get_flavors(self, body):
        """Parse a flavor-list XML response into a list of flavor Elements.

        Uses list(element) instead of Element.getchildren(), which is
        deprecated in lxml/ElementTree; both return the direct children
        in document order.
        """
        return list(etree.XML(body))
|
{
"content_hash": "62a1cd205f7193a3d98c37fa09397ace",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 28.29824561403509,
"alnum_prop": 0.573775573465592,
"repo_name": "saleemjaveds/https-github.com-openstack-nova",
"id": "8324147cfd25b61f2d1b76c58eac2f87169bf036",
"size": "3831",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14935646"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
}
|
"""This example updates user team associations.
It updates a user team association by setting the overridden access type to read
only for all teams that the user belongs to. To determine which users exist,
run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: UserTeamAssociationService.getUserTeamAssociationsByStatement
Tags: UserTeamAssociationService.updateUserTeamAssociations
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
USER_ID = 'INSERT_USER_ID_TO_UPDATE_HERE'
def main(client, user_id):
  """Set every team association of the given user to read-only access.

  Args:
    client: an initialized dfp.DfpClient.
    user_id: id of the user whose team associations are updated.
  """
  # Initialize appropriate service.
  user_team_association_service = client.GetService(
      'UserTeamAssociationService', version='v201505')

  # Create filter text to select user team associations by the user ID.
  values = [{
      'key': 'userId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': user_id
      }
  }]
  query = 'WHERE userId = :userId'

  # Create a filter statement.
  statement = dfp.FilterStatement(query, values)

  # Get user team associations by statement.
  response = user_team_association_service.getUserTeamAssociationsByStatement(
      statement.ToStatement())

  if 'results' in response:
    updated_user_team_associations = []
    # Update each local user team association to read only access.
    for user_team_association in response['results']:
      user_team_association['overriddenTeamAccessType'] = 'READ_ONLY'
      updated_user_team_associations.append(user_team_association)

    # Update user team associations on the server.
    user_team_associations = (
        user_team_association_service.updateUserTeamAssociations(
            updated_user_team_associations))

    # Display results. Single-argument print() calls behave identically
    # under Python 2 and Python 3, unlike the bare print statements they
    # replace, which are a Python-2-only syntax error on Python 3.
    if user_team_associations:
      for user_team_association in user_team_associations:
        print ('User team association between user with ID \'%s\' and team with'
               ' ID \'%s\' was updated.' % (user_team_association['userId'],
                                            user_team_association['teamId']))
    else:
      print('No user team associations were updated.')
  else:
    print('No user team associations found to update.')
if __name__ == '__main__':
  # Initialize client object from stored "googleads.yaml" credentials.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, USER_ID)
|
{
"content_hash": "00aeba4dbc49977d3c0f8022d4690a6a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 35.28767123287671,
"alnum_prop": 0.702251552795031,
"repo_name": "ya7lelkom/googleads-python-lib",
"id": "0ad6b3145b6cc73b933062efda34bf4000b0ed16",
"size": "3194",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/dfp/v201505/user_team_association_service/update_user_team_associations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535232"
}
],
"symlink_target": ""
}
|
import datetime
import ujson
import re
import mock
from email.utils import parseaddr
from django.conf import settings
from django.http import HttpResponse
from django.conf import settings
from mock import patch
from typing import Any, Dict, List, Union, Mapping
from zerver.lib.actions import (
do_change_is_admin,
do_change_realm_subdomain,
do_set_realm_property,
do_deactivate_realm,
do_deactivate_stream,
do_create_realm,
do_scrub_realm,
create_stream_if_needed,
do_change_plan_type,
do_send_realm_reactivation_email
)
from confirmation.models import create_confirmation_link, Confirmation
from zerver.lib.send_email import send_future_email
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.lib.test_runner import slow
from zerver.models import get_realm, Realm, UserProfile, ScheduledEmail, get_stream, \
CustomProfileField, Message, UserMessage, Attachment, get_user_profile_by_email
class RealmTest(ZulipTestCase):
    """Tests for realm-wide settings (name, description, notification
    streams, chat provider, plan type) and for realm deactivation and
    reactivation, including their cache-invalidation side effects."""
    def assert_user_profile_cache_gets_new_name(self, user_profile: UserProfile,
                                                new_realm_name: str) -> None:
        # Helper: the cached user profile must reflect the renamed realm.
        self.assertEqual(user_profile.realm.name, new_realm_name)
    def test_do_set_realm_name_caching(self) -> None:
        """The main complicated thing about setting realm names is fighting the
        cache, and we start by populating the cache for Hamlet, and we end
        by checking the cache to ensure that the new value is there."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        new_name = u'Zed You Elle Eye Pea'
        do_set_realm_property(realm, 'name', new_name)
        self.assertEqual(get_realm(realm.string_id).name, new_name)
        self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)
    def test_update_realm_name_events(self) -> None:
        # Renaming a realm should broadcast a realm/update event with the new name.
        realm = get_realm('zulip')
        new_name = u'Puliz'
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'name', new_name)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='name',
            value=new_name,
        ))
    def test_update_realm_description_events(self) -> None:
        # Same event contract as above, for the 'description' property.
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'description', new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_update_realm_description(self) -> None:
        # End-to-end: PATCH /json/realm updates the description and
        # broadcasts the corresponding realm/update event.
        email = self.example_email("iago")
        self.login(email)
        realm = get_realm('zulip')
        new_description = u'zulip dev group'
        data = dict(description=ujson.dumps(new_description))
        events = [] # type: List[Mapping[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_patch('/json/realm', data)
            self.assert_json_success(result)
            realm = get_realm('zulip')
            self.assertEqual(realm.description, new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_realm_description_length(self) -> None:
        # Descriptions over the length limit are rejected and not saved.
        new_description = u'A' * 1001
        data = dict(description=ujson.dumps(new_description))
        # create an admin user
        email = self.example_email("iago")
        self.login(email)
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization description is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.description, new_description)
    def test_realm_name_length(self) -> None:
        # Names over Realm.MAX_REALM_NAME_LENGTH are rejected and not saved.
        new_name = u'A' * (Realm.MAX_REALM_NAME_LENGTH + 1)
        data = dict(name=ujson.dumps(new_name))
        # create an admin user
        email = self.example_email("iago")
        self.login(email)
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization name is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.name, new_name)
    def test_admin_restrictions_for_changing_realm_name(self) -> None:
        # Non-admins must not be able to rename the realm.
        new_name = 'Mice will play while the cat is away'
        user_profile = self.example_user('othello')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, False)
        req = dict(name=ujson.dumps(new_name))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Must be an organization administrator')
    def test_unauthorized_name_change(self) -> None:
        # With name_changes_disabled, a regular user's full-name change is
        # silently ignored, while a realm admin can still change theirs.
        data = {'full_name': 'Sir Hamlet'}
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)
        do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assertEqual(result.status_code, 200)
        # Since the setting fails silently, no message is returned
        self.assert_in_response("", result)
        # Realm admins can change their name even setting is disabled.
        data = {'full_name': 'New Iago'}
        self.login(self.example_email("iago"))
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assert_in_success_response(['"full_name":"New Iago"'], result)
    def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
        """The main complicated thing about deactivating realm names is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated.  You can make this test fail
        by disabling cache.flush_realm()."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        user = self.example_user('hamlet')
        self.assertTrue(user.realm.deactivated)
    def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
        """The main complicated thing about changing realm subdomains is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated.  You can make this test fail
        by disabling cache.flush_realm()."""
        user = get_user_profile_by_email('hamlet@zulip.com')
        realm = get_realm('zulip')
        do_change_realm_subdomain(realm, "newzulip")
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.realm.string_id, "newzulip")
        # This doesn't use a cache right now, but may later.
        self.assertIsNone(get_realm("zulip"))
    def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
        # Deactivating a realm must cancel its pending scheduled emails.
        user = self.example_user('hamlet')
        send_future_email('zerver/emails/followup_day1', user.realm,
                          to_user_ids=[user.id], delay=datetime.timedelta(hours=1))
        self.assertEqual(ScheduledEmail.objects.count(), 1)
        do_deactivate_realm(user.realm)
        self.assertEqual(ScheduledEmail.objects.count(), 0)
    def test_do_deactivate_realm_on_deactived_realm(self) -> None:
        """Ensure early exit is working in realm deactivation"""
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
    def test_realm_reactivation_link(self) -> None:
        # Visiting a valid reactivation confirmation link reactivates the realm.
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        confirmation_url = create_confirmation_link(realm, realm.host, Confirmation.REALM_REACTIVATION)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_do_send_realm_reactivation_email(self) -> None:
        # The reactivation email is sent from a tokenized no-reply address
        # and contains a working confirmation link.
        realm = get_realm('zulip')
        do_send_realm_reactivation_email(realm)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
        from_email = outbox[0].from_email
        tokenized_no_reply_email = parseaddr(from_email)[1]
        self.assertIn("Zulip Account Security", from_email)
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Reactivate your Zulip organization', outbox[0].subject)
        self.assertIn('To reactivate organization, please click here:', outbox[0].body)
        admins = realm.get_admin_users()
        confirmation_url = self.get_confirmation_url_from_outbox(admins[0].email)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_realm_reactivation_with_random_link(self) -> None:
        # A bogus confirmation key should render the "expired" page.
        random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
        response = self.client_get(random_link)
        self.assert_in_success_response(['The organization reactivation link has expired or is not valid.'], response)
    def test_change_notifications_stream(self) -> None:
        # -1 disables the stream; a valid id sets it; unknown ids error out.
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        disabled_notif_stream_id = -1
        req = dict(notifications_stream_id = ujson.dumps(disabled_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.notifications_stream, None)
        new_notif_stream_id = 4
        req = dict(notifications_stream_id = ujson.dumps(new_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
        invalid_notif_stream_id = 1234
        req = dict(notifications_stream_id = ujson.dumps(invalid_notif_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
    def test_get_default_notifications_stream(self) -> None:
        # get_notifications_stream() returns None once the stream is deactivated.
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.notifications_stream_id = verona.id
        realm.save(update_fields=["notifications_stream"])
        notifications_stream = realm.get_notifications_stream()
        self.assertEqual(notifications_stream.id, verona.id)
        do_deactivate_stream(notifications_stream)
        self.assertIsNone(realm.get_notifications_stream())
    def test_change_signup_notifications_stream(self) -> None:
        # Mirrors test_change_notifications_stream for the signup stream.
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        disabled_signup_notifications_stream_id = -1
        req = dict(signup_notifications_stream_id = ujson.dumps(disabled_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.signup_notifications_stream, None)
        new_signup_notifications_stream_id = 4
        req = dict(signup_notifications_stream_id = ujson.dumps(new_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
        invalid_signup_notifications_stream_id = 1234
        req = dict(signup_notifications_stream_id = ujson.dumps(invalid_signup_notifications_stream_id))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id)
    def test_get_default_signup_notifications_stream(self) -> None:
        # Deactivating the signup notifications stream clears the getter.
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.signup_notifications_stream = verona
        realm.save(update_fields=["signup_notifications_stream"])
        signup_notifications_stream = realm.get_signup_notifications_stream()
        self.assertEqual(signup_notifications_stream, verona)
        do_deactivate_stream(signup_notifications_stream)
        self.assertIsNone(realm.get_signup_notifications_stream())
    def test_change_realm_default_language(self) -> None:
        # Valid language codes are saved; invalid ones error and are rejected.
        new_lang = "de"
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, new_lang)
        # we need an admin user.
        email = self.example_email("iago")
        self.login(email)
        req = dict(default_language=ujson.dumps(new_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when invalid languages are passed
        # as the default realm language, correct validation error is
        # raised and the invalid language is not saved in db
        invalid_lang = "invalid_lang"
        req = dict(default_language=ujson.dumps(invalid_lang))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, invalid_lang)
    def test_deactivate_realm_by_admin(self) -> None:
        # Admins can deactivate their realm via the API.
        email = self.example_email('iago')
        self.login(email)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertTrue(realm.deactivated)
    def test_deactivate_realm_by_non_admin(self) -> None:
        # Non-admins are rejected and the realm stays active.
        email = self.example_email('hamlet')
        self.login(email)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_error(result, "Must be an organization administrator")
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_change_bot_creation_policy(self) -> None:
        # Known policy values succeed; unknown values are rejected.
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        req = dict(bot_creation_policy = ujson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_add_bot_permission = 4
        req = dict(bot_creation_policy = ujson.dumps(invalid_add_bot_permission))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid bot creation policy')
    def test_change_email_address_visibility(self) -> None:
        # Invalid visibility values are rejected; valid ones are saved.
        # We need an admin user.
        email = 'iago@zulip.com'
        self.login(email)
        invalid_value = 4
        req = dict(email_address_visibility = ujson.dumps(invalid_value))
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid email address visibility policy')
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        req = dict(email_address_visibility = ujson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS))
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
    def test_change_video_chat_provider(self) -> None:
        # Switching to Google Hangouts requires a valid hangouts domain;
        # switching back to Jitsi needs no extra fields.
        self.assertEqual(get_realm('zulip').video_chat_provider, "Jitsi")
        email = self.example_email("iago")
        self.login(email)
        req = {"video_chat_provider": ujson.dumps("Google Hangouts")}
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid domain: Domain can't be empty.")
        req = {
            "video_chat_provider": ujson.dumps("Google Hangouts"),
            "google_hangouts_domain": ujson.dumps("invaliddomain"),
        }
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Invalid domain: Domain must have at least one dot (.)")
        req = {
            "video_chat_provider": ujson.dumps("Google Hangouts"),
            "google_hangouts_domain": ujson.dumps("zulip.com"),
        }
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider, "Google Hangouts")
        req = {"video_chat_provider": ujson.dumps("Jitsi")}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider, "Jitsi")
    def test_initial_plan_type(self) -> None:
        # With billing enabled new realms start LIMITED; self-hosted otherwise.
        with self.settings(BILLING_ENABLED=True):
            self.assertEqual(do_create_realm('hosted', 'hosted').plan_type, Realm.LIMITED)
            self.assertEqual(get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        with self.settings(BILLING_ENABLED=False):
            self.assertEqual(do_create_realm('onpremise', 'onpremise').plan_type, Realm.SELF_HOSTED)
            self.assertEqual(get_realm('onpremise').max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm('onpremise').message_visibility_limit, None)
    def test_change_plan_type(self) -> None:
        # Each plan type adjusts max_invites and message_visibility_limit.
        user = self.example_user('iago')
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        do_change_plan_type(user, Realm.STANDARD)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        do_change_plan_type(user, Realm.LIMITED)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        do_change_plan_type(user, Realm.STANDARD_FREE)
        realm = get_realm('zulip')
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
class RealmAPITest(ZulipTestCase):
    """Loops over Realm.property_types and exercises PATCH /json/realm for
    each property, plus the interdependent message-editing settings."""
    def setUp(self) -> None:
        # All realm updates require an admin; promote cordelia and log in.
        user_profile = self.example_user('cordelia')
        email = user_profile.email
        self.login(email)
        do_change_is_admin(user_profile, True)
    def set_up_db(self, attr: str, value: Any) -> None:
        # Seed the realm property directly in the database, bypassing the API.
        realm = get_realm('zulip')
        setattr(realm, attr, value)
        realm.save(update_fields=[attr])
    def update_with_api(self, name: str, value: int) -> Realm:
        # PATCH one property through the API and return the refreshed realm.
        result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
        self.assert_json_success(result)
        return get_realm('zulip') # refresh data
    def do_test_realm_update_api(self, name: str) -> None:
        """Test updating realm properties.
        If new realm properties have been added to the Realm model but the
        test_values dict below has not been updated, this will raise an
        assertion error.
        """
        bool_tests = [False, True] # type: List[bool]
        test_values = dict(
            default_language=[u'de', u'en'],
            description=[u'Realm description', u'New description'],
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            waiting_period_threshold=[10, 20],
            bot_creation_policy=[1, 2],
            email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                                      Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS],
            video_chat_provider=[u'Jitsi', u'Hangouts'],
            google_hangouts_domain=[u'zulip.com', u'zulip.org'],
        ) # type: Dict[str, Any]
        vals = test_values.get(name)
        if Realm.property_types[name] is bool:
            vals = bool_tests
        if vals is None:
            raise AssertionError('No test created for %s' % (name))
        # Round-trip: seed vals[0], API-update to vals[1], then back to vals[0].
        self.set_up_db(name, vals[0])
        realm = self.update_with_api(name, vals[1])
        self.assertEqual(getattr(realm, name), vals[1])
        realm = self.update_with_api(name, vals[0])
        self.assertEqual(getattr(realm, name), vals[0])
    @slow("Tests a dozen properties in a loop")
    def test_update_realm_properties(self) -> None:
        for prop in Realm.property_types:
            self.do_test_realm_update_api(prop)
    def test_update_realm_allow_message_editing(self) -> None:
        """Tests updating the realm property 'allow_message_editing'."""
        # The three message-editing settings are updated independently;
        # changing one must not clobber the other two.
        self.set_up_db('allow_message_editing', False)
        self.set_up_db('message_content_edit_limit_seconds', 0)
        self.set_up_db('allow_community_topic_editing', False)
        realm = self.update_with_api('allow_message_editing', True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 100)
        realm = self.update_with_api('allow_community_topic_editing', True)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('allow_message_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('message_content_edit_limit_seconds', 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api('allow_community_topic_editing', False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, False)
    def test_update_realm_allow_message_deleting(self) -> None:
        """Tests updating the realm property 'allow_message_deleting'."""
        # Same independence contract for the message-deleting settings.
        self.set_up_db('allow_message_deleting', True)
        self.set_up_db('message_content_delete_limit_seconds', 0)
        realm = self.update_with_api('allow_message_deleting', False)
        self.assertEqual(realm.allow_message_deleting, False)
        self.assertEqual(realm.message_content_delete_limit_seconds, 0)
        realm = self.update_with_api('allow_message_deleting', True)
        realm = self.update_with_api('message_content_delete_limit_seconds', 100)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 100)
        realm = self.update_with_api('message_content_delete_limit_seconds', 600)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
    """Tests for do_scrub_realm, which anonymizes a single realm."""

    def test_scrub_realm(self) -> None:
        """Scrubbing one realm must not touch any other realm's data.

        Populates two realms ("zulip" and "lear") with messages,
        attachments, and custom profile fields, scrubs "zulip", and
        verifies that only the scrubbed realm's data was deleted or
        anonymized while "lear" remains fully intact.
        """
        zulip = get_realm("zulip")
        lear = get_realm("lear")

        iago = self.example_user("iago")
        othello = self.example_user("othello")
        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")

        create_stream_if_needed(lear, "Shakespeare")
        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")

        # Wipe fixture messages so the counts asserted below are exact.
        Message.objects.all().delete()
        UserMessage.objects.all().delete()

        # Five messages per user: 10 per realm, 20 UserMessage rows
        # per realm (sender + one subscriber receive each message).
        for _ in range(5):
            self.send_stream_message(iago.email, "Scotland")
            self.send_stream_message(othello.email, "Scotland")
            self.send_stream_message(cordelia.email, "Shakespeare", sender_realm="lear")
            self.send_stream_message(king.email, "Shakespeare", sender_realm="lear")

        # Two attachments per realm, starting from a clean slate.
        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt")
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt")

        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt")
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt")

        CustomProfileField.objects.create(realm=lear)

        # Sanity-check the pre-scrub state.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)

        # Silence the expected scrub-in-progress warning.
        with mock.patch('logging.warning'):
            do_scrub_realm(zulip)

        # The zulip realm's data is gone; the lear realm's is untouched.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)

        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)

        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)

        # Scrubbed users get randomized names and emails.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))

        # Users in the untouched realm keep their original identities.
        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
|
{
"content_hash": "8c5e734316a2beb09991869dfa57e0bf",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 118,
"avg_line_length": 46.084019769357496,
"alnum_prop": 0.6543095127444322,
"repo_name": "jackrzhang/zulip",
"id": "7ecd61cebbda200d57a156302571c1b0763f7858",
"size": "27974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_realm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "428151"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "660198"
},
{
"name": "JavaScript",
"bytes": "2910049"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90611"
},
{
"name": "Python",
"bytes": "6065880"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "112340"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.