max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
queues/queue.py | nataz77/cs-py | 0 | 6617751 | <gh_stars>0
from typing import Any
from .linkedlistnode import LinkedListNode
class Queue:
    """Singly-linked FIFO queue built on LinkedListNode."""

    def __init__(self):
        self.front = None  # node returned by the next dequeue
        self.back = None   # node most recently enqueued
        self.len = 0       # number of stored items

    def peek(self) -> Any:
        """Return (without removing) the front item; raise if empty."""
        if self.is_empty():
            raise Exception("The queue is empty")
        return self.front.data

    def is_empty(self) -> bool:
        """Return True when the queue holds no items."""
        return self.front is None

    def enqueue(self, value) -> None:
        """Append *value* at the back of the queue."""
        new_node = LinkedListNode(value)
        if self.front is None and self.back is None:
            # BUG FIX: the original assigned self.head here, so front was
            # never set and the queue always reported itself empty.
            self.front = new_node
        else:
            self.back.next = new_node
        self.back = new_node
        self.len += 1

    def dequeue(self) -> Any:
        """Remove and return the front item; raise if empty."""
        if self.is_empty():
            raise Exception("The queue is empty")
        val = self.front.data
        self.front = self.front.next
        if self.front is None:
            # Keep back consistent once the last item has been removed.
            self.back = None
        # BUG FIX: the original decremented nonexistent self.count.
        self.len -= 1
        return val
| from typing import Any
from .linkedlistnode import LinkedListNode
class Queue:
def __init__(self):
self.front = None
self.back = None
self.len = 0
def peek(self) -> Any:
if self.is_empty():
raise Exception("The queue is empty")
else:
return self.front.data
def is_empty(self) -> bool:
return self.front is None
def enqueue(self, value) -> None:
NewNode = LinkedListNode(value)
if self.front is None and self.back is None:
self.head = NewNode
else:
self.back.next = NewNode
self.back = NewNode
self.len += 1
def dequeue(self) -> Any:
if self.is_empty():
raise Exception("The queue is empty")
val = self.front.data
self.front = self.front.next
self.count -= 1
return val | none | 1 | 3.880226 | 4 | |
{{ cookiecutter.project_name }}/src/configs/settings.py | tiagoarasilva/cookiecutter-flask | 0 | 6617752 | """
All the custom settings are placed here. The settings are therefore loaded
trough environment variable `FLASK_APP_SETTINGS` that should be just a location
of the file
"""
import os
import ast
import datetime
import binascii
# GENERAL SETTINGS
DEBUG = False
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
SECRET_KEY = os.getenv('SECRET_KEY', binascii.hexlify(os.urandom(24)))
# HOSTS AND SECURITY
ALLOWED_HOSTS = ast.literal_eval(os.getenv('ALLOWED_HOSTS', "['*']"))
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# LOGGING
LOG_BACKTRACE = True
LOG_LEVEL = 'INFO'
# DATABASE CONFIGURATION
DATABASE_USER = os.environ.get('POSTGRES_USER', 'postgres')
DATABASE_PASSWORD = os.environ.get('POSTGRES_PASSWORD', '<PASSWORD>')
DATABASE_NAME = os.environ.get('POSTGRES_DB', '{{ cookiecutter.project_name }}')
DATABASE_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
SQLALCHEMY_DATABASE_URI = "postgresql://{}:{}@{}/{}".format(
DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST, DATABASE_NAME
)
# CACHES
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
# These control flask-seasurf.
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_TIMEOUT = datetime.timedelta(days=1)
# SWAGGER
SWAGGER_SPECS = False
CORS_ORIGINS = ast.literal_eval(os.getenv('CORS_ORIGINS', '[]'))
| """
All the custom settings are placed here. The settings are therefore loaded
trough environment variable `FLASK_APP_SETTINGS` that should be just a location
of the file
"""
import os
import ast
import datetime
import binascii
# GENERAL SETTINGS
DEBUG = False
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
SECRET_KEY = os.getenv('SECRET_KEY', binascii.hexlify(os.urandom(24)))
# HOSTS AND SECURITY
ALLOWED_HOSTS = ast.literal_eval(os.getenv('ALLOWED_HOSTS', "['*']"))
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# LOGGING
LOG_BACKTRACE = True
LOG_LEVEL = 'INFO'
# DATABASE CONFIGURATION
DATABASE_USER = os.environ.get('POSTGRES_USER', 'postgres')
DATABASE_PASSWORD = os.environ.get('POSTGRES_PASSWORD', '<PASSWORD>')
DATABASE_NAME = os.environ.get('POSTGRES_DB', '{{ cookiecutter.project_name }}')
DATABASE_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
SQLALCHEMY_DATABASE_URI = "postgresql://{}:{}@{}/{}".format(
DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST, DATABASE_NAME
)
# CACHES
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
# These control flask-seasurf.
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_TIMEOUT = datetime.timedelta(days=1)
# SWAGGER
SWAGGER_SPECS = False
CORS_ORIGINS = ast.literal_eval(os.getenv('CORS_ORIGINS', '[]'))
| en | 0.830414 | All the custom settings are placed here. The settings are therefore loaded trough environment variable `FLASK_APP_SETTINGS` that should be just a location of the file # GENERAL SETTINGS # HOSTS AND SECURITY # LOGGING # DATABASE CONFIGURATION # CACHES # These control flask-seasurf. # SWAGGER | 1.860006 | 2 |
stage_check/stage_check/OutputRecentCoresText.py | 128technology/stage_check | 2 | 6617753 | <gh_stars>1-10
###############################################################################
#
###############################################################################
import pprint
try:
from stage_check import Output
except ImportError:
import Output
try:
from stage_check import OutputRecentCores
except ImportError:
import OutputRecentCores
def create_instance():
    """Factory hook: return a fresh OutputRecentCoresText reporter."""
    return OutputRecentCoresText()
class OutputRecentCoresText(OutputRecentCores.Base, Output.Text):
    """Plain-text renderer for the recent-cores check output."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def _count_by_executable(cores):
        """Map the basename of each core's EXE path to its crash count.

        Factored out of amend_uptime_match / amend_service_match, which
        previously duplicated this counting loop verbatim.
        """
        counts = {}
        for core in cores:
            exe_name = core["EXE"].split('/')[-1]
            counts[exe_name] = counts.get(exe_name, 0) + 1
        return counts

    def amend_uptime_match(
            self,
            cores
        ):
        """Append per-executable crash totals since OS boot; return True."""
        counts = self._count_by_executable(cores)
        for exe_name, count in counts.items():
            self.message_list.append(f"{count} {exe_name} crashes since OS boot")
        return True

    def amend_service_match(
            self,
            service,
            cores
        ):
        """Append per-executable crash totals since *service* start; return True."""
        counts = self._count_by_executable(cores)
        for exe_name, count in counts.items():
            self.message_list.append(f"{count} {exe_name} crashes since {service} start")
        return True

    def amend_test_result(
            self,
            message
        ):
        """Store the overall test-result message."""
        self.message = message
| ###############################################################################
#
###############################################################################
import pprint
try:
from stage_check import Output
except ImportError:
import Output
try:
from stage_check import OutputRecentCores
except ImportError:
import OutputRecentCores
def create_instance():
return OutputRecentCoresText()
class OutputRecentCoresText(OutputRecentCores.Base, Output.Text):
"""
"""
def __init__(self):
super().__init__()
def amend_uptime_match(
self,
cores
):
exec_counts = {}
for core in cores:
exec_name = core["EXE"]
exec_name = exec_name.split('/')[-1]
try:
exec_counts[exec_name] += 1
except KeyError:
exec_counts[exec_name] = 1
for exec_name in exec_counts:
self.message_list.append(f"{exec_counts[exec_name]} {exec_name} crashes since OS boot")
return True
def amend_service_match(
self,
service,
cores
):
exec_counts = {}
for core in cores:
exec_name = core["EXE"]
exec_name = exec_name.split('/')[-1]
try:
exec_counts[exec_name] += 1
except KeyError:
exec_counts[exec_name] = 1
for exec_name in exec_counts:
self.message_list.append(f"{exec_counts[exec_name]} {exec_name} crashes since {service} start")
return True
def amend_test_result(
self,
message
):
self.message = message | de | 0.862255 | ############################################################################### # ############################################################################### | 2.283603 | 2 |
tabnine-vim/python/ycm/tests/testdata/.ycm_extra_conf.py | MrMonk3y/vimrc | 10 | 6617754 | <reponame>MrMonk3y/vimrc
def FlagsForFile( filename, **kwargs ):
    """YCM extra-conf hook: expose the client's temp dir as the only flag."""
    client_data = kwargs[ 'client_data' ]
    temp_dir = client_data[ 'tempname()' ]
    return { 'flags': [ temp_dir ], 'do_cache': False }
| def FlagsForFile( filename, **kwargs ):
temp_dir = kwargs[ 'client_data' ][ 'tempname()' ]
return {
'flags': [ temp_dir ],
'do_cache': False
} | none | 1 | 1.958485 | 2 | |
cannabis_reports/tests/test_apis_producer.py | kwaaak/python-cannabis-reports | 13 | 6617755 | # -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from .api_common import recorder
from .api_entity import ApiEntityAbstract
from ..models.producer import Producer
class TestApisProducers(ApiEntityAbstract):
    """Tests the Producers API endpoint."""

    # Default producer UID that the shared ApiEntityAbstract helpers act on;
    # individual tests override it when their recorded cassette targets a
    # different producer.
    UID = '0000000000L6M7E0000000000'

    def setUp(self):
        super(TestApisProducers, self).setUp()
        self.endpoint = self.api.Producers

    @recorder.use_cassette()
    def test_apis_producers_list(self):
        """It should parse the response and return the proper object."""
        self._test_apis_objects_list(Producer)

    @recorder.use_cassette()
    def test_apis_producers_get(self):
        """It should return the proper singleton."""
        self._test_apis_objects_get('Kiva')

    @recorder.use_cassette()
    def test_apis_producers_get_extracts(self):
        """It should return the extracts for a producer."""
        self.UID = '0000000000VU7TG0000000000'
        self._test_apis_objects_get_extracts()

    @recorder.use_cassette()
    def test_apis_producers_get_edibles(self):
        """It should return the edibles for a producer."""
        self._test_apis_objects_get_edibles()

    @recorder.use_cassette()
    def test_apis_producers_get_products(self):
        """It should return the products for a producer."""
        self.UID = '0000000000N4E9N0000000000'
        self._test_apis_objects_get_products()

    @recorder.use_cassette()
    def test_apis_producers_get_available(self):
        """It should return the availables for a producer."""
        self._test_apis_objects_get_available()
| # -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from .api_common import recorder
from .api_entity import ApiEntityAbstract
from ..models.producer import Producer
class TestApisProducers(ApiEntityAbstract):
"""Tests the Producers API endpoint."""
UID = '0000000000L6M7E0000000000'
def setUp(self):
super(TestApisProducers, self).setUp()
self.endpoint = self.api.Producers
@recorder.use_cassette()
def test_apis_producers_list(self):
"""It should parse the response and return the proper object."""
self._test_apis_objects_list(Producer)
@recorder.use_cassette()
def test_apis_producers_get(self):
"""It should return the proper singleton."""
self._test_apis_objects_get('Kiva')
@recorder.use_cassette()
def test_apis_producers_get_extracts(self):
"""It should return the extracts for a producer."""
self.UID = '0000000000VU7TG0000000000'
self._test_apis_objects_get_extracts()
@recorder.use_cassette()
def test_apis_producers_get_edibles(self):
"""It should return the edibles for a producer."""
self._test_apis_objects_get_edibles()
@recorder.use_cassette()
def test_apis_producers_get_products(self):
"""It should return the products for a producer."""
self.UID = '0000000000N4E9N0000000000'
self._test_apis_objects_get_products()
@recorder.use_cassette()
def test_apis_producers_get_available(self):
"""It should return the availables for a producer."""
self._test_apis_objects_get_available()
| en | 0.757085 | # -*- coding: utf-8 -*- # Copyright 2017-TODAY LasLabs Inc. # License MIT (https://opensource.org/licenses/MIT). Tests the Producers API endpoint. It should parse the response and return the proper object. It should return the proper singleton. It should return the extracts for a producer. It should return the edibles for a producer. It should return the products for a producer. It should return the availables for a producer. | 2.109188 | 2 |
start.py | kyleperik/squeaky | 0 | 6617756 | import socketio
import eventlet
import eventlet.wsgi
import werkzeug.serving
from squeaky import data
from squeaky import services
from squeaky import models
from flask import Flask, render_template
sio = socketio.Server()
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
    """Serve the collaborative editor page."""
    return render_template('index.html')
@sio.on('connect', namespace='/')
def connect(sid, environ):
    """Send the current document state to a newly connected client."""
    sio.emit('code', data.get_code(), room=sid)
    sio.emit('last changeid', data.get_changeid(), room=sid)
    print('connect', sid)
@sio.on('code change', namespace='/')
def code_change(sid, environ):
    """Apply a client's batch of edits to the shared code and broadcast them.

    ``environ`` is the event payload: ``{'changes': [...]}`` where each
    change carries CodeMirror-style from/to positions, the inserted text
    as a list of lines, and the last change id that client had seen.
    """
    # Change ids each client edit was based on, kept in payload order.
    last_changeids = [
        change['last_changeid']
        for change in environ['changes']]
    changes = [
        models.Change(
            from_pos = models.Position(
                change['from']['line'],
                change['from']['ch'],
            ),
            to_pos = models.Position(
                change['to']['line'],
                change['to']['ch'],
            ),
            # The client sends inserted text as a list of lines; rejoin it.
            text = str.join('\n', change['text']),
        )
        for change in environ['changes']]
    # Apply the edits to the authoritative server-side copy.
    new_code = services.transform(data.get_code(), changes)
    data.set_code(new_code)
    current_changeid = data.get_changeid()
    # Re-process each change server-side (presumably rebasing it against
    # current_changeid — confirm in services.apply_change).
    new_changes = [
        services.apply_change(c[0], c[1], current_changeid)
        for c in zip(changes, last_changeids)]
    print(new_changes)
    sio.emit('last changeid', data.get_changeid(), room=sid)
    result_changes = [
        c.serialize()
        for c in new_changes]
    # Forward the processed changes to every client except the sender.
    sio.emit('code change', result_changes, skip_sid=sid)
@sio.on('disconnect', namespace='/')
def disconnect(sid):
    """Log client disconnects."""
    print('disconnect', sid)
@werkzeug.serving.run_with_reloader
def run_server():
    """Start the eventlet WSGI server with the socket.io middleware.

    NOTE(review): run_with_reloader invokes the function at decoration
    time, so the server effectively starts on import, not only under
    __main__ — confirm this is intended.
    """
    sioapp = socketio.Middleware(sio, app)
    # NOTE(review): eventlet.wsgi.server() blocks and serves requests
    # itself; the serve_forever() call below looks unreachable — verify.
    ws = eventlet.wsgi.server(eventlet.listen(('', 8080)), sioapp)
    ws.serve_forever()
if __name__ == '__main__':
run_server()
| import socketio
import eventlet
import eventlet.wsgi
import werkzeug.serving
from squeaky import data
from squeaky import services
from squeaky import models
from flask import Flask, render_template
sio = socketio.Server()
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return render_template('index.html')
@sio.on('connect', namespace='/')
def connect(sid, environ):
sio.emit('code', data.get_code(), room=sid)
sio.emit('last changeid', data.get_changeid(), room=sid)
print('connect', sid)
@sio.on('code change', namespace='/')
def code_change(sid, environ):
last_changeids = [
change['last_changeid']
for change in environ['changes']]
changes = [
models.Change(
from_pos = models.Position(
change['from']['line'],
change['from']['ch'],
),
to_pos = models.Position(
change['to']['line'],
change['to']['ch'],
),
text = str.join('\n', change['text']),
)
for change in environ['changes']]
new_code = services.transform(data.get_code(), changes)
data.set_code(new_code)
current_changeid = data.get_changeid()
new_changes = [
services.apply_change(c[0], c[1], current_changeid)
for c in zip(changes, last_changeids)]
print(new_changes)
sio.emit('last changeid', data.get_changeid(), room=sid)
result_changes = [
c.serialize()
for c in new_changes]
sio.emit('code change', result_changes, skip_sid=sid)
@sio.on('disconnect', namespace='/')
def disconnect(sid):
print('disconnect', sid)
@werkzeug.serving.run_with_reloader
def run_server():
sioapp = socketio.Middleware(sio, app)
ws = eventlet.wsgi.server(eventlet.listen(('', 8080)), sioapp)
ws.serve_forever()
if __name__ == '__main__':
run_server()
| none | 1 | 2.19427 | 2 | |
bundles/finance/vendors/sa.py | briancappello/flask-techan-unchained | 0 | 6617757 | import json
import requests
def get_symbol_info_url(symbol):
return f'https://seekingalpha.com/symbol/{symbol.upper()}/overview'
def get_symbol_info(symbol):
"""
company info keys:
company_name
long_desc
"""
r = requests.get(get_symbol_info_url(symbol))
return json_string_to_dict(r.text)
def json_string_to_dict(html: str, search_start: str = '"symbolQuoteInfo":') -> dict:
start = html.find(search_start) + len(search_start)
end = -1
stack = 0
for i, char in enumerate(html[start:]):
if char == '{':
stack += 1
elif char == '}':
stack -= 1
if stack == 0:
end = start + i + 1
break
return json.loads(html[start:end])
| import json
import requests
def get_symbol_info_url(symbol):
return f'https://seekingalpha.com/symbol/{symbol.upper()}/overview'
def get_symbol_info(symbol):
"""
company info keys:
company_name
long_desc
"""
r = requests.get(get_symbol_info_url(symbol))
return json_string_to_dict(r.text)
def json_string_to_dict(html: str, search_start: str = '"symbolQuoteInfo":') -> dict:
start = html.find(search_start) + len(search_start)
end = -1
stack = 0
for i, char in enumerate(html[start:]):
if char == '{':
stack += 1
elif char == '}':
stack -= 1
if stack == 0:
end = start + i + 1
break
return json.loads(html[start:end])
| en | 0.321713 | company info keys: company_name long_desc | 3.234806 | 3 |
soke-scripts/process/wrangle.py | Wilfongjt/soke | 0 | 6617758 | <reponame>Wilfongjt/soke
from process.process import Process
class Wrangle(Process):
    """Processing step that holds column maps and a working dataframe."""

    def __init__(self, maps, expected_output_columns_list):
        # Column-mapping rules and the column layout the output must follow.
        self.maps = maps
        self.expected_output_columns_list = expected_output_columns_list

    def filename(self, in_f):
        """Return the final path component of *in_f*."""
        return in_f.rsplit('/', 1)[-1]

    def get_dataframe(self):
        """Return the dataframe currently attached to this step."""
        return self.dataframe

    def set_dataframe(self, dataframe):
        """Attach *dataframe* as this step's working data."""
        self.dataframe = dataframe
class Wrangle(Process):
def __init__(self, maps, expected_output_columns_list):
self.maps=maps
self.expected_output_columns_list=expected_output_columns_list
def filename(self, in_f):
ps = in_f.split('/')
return ps[len(ps)-1]
def get_dataframe(self):
return self.dataframe
def set_dataframe(self, dataframe):
self.dataframe = dataframe | none | 1 | 2.486434 | 2 | |
corehq/apps/sso/tests/test_context_helpers.py | akashkj/commcare-hq | 471 | 6617759 | import datetime
from unittest.mock import patch
from django.test import SimpleTestCase, TestCase
from corehq.apps.sso.utils.context_helpers import (
render_multiple_to_strings,
get_idp_cert_expiration_email_context,
)
from corehq.apps.sso.tests import generator
class TestSimpleIdPContextHelpers(SimpleTestCase):
    """Tests for render helpers that need no database access."""

    @patch("corehq.apps.sso.utils.context_helpers.render_to_string")
    def test_render_multiple_to_strings(self, mock_render):
        context = object()
        for templates in (["t0"], ["t1", "t2"]):
            # The helper is a generator; list() forces one render call
            # per template, all sharing the same context.
            list(render_multiple_to_strings(context, *templates))
            self.assertEqual(mock_render.call_count, len(templates))
            mock_render.assert_called_with(templates[-1], context)
            mock_render.reset_mock()
class TestIdPContextHelpers(TestCase):
    """Tests for the IdP certificate-expiration email context builder."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One billing account shared by all tests; removed in tearDownClass.
        cls.account = generator.get_billing_account_for_idp()

    def setUp(self):
        super().setUp()
        self.idp = generator.create_idp('vaultwax', self.account)

    def test_get_idp_cert_expiration_email_context(self):
        # An expiration date must be set for the email context to build.
        self.idp.date_idp_cert_expiration = datetime.datetime.utcnow()
        self.idp.save()
        self.assertSetEqual(set(get_idp_cert_expiration_email_context(self.idp)),
                            {"subject", "from", "to", "bcc", "html", "plaintext"})

    def tearDown(self):
        self.idp.delete()
        super().tearDown()

    @classmethod
    def tearDownClass(cls):
        cls.account.delete()
        super().tearDownClass()
| import datetime
from unittest.mock import patch
from django.test import SimpleTestCase, TestCase
from corehq.apps.sso.utils.context_helpers import (
render_multiple_to_strings,
get_idp_cert_expiration_email_context,
)
from corehq.apps.sso.tests import generator
class TestSimpleIdPContextHelpers(SimpleTestCase):
@patch("corehq.apps.sso.utils.context_helpers.render_to_string")
def test_render_multiple_to_strings(self, mock_render):
context = object()
for templates in (["t0"], ["t1", "t2"]):
list(render_multiple_to_strings(context, *templates))
self.assertEqual(mock_render.call_count, len(templates))
mock_render.assert_called_with(templates[-1], context)
mock_render.reset_mock()
class TestIdPContextHelpers(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.account = generator.get_billing_account_for_idp()
def setUp(self):
super().setUp()
self.idp = generator.create_idp('vaultwax', self.account)
def test_get_idp_cert_expiration_email_context(self):
self.idp.date_idp_cert_expiration = datetime.datetime.utcnow()
self.idp.save()
self.assertSetEqual(set(get_idp_cert_expiration_email_context(self.idp)),
{"subject", "from", "to", "bcc", "html", "plaintext"})
def tearDown(self):
self.idp.delete()
super().tearDown()
@classmethod
def tearDownClass(cls):
cls.account.delete()
super().tearDownClass()
| none | 1 | 2.254206 | 2 | |
esphome/components/pn532/__init__.py | OttoWinter/esphomeyaml | 249 | 6617760 | <filename>esphome/components/pn532/__init__.py
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.components import nfc
from esphome.const import CONF_ID, CONF_ON_TAG_REMOVED, CONF_ON_TAG, CONF_TRIGGER_ID
CODEOWNERS = ["@OttoWinter", "@jesserockz"]
AUTO_LOAD = ["binary_sensor", "nfc"]
MULTI_CONF = True
CONF_PN532_ID = "pn532_id"
CONF_ON_FINISHED_WRITE = "on_finished_write"
pn532_ns = cg.esphome_ns.namespace("pn532")
PN532 = pn532_ns.class_("PN532", cg.PollingComponent)
PN532OnFinishedWriteTrigger = pn532_ns.class_(
"PN532OnFinishedWriteTrigger", automation.Trigger.template()
)
PN532IsWritingCondition = pn532_ns.class_(
"PN532IsWritingCondition", automation.Condition
)
PN532_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(PN532),
cv.Optional(CONF_ON_TAG): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(nfc.NfcOnTagTrigger),
}
),
cv.Optional(CONF_ON_FINISHED_WRITE): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
PN532OnFinishedWriteTrigger
),
}
),
cv.Optional(CONF_ON_TAG_REMOVED): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(nfc.NfcOnTagTrigger),
}
),
}
).extend(cv.polling_component_schema("1s"))
def CONFIG_SCHEMA(conf):
    """Reject any legacy top-level `pn532:` config block.

    The component was restructured in 1.16; any non-empty config here is
    an error pointing users at the migration docs.
    """
    if conf:
        raise cv.Invalid(
            "This component has been moved in 1.16, please see the docs for updated "
            "instructions. https://esphome.io/components/binary_sensor/pn532.html"
        )
async def setup_pn532(var, config):
    """Shared code generation for a PN532 hub component.

    Registers the polling component and wires each configured automation
    (on_tag, on_tag_removed, on_finished_write) into the generated C++.
    """
    await cg.register_component(var, config)
    for conf in config.get(CONF_ON_TAG, []):
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID])
        cg.add(var.register_ontag_trigger(trigger))
        # Trigger fires with the tag UID string ("x") and the NfcTag object.
        await automation.build_automation(
            trigger, [(cg.std_string, "x"), (nfc.NfcTag, "tag")], conf
        )
    for conf in config.get(CONF_ON_TAG_REMOVED, []):
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID])
        cg.add(var.register_ontagremoved_trigger(trigger))
        await automation.build_automation(
            trigger, [(cg.std_string, "x"), (nfc.NfcTag, "tag")], conf
        )
    for conf in config.get(CONF_ON_FINISHED_WRITE, []):
        # This trigger is constructed with the hub itself; no template args.
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
        await automation.build_automation(trigger, [], conf)
@automation.register_condition(
    "pn532.is_writing",
    PN532IsWritingCondition,
    cv.Schema(
        {
            cv.GenerateID(): cv.use_id(PN532),
        }
    ),
)
async def pn532_is_writing_to_code(config, condition_id, template_arg, args):
    """Generate the `pn532.is_writing` condition, parented to its PN532 hub."""
    var = cg.new_Pvariable(condition_id, template_arg)
    await cg.register_parented(var, config[CONF_ID])
    return var
| <filename>esphome/components/pn532/__init__.py
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.components import nfc
from esphome.const import CONF_ID, CONF_ON_TAG_REMOVED, CONF_ON_TAG, CONF_TRIGGER_ID
CODEOWNERS = ["@OttoWinter", "@jesserockz"]
AUTO_LOAD = ["binary_sensor", "nfc"]
MULTI_CONF = True
CONF_PN532_ID = "pn532_id"
CONF_ON_FINISHED_WRITE = "on_finished_write"
pn532_ns = cg.esphome_ns.namespace("pn532")
PN532 = pn532_ns.class_("PN532", cg.PollingComponent)
PN532OnFinishedWriteTrigger = pn532_ns.class_(
"PN532OnFinishedWriteTrigger", automation.Trigger.template()
)
PN532IsWritingCondition = pn532_ns.class_(
"PN532IsWritingCondition", automation.Condition
)
PN532_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(PN532),
cv.Optional(CONF_ON_TAG): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(nfc.NfcOnTagTrigger),
}
),
cv.Optional(CONF_ON_FINISHED_WRITE): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
PN532OnFinishedWriteTrigger
),
}
),
cv.Optional(CONF_ON_TAG_REMOVED): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(nfc.NfcOnTagTrigger),
}
),
}
).extend(cv.polling_component_schema("1s"))
def CONFIG_SCHEMA(conf):
if conf:
raise cv.Invalid(
"This component has been moved in 1.16, please see the docs for updated "
"instructions. https://esphome.io/components/binary_sensor/pn532.html"
)
async def setup_pn532(var, config):
await cg.register_component(var, config)
for conf in config.get(CONF_ON_TAG, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID])
cg.add(var.register_ontag_trigger(trigger))
await automation.build_automation(
trigger, [(cg.std_string, "x"), (nfc.NfcTag, "tag")], conf
)
for conf in config.get(CONF_ON_TAG_REMOVED, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID])
cg.add(var.register_ontagremoved_trigger(trigger))
await automation.build_automation(
trigger, [(cg.std_string, "x"), (nfc.NfcTag, "tag")], conf
)
for conf in config.get(CONF_ON_FINISHED_WRITE, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
await automation.build_automation(trigger, [], conf)
@automation.register_condition(
"pn532.is_writing",
PN532IsWritingCondition,
cv.Schema(
{
cv.GenerateID(): cv.use_id(PN532),
}
),
)
async def pn532_is_writing_to_code(config, condition_id, template_arg, args):
var = cg.new_Pvariable(condition_id, template_arg)
await cg.register_parented(var, config[CONF_ID])
return var
| none | 1 | 2.006848 | 2 | |
scripts/h3m/utils.py | gaurvigoyal/lifting_events_to_3d_hpe | 19 | 6617761 | <filename>scripts/h3m/utils.py
"""
Author: <NAME> - <EMAIL>
"""
from typing import Tuple
import numpy as np
def normalized_3sigma(input_img: np.ndarray) -> np.ndarray:
    """Rescale an event-count image so 3 sigma of its nonzero pixels spans 0-255.

    Zero pixels stay zero; scaled values are clipped to [0, 255] and the
    result is returned as uint8.
    """
    img = input_img.copy().astype('float')
    sig_img = img[img > 0].std()
    # Guard against a near-constant image producing a huge scale factor.
    if sig_img < 0.1 / 255:
        sig_img = 0.1 / 255
    num_sdevs = 3.0
    # Renamed from `range`: the original shadowed the builtin.
    value_range = num_sdevs * sig_img
    img[img != 0] *= 255 / value_range
    img[img < 0] = 0
    img[img > 255] = 255
    return img.astype('uint8')
def voxel_grid_joint_generator(
    events: np.ndarray,
    joints: np.ndarray,
    num_events: int,
    frame_size: Tuple[int, int],
    n_cameras: int = 4,
    n_bins: int = 4,
) -> np.ndarray:
    """Yield (voxel_frame, t) pairs of constant-count voxel-grid frames.

    Every ``num_events`` events a per-camera voxel grid of shape
    ``(n_cameras, H, W, n_bins)`` is normalized and yielded together with
    the timestamp of the last event.  Joint labels were acquired at
    200 fps, so generation stops once events pass the labelled span.
    """
    voxel_frame = np.zeros(
        (n_cameras, frame_size[0], frame_size[1], n_bins), dtype="int"
    )
    upper_bound = len(joints) * 1 / 200
    init_slice = 0
    # Time origin and span of the first window, used to place events in bins.
    t0 = events[0][2]
    dt = events[num_events][2] - events[0][2]
    for ind, event in enumerate(events):
        y = int(event[0])
        x = int(event[1])
        ti = event[2]
        pi = event[3]
        cam = int(event[-1])  # using camera info similar to DHP19
        # NOTE(review): this 3-index assignment broadcasts over the bin
        # axis, adding 1 to ALL n_bins bins of the pixel — confirm intended.
        voxel_frame[cam, x, y] += 1
        # Fractional bin position of the event within the window's timespan.
        t_split = (n_bins - 1) / dt * (ti - t0) + 1
        for t_bin in range(0, n_bins):
            # Bilinear (triangular) weighting of polarity into adjacent bins.
            voxel_frame[cam, x, y, t_bin] += pi * max(0, 1 - np.abs(t_bin - t_split))
        if ti > upper_bound:
            # Recording ends here
            return
        if (ind + 1) % num_events == 0:
            for idx in range(n_cameras):
                voxel_frame[idx] = normalized_3sigma(voxel_frame[idx])
            yield voxel_frame, ti
            voxel_frame = np.zeros_like(voxel_frame)
            # NOTE(review): init_slice = ind makes the next window's t0 the
            # LAST event of this window rather than the first of the next
            # (ind + 1) — verify this off-by-one is intentional.
            init_slice = ind
            final_slice = min(init_slice + num_events - 1, len(events) - 1)
            t0 = events[init_slice, 2]
            dt = events[final_slice, 2] - t0
def joint_generator(
    events: np.ndarray, joints: np.ndarray, num_events: int
) -> np.ndarray:
    """Yield the mean 3D joint pose over each constant-count event window.

    Joint labels were captured at 200 fps; each yielded pose is the
    nan-mean of the label rows spanned by ``num_events`` consecutive
    events.  Stops once events outlive the labelled time span.
    """
    joint_data_fps = 200
    labels_end = len(joints) / joint_data_fps
    window_start = 0
    for count, event in enumerate(events, start=1):
        timestamp = event[2]
        if timestamp > labels_end:
            # Events continue past the labelled portion of the recording.
            return
        if count % num_events:
            continue
        # Window boundary reached: average the label rows it spans.
        window_end = int(timestamp * joint_data_fps) + 1
        yield np.nanmean(joints[window_start:window_end, :], 0)
        window_start = window_end
def constant_count_joint_generator(
    events: np.ndarray,
    joints: np.ndarray,
    num_events: int,
    frame_size: Tuple[int, int],
    n_cameras: int = 4,
) -> np.ndarray:
    """Yield (frames, t) pairs of constant-count event frames.

    Accumulates ``num_events`` events into per-camera count images of
    shape ``(n_cameras, H, W)``, normalizes each with
    ``normalized_3sigma`` and yields the stack with the timestamp of the
    last event in the window.  Joint labels were acquired at 200 fps, so
    generation stops once events pass the labelled time span.
    """
    event_count_frame = np.zeros((n_cameras, frame_size[0], frame_size[1]), dtype="int")
    upper_bound = len(joints) * 1 / 200
    for ind, event in enumerate(events):
        y = int(event[0])
        x = int(event[1])
        t = event[2]
        cam = int(event[-1])  # camera id stored in the last column (as in DHP19)
        event_count_frame[cam, x, y] += 1
        if t > upper_bound:
            # Recording's labelled span ends here.
            return
        if (ind + 1) % num_events == 0:
            for idx in range(n_cameras):
                event_count_frame[idx] = normalized_3sigma(event_count_frame[idx])
            # BUG FIX: the original yielded the undefined name `ti`
            # (NameError on first yield); this window's timestamp is `t`.
            yield event_count_frame, t
            event_count_frame = np.zeros_like(event_count_frame)
def timestamps_generator(
    events: np.ndarray,
    joints: np.ndarray,
    num_events: int,
    frame_size: Tuple[int, int],
    n_cameras: int = 4,
) -> np.ndarray:
    """Yield ``(None, t)`` at every constant-count frame boundary.

    Mirrors the frame generators' signature but emits only the boundary
    timestamps.  Stops once events outlive the joint labels, which were
    captured at 200 fps.
    """
    labels_end = len(joints) / 200
    for position, event in enumerate(events, start=1):
        timestamp = event[2]
        if timestamp > labels_end:
            # Past the labelled portion of the recording.
            return
        if position % num_events == 0:
            yield None, timestamp
| <filename>scripts/h3m/utils.py
"""
Author: <NAME> - <EMAIL>
"""
from typing import Tuple
import numpy as np
def normalized_3sigma(input_img: np.ndarray) -> np.ndarray:
img = input_img.copy().astype('float')
sig_img = img[img > 0].std()
if sig_img < 0.1 / 255:
sig_img = 0.1 / 255
numSdevs = 3.0
range = numSdevs * sig_img
img[img != 0] *= 255 / range
img[img < 0] = 0
img[img > 255] = 255
return img.astype('uint8')
def voxel_grid_joint_generator(
events: np.ndarray,
joints: np.ndarray,
num_events: int,
frame_size: Tuple[int, int],
n_cameras: int = 4,
n_bins: int = 4,
) -> np.ndarray:
"""
Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps
"""
voxel_frame = np.zeros(
(n_cameras, frame_size[0], frame_size[1], n_bins), dtype="int"
)
upper_bound = len(joints) * 1 / 200
init_slice = 0
t0 = events[0][2]
dt = events[num_events][2] - events[0][2]
for ind, event in enumerate(events):
y = int(event[0])
x = int(event[1])
ti = event[2]
pi = event[3]
cam = int(event[-1]) # using camera info similar to DHP19
voxel_frame[cam, x, y] += 1
t_split = (n_bins - 1) / dt * (ti - t0) + 1
for t_bin in range(0, n_bins):
voxel_frame[cam, x, y, t_bin] += pi * max(0, 1 - np.abs(t_bin - t_split))
if ti > upper_bound:
# Recording ends here
return
if (ind + 1) % num_events == 0:
for idx in range(n_cameras):
voxel_frame[idx] = normalized_3sigma(voxel_frame[idx])
yield voxel_frame, ti
voxel_frame = np.zeros_like(voxel_frame)
init_slice = ind
final_slice = min(init_slice + num_events - 1, len(events) - 1)
t0 = events[init_slice, 2]
dt = events[final_slice, 2] - t0
def joint_generator(
events: np.ndarray, joints: np.ndarray, num_events: int
) -> np.ndarray:
"""
Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps
"""
start_joint_data_index = 0
joint_data_fps = 200
upper_bound = len(joints) * 1 / 200
for ind, event in enumerate(events):
ti = event[2]
if ti > upper_bound:
# Recording ends here
return
if (ind + 1) % num_events == 0:
end_joint_data_index = int(ti * joint_data_fps) + 1
joints_per_frame = np.nanmean(
joints[start_joint_data_index:end_joint_data_index, :], 0
)
yield joints_per_frame
start_joint_data_index = end_joint_data_index
def constant_count_joint_generator(
events: np.ndarray,
joints: np.ndarray,
num_events: int,
frame_size: Tuple[int, int],
n_cameras: int = 4,
) -> np.ndarray:
"""
Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps
"""
event_count_frame = np.zeros((n_cameras, frame_size[0], frame_size[1]), dtype="int")
upper_bound = len(joints) * 1 / 200
for ind, event in enumerate(events):
y = int(event[0])
x = int(event[1])
t = event[2]
cam = int(event[-1]) # using camera info similar to DHP19
event_count_frame[cam, x, y] += 1
if t > upper_bound:
# Recording ends here
return
if (ind + 1) % num_events == 0:
for idx in range(n_cameras):
event_count_frame[idx] = normalized_3sigma(event_count_frame[idx])
yield event_count_frame, ti
event_count_frame = np.zeros_like(event_count_frame)
def timestamps_generator(
events: np.ndarray,
joints: np.ndarray,
num_events: int,
frame_size: Tuple[int, int],
n_cameras: int = 4,
) -> np.ndarray:
"""
Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps
"""
upper_bound = len(joints) * 1 / 200
for ind, event in enumerate(events):
t = event[2]
if t > upper_bound:
# Recording ends here
return
if (ind + 1) % num_events == 0:
yield None, t
| en | 0.929039 | Author: <NAME> - <EMAIL> Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps # using camera info similar to DHP19 # Recording ends here Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps # Recording ends here Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps # using camera info similar to DHP19 # Recording ends here Generate constant_count frames and corresponding gt 3D joints labels. 3D joints labels were acquired at 200fps # Recording ends here | 2.531568 | 3 |
pydantabase/database.py | tombulled/pydantable | 1 | 6617762 | <gh_stars>1-10
import tinydb
import functools
from . import document
from . import table
class Database(tinydb.TinyDB):
def __init__(self, model: type, *args, **kwargs):
super().__init__(*args, **kwargs)
@functools.wraps(model, updated = ())
class Document(document.Document, model): pass
@functools.wraps(table.Table, updated = ())
class Table(table.Table):
document_class: type = Document
self.table_class = Table
| import tinydb
import functools
from . import document
from . import table
class Database(tinydb.TinyDB):
def __init__(self, model: type, *args, **kwargs):
super().__init__(*args, **kwargs)
@functools.wraps(model, updated = ())
class Document(document.Document, model): pass
@functools.wraps(table.Table, updated = ())
class Table(table.Table):
document_class: type = Document
self.table_class = Table | none | 1 | 2.414808 | 2 | |
pycan/tools/traffic_generator.py | m-jez/pycan | 13 | 6617763 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 The pycan developers. All rights reserved.
# Project site: https://github.com/questrail/pycan
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
"""Generic CAN Traffic Generator Built on pycan
These base classes provide the common/base CAN functionality that is shared
among a
"""
class TrafficGenerator(object):
def __init__(self, driver):
pass
| # -*- coding: utf-8 -*-
# Copyright (c) 2013 The pycan developers. All rights reserved.
# Project site: https://github.com/questrail/pycan
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
"""Generic CAN Traffic Generator Built on pycan
These base classes provide the common/base CAN functionality that is shared
among a
"""
class TrafficGenerator(object):
def __init__(self, driver):
pass
| en | 0.906047 | # -*- coding: utf-8 -*- # Copyright (c) 2013 The pycan developers. All rights reserved. # Project site: https://github.com/questrail/pycan # Use of this source code is governed by a MIT-style license that # can be found in the LICENSE.txt file for the project. Generic CAN Traffic Generator Built on pycan These base classes provide the common/base CAN functionality that is shared among a | 1.650683 | 2 |
kodownik/widget/MainMenu.py | przemekkot/kodownik | 0 | 6617764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from kivy.app import App
from kivy.properties import VariableListProperty
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
_learn = "Nauka"
_test = "Test"
_exit = "Wyjście"
class MainMenu(GridLayout):
padding = VariableListProperty(100)
spacing = VariableListProperty(10, length=2)
learn_button = Button(text=_learn, size_hint=(.3, .3))
test_button = Button(text=_test, size_hint=(.3, .3))
exit_button = Button(text=_exit, size_hint=(.3, .3))
def __init__(self, **kwargs):
super(MainMenu, self).__init__(**kwargs)
self.show_menu()
def show_menu(self):
self.add_widget(self.learn_button)
self.add_widget(self.test_button)
self.add_widget(self.exit_button)
self.learn_button.bind(on_press=self.begin_learning)
self.test_button.bind(on_press=self.begin_test)
self.exit_button.bind(on_press=self.exit_app)
def begin_learning(self, learn_button):
kodownik_window = self.parent
kodownik_window.show_learning_screen()
pass
def begin_test(self, test_button):
kodownik_window = self.parent
kodownik_window.show_testing_screen()
def exit_app(self, exit_button):
App.get_running_app().stop() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from kivy.app import App
from kivy.properties import VariableListProperty
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
_learn = "Nauka"
_test = "Test"
_exit = "Wyjście"
class MainMenu(GridLayout):
padding = VariableListProperty(100)
spacing = VariableListProperty(10, length=2)
learn_button = Button(text=_learn, size_hint=(.3, .3))
test_button = Button(text=_test, size_hint=(.3, .3))
exit_button = Button(text=_exit, size_hint=(.3, .3))
def __init__(self, **kwargs):
super(MainMenu, self).__init__(**kwargs)
self.show_menu()
def show_menu(self):
self.add_widget(self.learn_button)
self.add_widget(self.test_button)
self.add_widget(self.exit_button)
self.learn_button.bind(on_press=self.begin_learning)
self.test_button.bind(on_press=self.begin_test)
self.exit_button.bind(on_press=self.exit_app)
def begin_learning(self, learn_button):
kodownik_window = self.parent
kodownik_window.show_learning_screen()
pass
def begin_test(self, test_button):
kodownik_window = self.parent
kodownik_window.show_testing_screen()
def exit_app(self, exit_button):
App.get_running_app().stop() | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.810242 | 3 |
libavg_charts/aid_lines/helper/data_point_snapping_methods.py | imldresden/mcv-displaywall | 2 | 6617765 | <filename>libavg_charts/aid_lines/helper/data_point_snapping_methods.py<gh_stars>1-10
from libavg.avg import RectNode, CircleNode, PolyLineNode
from libavg_charts.aid_lines.helper.data_point_snapping_method_holder import DataPointSnappingMethodHolder
from libavg_charts.axis.chart_axis_enums import Orientation
from libavg_charts.charts.bar_chart import BarChart
from libavg_charts.charts.bar_chart_special import *
from libavg_charts.charts.line_chart import LineChart
from libavg_charts.charts.parallel_coordinates_plot import ParallelCoordinatesPlot
from libavg_charts.charts.scatter_plot import ScatterPlot
def add_snapping_methods_to_method_holder():
"""
Adds the default methods to the DataPointSnappingMethodHolder.
"""
DataPointSnappingMethodHolder.add_snapping_method(LineChart, Orientation.Horizontal, get_snapping_pos_line_chart_horizontal)
DataPointSnappingMethodHolder.add_snapping_method(LineChart, Orientation.Vertical, get_snapping_pos_line_chart_vertical)
def get_snapping_pos_line_chart_horizontal(data_objects, line_pos):
"""
Calculates the nearest pos of and objects from a line chart with an horizontal line.
:param data_objects: The nodes of the data objects in the chart.
:type data_objects: dict[str, PolyLineNode]
:param line_pos: The necessary position of the node of the line according to the orientation.
:type line_pos: float
:return: The nearest pos (y) of an data object in the given chart.
:rtype: float
"""
nearest_pos = []
for node in data_objects.itervalues():
nearest_pos.append(sorted(node.pos, key=lambda pos: abs(line_pos - pos[1]))[0])
return sorted(nearest_pos, key=lambda pos: abs(line_pos - pos[1]))[0][1]
def get_snapping_pos_line_chart_vertical(data_objects, line_pos):
"""
Calculates the nearest pos of and objects from a line chart with an vertical line.
:param data_objects: The nodes of the data objects in the chart.
:type data_objects: dict[str, PolyLineNode]
:param line_pos: The necessary position of the node of the line according to the orientation.
:type line_pos: float
:return: The nearest pos (x) of an data object in the given chart.
:rtype: float
"""
nearest_pos = []
for node in data_objects.itervalues():
nearest_pos.append(sorted(node.pos, key=lambda pos: abs(line_pos - pos[0]))[0])
return sorted(nearest_pos, key=lambda pos: abs(line_pos - pos[0]))[0][0]
| <filename>libavg_charts/aid_lines/helper/data_point_snapping_methods.py<gh_stars>1-10
from libavg.avg import RectNode, CircleNode, PolyLineNode
from libavg_charts.aid_lines.helper.data_point_snapping_method_holder import DataPointSnappingMethodHolder
from libavg_charts.axis.chart_axis_enums import Orientation
from libavg_charts.charts.bar_chart import BarChart
from libavg_charts.charts.bar_chart_special import *
from libavg_charts.charts.line_chart import LineChart
from libavg_charts.charts.parallel_coordinates_plot import ParallelCoordinatesPlot
from libavg_charts.charts.scatter_plot import ScatterPlot
def add_snapping_methods_to_method_holder():
"""
Adds the default methods to the DataPointSnappingMethodHolder.
"""
DataPointSnappingMethodHolder.add_snapping_method(LineChart, Orientation.Horizontal, get_snapping_pos_line_chart_horizontal)
DataPointSnappingMethodHolder.add_snapping_method(LineChart, Orientation.Vertical, get_snapping_pos_line_chart_vertical)
def get_snapping_pos_line_chart_horizontal(data_objects, line_pos):
"""
Calculates the nearest pos of and objects from a line chart with an horizontal line.
:param data_objects: The nodes of the data objects in the chart.
:type data_objects: dict[str, PolyLineNode]
:param line_pos: The necessary position of the node of the line according to the orientation.
:type line_pos: float
:return: The nearest pos (y) of an data object in the given chart.
:rtype: float
"""
nearest_pos = []
for node in data_objects.itervalues():
nearest_pos.append(sorted(node.pos, key=lambda pos: abs(line_pos - pos[1]))[0])
return sorted(nearest_pos, key=lambda pos: abs(line_pos - pos[1]))[0][1]
def get_snapping_pos_line_chart_vertical(data_objects, line_pos):
"""
Calculates the nearest pos of and objects from a line chart with an vertical line.
:param data_objects: The nodes of the data objects in the chart.
:type data_objects: dict[str, PolyLineNode]
:param line_pos: The necessary position of the node of the line according to the orientation.
:type line_pos: float
:return: The nearest pos (x) of an data object in the given chart.
:rtype: float
"""
nearest_pos = []
for node in data_objects.itervalues():
nearest_pos.append(sorted(node.pos, key=lambda pos: abs(line_pos - pos[0]))[0])
return sorted(nearest_pos, key=lambda pos: abs(line_pos - pos[0]))[0][0]
| en | 0.725093 | Adds the default methods to the DataPointSnappingMethodHolder. Calculates the nearest pos of and objects from a line chart with an horizontal line. :param data_objects: The nodes of the data objects in the chart. :type data_objects: dict[str, PolyLineNode] :param line_pos: The necessary position of the node of the line according to the orientation. :type line_pos: float :return: The nearest pos (y) of an data object in the given chart. :rtype: float Calculates the nearest pos of and objects from a line chart with an vertical line. :param data_objects: The nodes of the data objects in the chart. :type data_objects: dict[str, PolyLineNode] :param line_pos: The necessary position of the node of the line according to the orientation. :type line_pos: float :return: The nearest pos (x) of an data object in the given chart. :rtype: float | 2.558004 | 3 |
aio_osservaprezzi/exceptions.py | eliseomartelli/aio_osservaprezzi | 0 | 6617766 | """Exceptions for aio_osservaprezzi."""
class OsservaPrezziException(Exception):
"""OsservaPrezzi generic exception."""
pass
class OsservaPrezziConnectionError(OsservaPrezziException):
"""OsservaPrezzi connection error exception."""
pass
class RegionNotFoundException(OsservaPrezziException):
"""RegionNotFoundException."""
pass
class StationsNotFoundException(OsservaPrezziException):
"""StationsNotFoundException."""
pass
| """Exceptions for aio_osservaprezzi."""
class OsservaPrezziException(Exception):
"""OsservaPrezzi generic exception."""
pass
class OsservaPrezziConnectionError(OsservaPrezziException):
"""OsservaPrezzi connection error exception."""
pass
class RegionNotFoundException(OsservaPrezziException):
"""RegionNotFoundException."""
pass
class StationsNotFoundException(OsservaPrezziException):
"""StationsNotFoundException."""
pass
| en | 0.315511 | Exceptions for aio_osservaprezzi. OsservaPrezzi generic exception. OsservaPrezzi connection error exception. RegionNotFoundException. StationsNotFoundException. | 2.044659 | 2 |
code/solution.py | kyclark/remote.python.pizza | 3 | 6617767 | <filename>code/solution.py<gh_stars>1-10
#!/usr/bin/env python3
"""Add two integer values"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Add two integer values',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('numbers',
metavar='int',
nargs=2,
type=int,
help='Two numbers to add')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
n1, n2 = args.numbers
print(f'{n1} + {n2} = {n1 + n2}')
# --------------------------------------------------
if __name__ == '__main__':
main()
| <filename>code/solution.py<gh_stars>1-10
#!/usr/bin/env python3
"""Add two integer values"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Add two integer values',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('numbers',
metavar='int',
nargs=2,
type=int,
help='Two numbers to add')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
n1, n2 = args.numbers
print(f'{n1} + {n2} = {n1 + n2}')
# --------------------------------------------------
if __name__ == '__main__':
main()
| en | 0.149998 | #!/usr/bin/env python3 Add two integer values # -------------------------------------------------- Get command-line arguments # -------------------------------------------------- Make a jazz noise here # -------------------------------------------------- | 3.811996 | 4 |
HW6/Andrii_Haponov/hw_6_2.py | kolyasalubov/Lv-677.PythonCore | 0 | 6617768 | <filename>HW6/Andrii_Haponov/hw_6_2.py
# Task2. Write a program that calculates the area of a rectangle, triangle and circle
# (write three functions to calculate the square,
# and call them in the main program depending on the user's choice).
# Задача2. Написать программу, вычисляющую площадь прямоугольника, треугольника и круга
# (напишите три функции для вычисления квадрата,
# и вызывать их в основной программе в зависимости от выбора пользователя).
import math
def erea_rectangle(a, b):
p_rectangle = a*b
return p_rectangle
def erea_triangle(c, h):
p_triangle = c * h * 0.5
return p_triangle
def erea_circle(r):
p_circle = math.pi * r**2
return p_circle
figure = int(input(
"Enter 1 if calculates the area of a rectangle,\
2 if calculates the area of a triangle and \
3 if calculates the area of a circle : "
))
if figure == 1:
print(erea_rectangle(int(input("Enter the side 1: ")), int(input("Enter the side 1: "))))
elif figure == 2 :
print(erea_triangle(int(input("Enter the triangle height: ")), int(input("Enter the base of a triangle: "))))
elif figure == 3:
print(erea_circle(int(input("Enter the circle radius: "))))
else:
print("Error you do not enter a valid number!!!")
| <filename>HW6/Andrii_Haponov/hw_6_2.py
# Task2. Write a program that calculates the area of a rectangle, triangle and circle
# (write three functions to calculate the square,
# and call them in the main program depending on the user's choice).
# Задача2. Написать программу, вычисляющую площадь прямоугольника, треугольника и круга
# (напишите три функции для вычисления квадрата,
# и вызывать их в основной программе в зависимости от выбора пользователя).
import math
def erea_rectangle(a, b):
p_rectangle = a*b
return p_rectangle
def erea_triangle(c, h):
p_triangle = c * h * 0.5
return p_triangle
def erea_circle(r):
p_circle = math.pi * r**2
return p_circle
figure = int(input(
"Enter 1 if calculates the area of a rectangle,\
2 if calculates the area of a triangle and \
3 if calculates the area of a circle : "
))
if figure == 1:
print(erea_rectangle(int(input("Enter the side 1: ")), int(input("Enter the side 1: "))))
elif figure == 2 :
print(erea_triangle(int(input("Enter the triangle height: ")), int(input("Enter the base of a triangle: "))))
elif figure == 3:
print(erea_circle(int(input("Enter the circle radius: "))))
else:
print("Error you do not enter a valid number!!!")
| ru | 0.784076 | # Task2. Write a program that calculates the area of a rectangle, triangle and circle # (write three functions to calculate the square, # and call them in the main program depending on the user's choice). # Задача2. Написать программу, вычисляющую площадь прямоугольника, треугольника и круга # (напишите три функции для вычисления квадрата, # и вызывать их в основной программе в зависимости от выбора пользователя). | 4.495961 | 4 |
inc_luigi.py | SANDAG/DEFM | 0 | 6617769 | import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class IncPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='inc_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop = pop[pop['age'] >= 18]
pop['age_cat'] = ''
pop.loc[pop['age'].isin(list(range(18, 25))), ['age_cat']] = '18_24'
pop.loc[pop['age'].isin(list(range(25, 35))), ['age_cat']] = '25_34'
pop.loc[pop['age'].isin(list(range(35, 45))), ['age_cat']] = '35_44'
pop.loc[pop['age'].isin(list(range(45, 55))), ['age_cat']] = '45_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 65))), ['age_cat']] = '60_64'
pop.loc[pop['age'].isin(list(range(65, 75))), ['age_cat']] = '65_74'
pop.loc[pop['age'].isin(list(range(75, 103))), ['age_cat']] = '75_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
class IncomeByType(luigi.Task):
econ = luigi.Parameter()
dem = luigi.Parameter()
@property
def priority(self):
return 3
def requires(self):
return IncPopulation(econ_id=self.econ, dem_id=self.dem)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
pop = pd.read_hdf('temp/data.h5', 'pop')
inc_type_rates = extract.create_df('inc_shares', 'inc_shares_table', rate_id=econ_sim_rates.inc1_id[0], index=['yr', 'age_cat'])
inc_type_rates = inc_type_rates.join(pop)
inc_type_rates['totals'] = (inc_type_rates['income'] * inc_type_rates['persons'] * inc_type_rates['share'])
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates['multiplier'] = 0
aigr_table = extract.create_df('aigr', 'aigr_table', rate_id=econ_sim_rates.aigr_id[0], index=None)
inc_type_rates.loc[inc_type_rates['yr'] > 2014, ['multiplier']] = (aigr_table.aigr[0] * (inc_type_rates['yr'] - 2014))
# pow(1.01, mil_wages.index.get_level_values('yr') - 2014)
inc_type_rates['totals'] = (inc_type_rates['totals'] + inc_type_rates['totals'] * inc_type_rates['multiplier'])
inc_type_rates = pd.DataFrame(inc_type_rates['totals'].groupby([inc_type_rates['yr'], inc_type_rates['income_type']]).sum())
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates = pd.pivot_table(inc_type_rates, values='totals',
index=['yr'],
columns=['income_type'])
# inc_type_rates.to_hdf('temp/data.h5', 'inc_type_rates', mode='a')
inc_type_rates.rename(columns={'intp': 'Interest'}, inplace=True)
inc_type_rates.rename(columns={'oip': 'Other'}, inplace=True)
inc_type_rates.rename(columns={'pap': 'Public_Assistance'}, inplace=True)
inc_type_rates.rename(columns={'retp': 'Retirement'}, inplace=True)
inc_type_rates.rename(columns={'ssip': 'Supplemental_Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'ssp': 'Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'semp': 'Selfemp_Income'}, inplace=True)
inc_type_rates = inc_type_rates[['Interest', 'Other', 'Public_Assistance', 'Retirement',
'Supplemental_Social_Security', 'Social_Security', 'Selfemp_Income']]
inc_type_rates.to_hdf('temp/data.h5', 'ue_income')
run_table = pd.read_hdf('temp/data.h5', 'run_id')
run_id = run_table[0]
inc_type_rates['run_id'] = run_id
inc_type_rates.to_sql(name='non_wage_income', con=engine, schema='defm', if_exists='append', index=True)
if __name__ == '__main__':
os.makedirs('temp')
luigi.run(main_task_cls=IncomeByType, cmdline_args=['--dem=1005', '--econ=1002'])
shutil.rmtree('temp')
| import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class IncPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='inc_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop = pop[pop['age'] >= 18]
pop['age_cat'] = ''
pop.loc[pop['age'].isin(list(range(18, 25))), ['age_cat']] = '18_24'
pop.loc[pop['age'].isin(list(range(25, 35))), ['age_cat']] = '25_34'
pop.loc[pop['age'].isin(list(range(35, 45))), ['age_cat']] = '35_44'
pop.loc[pop['age'].isin(list(range(45, 55))), ['age_cat']] = '45_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 65))), ['age_cat']] = '60_64'
pop.loc[pop['age'].isin(list(range(65, 75))), ['age_cat']] = '65_74'
pop.loc[pop['age'].isin(list(range(75, 103))), ['age_cat']] = '75_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
class IncomeByType(luigi.Task):
econ = luigi.Parameter()
dem = luigi.Parameter()
@property
def priority(self):
return 3
def requires(self):
return IncPopulation(econ_id=self.econ, dem_id=self.dem)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
pop = pd.read_hdf('temp/data.h5', 'pop')
inc_type_rates = extract.create_df('inc_shares', 'inc_shares_table', rate_id=econ_sim_rates.inc1_id[0], index=['yr', 'age_cat'])
inc_type_rates = inc_type_rates.join(pop)
inc_type_rates['totals'] = (inc_type_rates['income'] * inc_type_rates['persons'] * inc_type_rates['share'])
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates['multiplier'] = 0
aigr_table = extract.create_df('aigr', 'aigr_table', rate_id=econ_sim_rates.aigr_id[0], index=None)
inc_type_rates.loc[inc_type_rates['yr'] > 2014, ['multiplier']] = (aigr_table.aigr[0] * (inc_type_rates['yr'] - 2014))
# pow(1.01, mil_wages.index.get_level_values('yr') - 2014)
inc_type_rates['totals'] = (inc_type_rates['totals'] + inc_type_rates['totals'] * inc_type_rates['multiplier'])
inc_type_rates = pd.DataFrame(inc_type_rates['totals'].groupby([inc_type_rates['yr'], inc_type_rates['income_type']]).sum())
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates = pd.pivot_table(inc_type_rates, values='totals',
index=['yr'],
columns=['income_type'])
# inc_type_rates.to_hdf('temp/data.h5', 'inc_type_rates', mode='a')
inc_type_rates.rename(columns={'intp': 'Interest'}, inplace=True)
inc_type_rates.rename(columns={'oip': 'Other'}, inplace=True)
inc_type_rates.rename(columns={'pap': 'Public_Assistance'}, inplace=True)
inc_type_rates.rename(columns={'retp': 'Retirement'}, inplace=True)
inc_type_rates.rename(columns={'ssip': 'Supplemental_Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'ssp': 'Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'semp': 'Selfemp_Income'}, inplace=True)
inc_type_rates = inc_type_rates[['Interest', 'Other', 'Public_Assistance', 'Retirement',
'Supplemental_Social_Security', 'Social_Security', 'Selfemp_Income']]
inc_type_rates.to_hdf('temp/data.h5', 'ue_income')
run_table = pd.read_hdf('temp/data.h5', 'run_id')
run_id = run_table[0]
inc_type_rates['run_id'] = run_id
inc_type_rates.to_sql(name='non_wage_income', con=engine, schema='defm', if_exists='append', index=True)
if __name__ == '__main__':
os.makedirs('temp')
luigi.run(main_task_cls=IncomeByType, cmdline_args=['--dem=1005', '--econ=1002'])
shutil.rmtree('temp')
| en | 0.169275 | # db_run_id = log.new_run(name='inc_run_log', run_id=db_run_id['max'].iloc[0]) # pow(1.01, mil_wages.index.get_level_values('yr') - 2014) # inc_type_rates.to_hdf('temp/data.h5', 'inc_type_rates', mode='a') | 2.255057 | 2 |
examples/TechnicHub/technic_hub_disconnect_test.py | NStrijbosch/Robot-Inventor-SPIKE-Prime-hub2hub | 2 | 6617770 | from hub2hub import TechnicHub, ble_handler
from time import sleep_ms
# Initialize ble handler and a technic hub
ble = ble_handler()
Thub = TechnicHub(ble)
# connect to a technic hub: press green button on the technic hub
Thub.connect()
Thub.led(8)
sleep_ms(2000)
Thub.disconnect()
sleep_ms(10000)
Thub.connect()
| from hub2hub import TechnicHub, ble_handler
from time import sleep_ms
# Initialize ble handler and a technic hub
ble = ble_handler()
Thub = TechnicHub(ble)
# connect to a technic hub: press green button on the technic hub
Thub.connect()
Thub.led(8)
sleep_ms(2000)
Thub.disconnect()
sleep_ms(10000)
Thub.connect()
| en | 0.814626 | # Initialize ble handler and a technic hub # connect to a technic hub: press green button on the technic hub | 2.929623 | 3 |
dev/potential/TersoffPotential/threebody_tersoff.py | eragasa/pypospack | 4 | 6617771 | <filename>dev/potential/TersoffPotential/threebody_tersoff.py
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017,2018"
__license__ = "Simplified BSD License"
__version__ = "1.0"
import os
from collections import OrderedDict
from pypospack.potential import ThreeBodyPotential
def get_3body_parameter_names(symbols, names):
parameter_names = []
for i1,s1 in enumerate(symbols):
for i2,s2 in enumerate(symbols):
if i1 <= i2:
for n in symbols:
parameter_names.append('{}{}.{}'.format(s1,s2,n))
for s3 in symbols:
for n in names:
parameter_names.append('{}{}{}.{}'.format(s1,s2,s3,n))
return parameter_names
class TersoffPotential(ThreeBodyPotential):
potential_type = 'tersoff'
twobody_parameter_names = ['n', 'beta', 'lambda2', 'B', 'lambda1', 'A', 'R', 'D']
threebody_parameter_names= ['m', 'gamma', 'lambda3', 'c', 'd', 'costheta0']
def __init__(self, symbols):
"""
Args:
symbols: list of str
Attributes:
symbols
potential_type
is_charge
References:
"""
potential_type = TersoffPotential.potential_type
is_charge = False
ThreeBodyPotential.__init__(self,
symbols=symbols,
potential_type=potential_type,
is_charge=is_charge)
self.lmps_parameter_filename = 'lmps_parameter_filename'
def _init_parameter_names(self):
symbols = self.symbols
self.parameter_names = []
for i1,s1 in enumerate(symbols):
for i2,s2 in enumerate(symbols):
if i1 <= i2:
for n in TersoffPotential.twobody_parameter_names:
self.parameter_names.append('{}{}.{}'.format(s1,s2,n))
for s3 in symbols:
for n in TersoffPotential.threebody_parameter_names:
self.parameter_names.append('{}{}{}.{}'.format(s1,s2,s3,n))
return self.parameter_names
def _init_parameters(self):
self.parameters = OrderedDict()
for p in self.parameter_names:
self.parameters[p] = None
def get_parameter_names(self, symbols=None):
if symbols is not None:
self.symbols = symbols
symbols_ = self.symbols
parameter_names = []
for i1,s1 in enumerate(symbols):
for i2,s2 in enumerate(symbols):
if i1 <= i2:
for n in TersoffPotential.twobody_parameter_names:
parameter_names.append('{}{}.{}'.format(s1,s2,n))
for s3 in symbols:
for n in TersoffPotential.threebody:
parameter_names.append('{}{}{}.{}'.format(s1,s2,s3,n))
self.parameter_names = parameter_names
return parameter_names
def lammps_potential_section_to_string(self, parameters=None):
if parameters is not None:
for p in self.parameters:
self.parameters[p] = parameters[p]
str_out = ''
for i, s in enumerate(self.symbols):
str_out += "mass {} {}\n".format(i+1, self._get_mass(s))
str_out += "\n"
for i, s in enumerate(self.symbols):
str_out += "group {} type {}\n".format(s, i+1)
str_out += "\n"
parameter_filename_ = self.lmps_parameter_filename
str_symbols_ = " ".join(self.symbols)
str_out += "pair_style tersoff\n"
str_out += "pair_coeff * * {} {}\n".format(parameter_filename_, str_symbols_)
return str_out
def write_lammps_parameter_file(self,dst_dir,dst_filename):
assert type(dst_dir) == str
assert type(dst_filename) == str
_strout = self.lammps_parameter_file_to_str()
with open(os.path.join(dst_dir,dst_filename)) as f:
f.write(_strout)
def lammps_parameter_file_to_str(self):
str_out = ''
for i, el1 in enumerate(self.symbols):
for j, el2 in enumerate(self.symbols):
for k, el3 in enumerate(self.symbols):
line_args_names = [
'element1', 'element2', 'element3',
'm', 'gamma', 'lambda3', 'c', 'd', 'costheta0',
'n', 'beta', 'lambda2', 'B', 'R', 'D', 'lambda1', 'A']
s = '{}{}{}'.format(el1, el2, el3)
for n in line_arg_names:
if n in TersoffPotential.twobody_parameter_names:
pn1 = '{}{}_{}'.format(el1, el2, n)
pn2 = '{}{}_{}'.format(el2, el1, n)
if pn1 in self.parameters:
str_out += ' ' + self.parameters[pn1]
elif pn2 in self.parameters:
str_out += ' ' + self.parameters[pn2]
else:
raise NameError()
elif n in TersoffPotential.threebody_parameter_names:
pn = '{}{}{}_{}'.format(el1, el2, el3, n)
str_out += ' ' + self.parameters[pn]
else:
msg = "cannot find a parameter value for {}{}{}_{}".format(
el1, el2, el3, n
)
raise ValueError(msg)
str_out += '\n'
return str_out
    def write_lammps_parameter_file(self,dst_dir,dst_filename):
        # NOTE(review): this second definition silently overrides the
        # write_lammps_parameter_file defined earlier in this class; the
        # two should be merged into a single method.
        """Write the LAMMPS Tersoff parameter file to dst_dir/dst_filename."""
        str_out = self.lammps_parameter_file_to_str()
        filename_ = os.path.join(dst_dir, dst_filename)
        with open(filename_, 'w') as f:
            f.write(str_out)
    def read_lammps_potential_file(self, filename):
        """Parse a LAMMPS Tersoff parameter file into self.parameters.

        Element symbols encountered in the file are appended to
        self.symbols.  Two-body parameters are stored once per pair under
        '<s1><s2>_<name>'; three-body parameters under
        '<s1><s2><s3>_<name>'.

        Args:
            filename: str, path to the LAMMPS tersoff parameter file.
        """
        if self.symbols is None:
            self.symbols = []
        parameters = OrderedDict()
        with open(filename, 'r') as f:
            lines = f.readlines()
        for line in lines:
            # skip comment and blank lines
            if line.startswith('#') or line == '\n':
                pass
            else:
                # column layout of a LAMMPS tersoff file entry
                line_args_names = [
                        'element1', 'element2', 'element3',
                        'm', 'gamma', 'lambda3', 'c', 'd', 'costheta0',
                        'n', 'beta', 'lambda2', 'B', 'R', 'D', 'lambda1', 'A']
                line_args = [k.strip() for k in line.strip().split()]
                symbol1 = line_args[0]
                symbol2 = line_args[1]
                symbol3 = line_args[2]
                if symbol1 not in self.symbols: self.symbols.append(symbol1)
                if symbol2 not in self.symbols: self.symbols.append(symbol2)
                if symbol3 not in self.symbols: self.symbols.append(symbol3)
                for i,v in enumerate(zip(line_args_names, line_args)):
                    # columns past the three element symbols are numeric
                    if i > 2:
                        parameter_name = '{}{}{}_{}'.format(symbol1,symbol2,symbol3,v[0])
                        try:
                            # prefer int when the token parses as one
                            parameter_value = int(v[1])
                        except ValueError as e:
                            parameter_value = float(v[1])
                        parameters[parameter_name] = parameter_value
        self.parameters = OrderedDict()
        for i1, s1 in enumerate(self.symbols):
            for i2, s2 in enumerate(self.symbols):
                for i3, s3 in enumerate(self.symbols):
                    for p in TersoffPotential.twobody_parameter_names:
                        # two-body values are symmetric in (s1, s2); store
                        # the pair only once
                        name1 = '{}{}_{}'.format(s1,s2,p)
                        name2 = '{}{}_{}'.format(s2,s1,p)
                        if name1 not in self.parameters or name2 not in self.parameters:
                            self.parameters[name1] = parameters['{}{}{}_{}'.format(s1,s2,s3,p)]
                    for p in TersoffPotential.threebody_parameter_names:
                        name1 = '{}{}{}_{}'.format(s1,s2,s3,p)
                        self.parameters[name1] = parameters[name1]
| <filename>dev/potential/TersoffPotential/threebody_tersoff.py
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017,2018"
__license__ = "Simplified BSD License"
__version__ = "1.0"
import os
from collections import OrderedDict
from pypospack.potential import ThreeBodyPotential
def get_3body_parameter_names(symbols, names):
    """Generate parameter-name strings for every symbol pair and triplet.

    Args:
        symbols: list of str chemical symbols.
        names: list of str three-body parameter names.
    Returns:
        list of str parameter names ('<elements>.<suffix>').
    """
    parameter_names = []
    for i1,s1 in enumerate(symbols):
        for i2,s2 in enumerate(symbols):
            if i1 <= i2:
                # NOTE(review): this inner loop iterates over 'symbols',
                # producing '<s1><s2>.<symbol>' entries rather than
                # '<s1><s2>.<parameter>'.  Compare with
                # TersoffPotential._init_parameter_names, which uses the
                # two-body parameter names here -- looks like a bug; verify.
                for n in symbols:
                    parameter_names.append('{}{}.{}'.format(s1,s2,n))
                for s3 in symbols:
                    for n in names:
                        parameter_names.append('{}{}{}.{}'.format(s1,s2,s3,n))
    return parameter_names
class TersoffPotential(ThreeBodyPotential):
    """LAMMPS 'pair_style tersoff' three-body potential.

    Parameter names use '<elements>.<name>' keys when generated by
    _init_parameter_names/get_parameter_names, and '<elements>_<name>'
    keys for the LAMMPS parameter-file I/O methods.

    Attributes:
        symbols: list of str chemical symbols.
        potential_type: str, always 'tersoff'.
        is_charge: bool, always False.
        lmps_parameter_filename: str, name of the external parameter file
            referenced by the generated pair_coeff command.
    """
    potential_type = 'tersoff'
    # parameters that depend only on the (unordered) element pair
    twobody_parameter_names = ['n', 'beta', 'lambda2', 'B', 'lambda1', 'A', 'R', 'D']
    # parameters that depend on the full (ordered) element triplet
    threebody_parameter_names = ['m', 'gamma', 'lambda3', 'c', 'd', 'costheta0']

    def __init__(self, symbols):
        """
        Args:
            symbols: list of str chemical symbols.
        """
        potential_type = TersoffPotential.potential_type
        is_charge = False
        ThreeBodyPotential.__init__(self,
                symbols=symbols,
                potential_type=potential_type,
                is_charge=is_charge)
        self.lmps_parameter_filename = 'lmps_parameter_filename'

    def _init_parameter_names(self):
        """Build self.parameter_names for every pair and triplet."""
        symbols = self.symbols
        self.parameter_names = []
        for i1, s1 in enumerate(symbols):
            for i2, s2 in enumerate(symbols):
                # i1 <= i2 avoids duplicating the symmetric pair term
                if i1 <= i2:
                    for n in TersoffPotential.twobody_parameter_names:
                        self.parameter_names.append('{}{}.{}'.format(s1, s2, n))
                    for s3 in symbols:
                        for n in TersoffPotential.threebody_parameter_names:
                            self.parameter_names.append('{}{}{}.{}'.format(s1, s2, s3, n))
        return self.parameter_names

    def _init_parameters(self):
        """Reset self.parameters: one unset (None) entry per name."""
        self.parameters = OrderedDict.fromkeys(self.parameter_names)

    def get_parameter_names(self, symbols=None):
        """Return (and cache) the parameter names for the given symbols.

        BUG FIX: the loops previously iterated the possibly-None
        'symbols' argument and referenced the nonexistent attribute
        'TersoffPotential.threebody'.
        """
        if symbols is not None:
            self.symbols = symbols
        symbols_ = self.symbols
        parameter_names = []
        for i1, s1 in enumerate(symbols_):
            for i2, s2 in enumerate(symbols_):
                if i1 <= i2:
                    for n in TersoffPotential.twobody_parameter_names:
                        parameter_names.append('{}{}.{}'.format(s1, s2, n))
                    for s3 in symbols_:
                        for n in TersoffPotential.threebody_parameter_names:
                            parameter_names.append('{}{}{}.{}'.format(s1, s2, s3, n))
        self.parameter_names = parameter_names
        return parameter_names

    def lammps_potential_section_to_string(self, parameters=None):
        """Render the LAMMPS input-script section for this potential.

        Args:
            parameters: optional dict; when given, values are copied into
                self.parameters (for the keys already present there)
                before rendering.
        Returns:
            str: mass/group definitions plus the tersoff
            pair_style/pair_coeff commands.
        """
        if parameters is not None:
            for p in self.parameters:
                self.parameters[p] = parameters[p]
        str_out = ''
        # one 'mass' line per element type (LAMMPS types are 1-based)
        for i, s in enumerate(self.symbols):
            str_out += "mass {} {}\n".format(i+1, self._get_mass(s))
        str_out += "\n"
        # one atom group per element, named after its symbol
        for i, s in enumerate(self.symbols):
            str_out += "group {} type {}\n".format(s, i+1)
        str_out += "\n"
        parameter_filename_ = self.lmps_parameter_filename
        str_symbols_ = " ".join(self.symbols)
        str_out += "pair_style tersoff\n"
        str_out += "pair_coeff * * {} {}\n".format(parameter_filename_, str_symbols_)
        return str_out

    def lammps_parameter_file_to_str(self):
        """Serialize self.parameters into LAMMPS Tersoff file format.

        One line per ordered (el1, el2, el3) triplet with the fields in
        the order required by LAMMPS: el1 el2 el3 m gamma lambda3 c d
        costheta0 n beta lambda2 B R D lambda1 A.

        Raises:
            NameError: if a required two-body parameter is missing.
            KeyError: if a required three-body parameter is missing.
        """
        # Field order mandated by the LAMMPS tersoff file format (the
        # element symbols occupy the first three columns of each line).
        parameter_field_names = [
                'm', 'gamma', 'lambda3', 'c', 'd', 'costheta0',
                'n', 'beta', 'lambda2', 'B', 'R', 'D', 'lambda1', 'A']
        str_out = ''
        for el1 in self.symbols:
            for el2 in self.symbols:
                for el3 in self.symbols:
                    # BUG FIX: the element columns were formatted into an
                    # unused local and never written; the format (cf.
                    # read_lammps_potential_file) requires them first.
                    str_out += '{} {} {}'.format(el1, el2, el3)
                    # BUG FIX: iterate the field list directly (the old
                    # code referenced the misspelled 'line_arg_names' and
                    # included the element columns, which always hit an
                    # error branch), and str() the numeric values.
                    for n in parameter_field_names:
                        if n in TersoffPotential.twobody_parameter_names:
                            # two-body terms are symmetric in the first
                            # two elements; accept either key order
                            pn1 = '{}{}_{}'.format(el1, el2, n)
                            pn2 = '{}{}_{}'.format(el2, el1, n)
                            if pn1 in self.parameters:
                                str_out += ' ' + str(self.parameters[pn1])
                            elif pn2 in self.parameters:
                                str_out += ' ' + str(self.parameters[pn2])
                            else:
                                raise NameError(
                                        "no two-body value for {}{}_{}".format(el1, el2, n))
                        else:
                            # three-body term: keyed on the full triplet
                            pn = '{}{}{}_{}'.format(el1, el2, el3, n)
                            str_out += ' ' + str(self.parameters[pn])
                    str_out += '\n'
        return str_out

    def write_lammps_parameter_file(self, dst_dir, dst_filename):
        """Write the LAMMPS Tersoff parameter file to dst_dir/dst_filename.

        Note: the original class defined this method twice (the first
        definition opened the file in read mode); the definitions are
        merged here into the working variant.
        """
        str_out = self.lammps_parameter_file_to_str()
        filename_ = os.path.join(dst_dir, dst_filename)
        with open(filename_, 'w') as f:
            f.write(str_out)

    def read_lammps_potential_file(self, filename):
        """Parse a LAMMPS Tersoff parameter file into self.parameters.

        Element symbols encountered in the file are appended to
        self.symbols.  Two-body parameters are stored once per pair under
        '<s1><s2>_<name>'; three-body parameters under
        '<s1><s2><s3>_<name>'.

        Args:
            filename: str, path to the LAMMPS tersoff parameter file.
        """
        if self.symbols is None:
            self.symbols = []
        parameters = OrderedDict()
        with open(filename, 'r') as f:
            lines = f.readlines()
        for line in lines:
            # skip comment and blank lines
            if line.startswith('#') or line == '\n':
                continue
            # column layout of a LAMMPS tersoff file entry
            line_args_names = [
                    'element1', 'element2', 'element3',
                    'm', 'gamma', 'lambda3', 'c', 'd', 'costheta0',
                    'n', 'beta', 'lambda2', 'B', 'R', 'D', 'lambda1', 'A']
            line_args = [k.strip() for k in line.strip().split()]
            symbol1 = line_args[0]
            symbol2 = line_args[1]
            symbol3 = line_args[2]
            if symbol1 not in self.symbols: self.symbols.append(symbol1)
            if symbol2 not in self.symbols: self.symbols.append(symbol2)
            if symbol3 not in self.symbols: self.symbols.append(symbol3)
            for i, v in enumerate(zip(line_args_names, line_args)):
                # columns past the three element symbols are numeric
                if i > 2:
                    parameter_name = '{}{}{}_{}'.format(symbol1, symbol2, symbol3, v[0])
                    try:
                        # prefer int when the token parses as one
                        parameter_value = int(v[1])
                    except ValueError:
                        parameter_value = float(v[1])
                    parameters[parameter_name] = parameter_value
        self.parameters = OrderedDict()
        for i1, s1 in enumerate(self.symbols):
            for i2, s2 in enumerate(self.symbols):
                for i3, s3 in enumerate(self.symbols):
                    for p in TersoffPotential.twobody_parameter_names:
                        # two-body values are symmetric in (s1, s2); store
                        # the pair only once
                        name1 = '{}{}_{}'.format(s1, s2, p)
                        name2 = '{}{}_{}'.format(s2, s1, p)
                        if name1 not in self.parameters or name2 not in self.parameters:
                            self.parameters[name1] = parameters['{}{}{}_{}'.format(s1, s2, s3, p)]
                    for p in TersoffPotential.threebody_parameter_names:
                        name1 = '{}{}{}_{}'.format(s1, s2, s3, p)
                        self.parameters[name1] = parameters[name1]
| en | 0.432664 | # -*- coding: utf-8 -*- Args: symbols: list of str Attributes: symbols potential_type is_charge References: | 2.431087 | 2 |
schedule.py | welch/sportsball | 4 | 6617772 | <filename>schedule.py
#
# giants schedule manager. we cache a parsed, processed version
# of their ical schedule feed as a DataStore object.
#
import logging
import json
from datetime import datetime, timedelta, date
from pytz import timezone, utc
from google.appengine.ext import db
from ics import Calendar
from urllib2 import urlopen
# This iCal URL will be updated throughout the season as schedules change
SCHED_URL = 'http://www.ticketing-client.com/ticketing-client/ical/EventTicketPromotionPrice.tiksrv?team_id=137&display_in=singlegame&ticket_category=Tickets&site_section=Default&sub_category=Default&leave_empty_games=true&event_type=T&begin_date=20190201'
# for sanity, all date/time storage and manipulations will be in
# Oracle Park's local TZ
ORACLE_TZ = timezone('US/Pacific')
def localize(dt):
    """
    set tzinfo for dt to Oracle Park timezone, converting if it already
    has tzinfo set.
    """
    # identity check is the idiomatic None test; '== None' invokes
    # __eq__, which arbitrary objects may override
    if dt.tzinfo is None:
        return ORACLE_TZ.localize(dt)
    else:
        return dt.astimezone(ORACLE_TZ)
def oraclenow():
    """Return the current time as a timezone-aware datetime in ORACLE_TZ."""
    return localize(datetime.now(utc))
class Schedule(db.Model):
    """
    Factory for schedule instances backed by entities in the datastore.
    Don't instantiate this class -- use Schedule.get() to retrieve an
    instance by url.
    """
    url = db.StringProperty() # feed url, also used as primary key
    json = db.TextProperty() # serialized list of event dicts (see get_feed)
    timestamp = db.DateTimeProperty(auto_now=True) # last datastore write
    # NOTE(review): _events is a class-level mutable dict; entities loaded
    # from the datastore (which skip refresh()) would share this cache
    # across instances -- confirm that is intended.
    _events = {} # event lists cached by isodate
    @classmethod
    def get(cls, url=SCHED_URL, every_secs=(24 * 3600)):
        """
        fetch the cached schedule for this url from the datastore. If it
        does not exist, or is over every_secs seconds old, refresh it from the
        url feed before returning.
        Returns: Schedule instance for the url, or None on failure
        """
        sched = cls.all().filter("url ==", url).get()
        # refresh when missing, empty, or stale
        if (not sched or not sched.json or
            sched.timestamp < datetime.now() - timedelta(seconds=every_secs)):
            sched = cls.refresh(url=url)
        if not sched:
            logging.error("cannot fetch sched.json from DataStore")
            return None
        return sched
    @classmethod
    def refresh(cls, url=SCHED_URL):
        """
        update our schedule.json from the feed url and store it in
        DataStore, creating a persistent Schedule object if necessary.
        Returns: Schedule instance with refreshed schedule json.
        """
        sched = cls.all().filter("url ==", url).get()
        if not sched:
            sched = cls()
            sched.url = url
            sched.json = None
            sched._events = {} # cache for parsed events, not persisted.
        events = get_feed(url)
        if events:
            # only persist when the feed fetch succeeded; otherwise keep
            # whatever was stored before
            sched.json = json.dumps(events)
            sched._events = {}
            sched.put()
        return sched
    def get_events(self, min_isodate=None):
        """decode instance json into a list of event dictionaries,
        limited by min_isodate (today if null).
        Cache the parsed list for speed during the lifetime of
        this app.
        Returns: list of event dictionaries for given date and beyond
        """
        if not min_isodate:
            min_isodate = oraclenow().date().isoformat()
        try:
            return self._events[min_isodate]
        except KeyError:
            pass
        # ISO date strings compare correctly lexicographically
        schedule = [e for e in json.loads(self.json) if min_isodate <= e['date']]
        self._events[min_isodate] = schedule
        return schedule
    def get_next_here_event(self, isodate=None):
        """
        return an event dictionary for the earliest event on or after
        the given date, or None if no events are scheduled.
        If isodate is None, use today
        Returns: an event dictionary, or None if no more games scheduled for here
        """
        for e in self.get_events(isodate):
            if e['is_here']:
                return e
        return None
    @staticmethod
    def next_isodate(iso, days=1):
        """
        add days to isodate, return new isodate string
        """
        next = (datetime.strptime(iso, "%Y-%m-%d").date() + timedelta(days=days))
        return next.isoformat()
    def get_next_non_here_datetime(self, isodate=None):
        """
        return the earliest day on or after the given date having no
        event here. If isodate is None, use today.
        Returns: a naive datetime for that day (note: despite the older
        doc, not an isodate string)
        """
        if isodate is None:
            isodate = oraclenow().date().isoformat()
        for e in self.get_events(isodate):
            # stop at the first event that is not here, or the first gap day
            if (not e['is_here'] or isodate != e['date']):
                break
            isodate = Schedule.next_isodate(isodate)
        return datetime.strptime(isodate, "%Y-%m-%d")
def get_feed(url=SCHED_URL):
    """
    fetch the giants schedule as a remote iCal feed, parse it,
    and create a list of events from it.
    Returns: a list of event dictionaries (in feed order), or None
    if there is a problem (eg, an http timeout. they happen.)
    """
    sched = []
    logging.info("get_feed %s" % url)
    try:
        # BUG FIX: fetch the caller-supplied url; the previous code
        # ignored the argument and always downloaded SCHED_URL.
        c = Calendar(urlopen(url).read().decode('iso-8859-1'))
        for event in c.events:
            if event.name.startswith("FINAL"):
                continue  # skip games already played
            event.begin = localize(event.begin)
            is_home = (event.name.endswith("Giants"))
            is_here = event.location.startswith('Oracle')
            # event names look like "<visitor> vs. <home>"
            them = event.name.split(" vs. ")[0 if is_home else 1]
            sched.append({
                'date': event.begin.date().isoformat(),
                'day': event.begin.strftime("%A, %b %d"),
                'time': event.begin.strftime("%I:%M %p"),
                'is_home': is_home,
                'is_here': is_here,
                'location': event.location,
                'them': them
                })
    # 'except ... as e' works on both Python 2.6+ and 3.x, unlike the
    # old comma form
    except Exception as e:
        logging.error("can't download/parse schedule: " + str(e))
        return None
    return sched
| <filename>schedule.py
#
# giants schedule manager. we cache a parsed, processed version
# of their ical schedule feed as a DataStore object.
#
import logging
import json
from datetime import datetime, timedelta, date
from pytz import timezone, utc
from google.appengine.ext import db
from ics import Calendar
from urllib2 import urlopen
# This iCal URL will be updated throughout the season as schedules change
SCHED_URL = 'http://www.ticketing-client.com/ticketing-client/ical/EventTicketPromotionPrice.tiksrv?team_id=137&display_in=singlegame&ticket_category=Tickets&site_section=Default&sub_category=Default&leave_empty_games=true&event_type=T&begin_date=20190201'
# for sanity, all date/time storage and manipulations will be in
# Oracle Park's local TZ
ORACLE_TZ = timezone('US/Pacific')
def localize(dt):
    """
    set tzinfo for dt to Oracle Park timezone, converting if it already
    has tzinfo set.
    """
    # identity check is the idiomatic None test; '== None' invokes
    # __eq__, which arbitrary objects may override
    if dt.tzinfo is None:
        return ORACLE_TZ.localize(dt)
    else:
        return dt.astimezone(ORACLE_TZ)
def oraclenow():
    """Return the current time as a timezone-aware datetime in ORACLE_TZ."""
    return localize(datetime.now(utc))
class Schedule(db.Model):
    """
    Factory for schedule instances backed by entities in the datastore.
    Don't instantiate this class -- use Schedule.get() to retrieve an
    instance by url.
    """
    url = db.StringProperty() # feed url, also used as primary key
    json = db.TextProperty() # serialized list of event dicts (see get_feed)
    timestamp = db.DateTimeProperty(auto_now=True) # last datastore write
    # NOTE(review): _events is a class-level mutable dict; entities loaded
    # from the datastore (which skip refresh()) would share this cache
    # across instances -- confirm that is intended.
    _events = {} # event lists cached by isodate
    @classmethod
    def get(cls, url=SCHED_URL, every_secs=(24 * 3600)):
        """
        fetch the cached schedule for this url from the datastore. If it
        does not exist, or is over every_secs seconds old, refresh it from the
        url feed before returning.
        Returns: Schedule instance for the url, or None on failure
        """
        sched = cls.all().filter("url ==", url).get()
        # refresh when missing, empty, or stale
        if (not sched or not sched.json or
            sched.timestamp < datetime.now() - timedelta(seconds=every_secs)):
            sched = cls.refresh(url=url)
        if not sched:
            logging.error("cannot fetch sched.json from DataStore")
            return None
        return sched
    @classmethod
    def refresh(cls, url=SCHED_URL):
        """
        update our schedule.json from the feed url and store it in
        DataStore, creating a persistent Schedule object if necessary.
        Returns: Schedule instance with refreshed schedule json.
        """
        sched = cls.all().filter("url ==", url).get()
        if not sched:
            sched = cls()
            sched.url = url
            sched.json = None
            sched._events = {} # cache for parsed events, not persisted.
        events = get_feed(url)
        if events:
            # only persist when the feed fetch succeeded; otherwise keep
            # whatever was stored before
            sched.json = json.dumps(events)
            sched._events = {}
            sched.put()
        return sched
    def get_events(self, min_isodate=None):
        """decode instance json into a list of event dictionaries,
        limited by min_isodate (today if null).
        Cache the parsed list for speed during the lifetime of
        this app.
        Returns: list of event dictionaries for given date and beyond
        """
        if not min_isodate:
            min_isodate = oraclenow().date().isoformat()
        try:
            return self._events[min_isodate]
        except KeyError:
            pass
        # ISO date strings compare correctly lexicographically
        schedule = [e for e in json.loads(self.json) if min_isodate <= e['date']]
        self._events[min_isodate] = schedule
        return schedule
    def get_next_here_event(self, isodate=None):
        """
        return an event dictionary for the earliest event on or after
        the given date, or None if no events are scheduled.
        If isodate is None, use today
        Returns: an event dictionary, or None if no more games scheduled for here
        """
        for e in self.get_events(isodate):
            if e['is_here']:
                return e
        return None
    @staticmethod
    def next_isodate(iso, days=1):
        """
        add days to isodate, return new isodate string
        """
        next = (datetime.strptime(iso, "%Y-%m-%d").date() + timedelta(days=days))
        return next.isoformat()
    def get_next_non_here_datetime(self, isodate=None):
        """
        return the earliest day on or after the given date having no
        event here. If isodate is None, use today.
        Returns: a naive datetime for that day (note: despite the older
        doc, not an isodate string)
        """
        if isodate is None:
            isodate = oraclenow().date().isoformat()
        for e in self.get_events(isodate):
            # stop at the first event that is not here, or the first gap day
            if (not e['is_here'] or isodate != e['date']):
                break
            isodate = Schedule.next_isodate(isodate)
        return datetime.strptime(isodate, "%Y-%m-%d")
def get_feed(url=SCHED_URL):
    """
    fetch the giants schedule as a remote iCal feed, parse it,
    and create a list of events from it.
    Returns: a list of event dictionaries (in feed order), or None
    if there is a problem (eg, an http timeout. they happen.)
    """
    sched = []
    logging.info("get_feed %s" % url)
    try:
        # BUG FIX: fetch the caller-supplied url; the previous code
        # ignored the argument and always downloaded SCHED_URL.
        c = Calendar(urlopen(url).read().decode('iso-8859-1'))
        for event in c.events:
            if event.name.startswith("FINAL"):
                continue  # skip games already played
            event.begin = localize(event.begin)
            is_home = (event.name.endswith("Giants"))
            is_here = event.location.startswith('Oracle')
            # event names look like "<visitor> vs. <home>"
            them = event.name.split(" vs. ")[0 if is_home else 1]
            sched.append({
                'date': event.begin.date().isoformat(),
                'day': event.begin.strftime("%A, %b %d"),
                'time': event.begin.strftime("%I:%M %p"),
                'is_home': is_home,
                'is_here': is_here,
                'location': event.location,
                'them': them
                })
    # 'except ... as e' works on both Python 2.6+ and 3.x, unlike the
    # old comma form
    except Exception as e:
        logging.error("can't download/parse schedule: " + str(e))
        return None
    return sched
| en | 0.861161 | # # giants schedule manager. we cache a parsed, processed version # of their ical schedule feed as a DataStore object. # # This iCal URL will be updated throughout the season as schedules change # for sanity, all date/time storage and manipulations will be in # Oracle Park's local TZ set tzinfo for dt to Oracle Park timezone, converting if it already has tzinfo set. now in the ORACLE_TZ Factory for schedule instances backed by entities in the datastore. Don't instantiate this class -- use Schedule.get() to retrieve an instance by url. # feed url, also used as primary key # event lists cached by isodate fetch the cached schedule for this url from the datastore. If it does not exist, or is over every_secs seconds old, refresh it from the url feed before returning. Returns: Schedule instance for the url update our schedule.json from the feed url and store it in DataStore, creating a persistent Schedule object if necessary. Returns: Schedule instance with refreshed schedule json. # cache for parsed events, not persisted. decode instance json into a dictionary of event dictionaries keyed by isodate string, limited by min_isodate (today if null). Cache the parsed dictionary for speed during the lifetime of this app. Returns: list of event dictionaries for given date and beyond return an event dictionary for the earliest event on or after the given date, or None if no events are scheduled. If isodate is None, use today Returns: an event dictionary, or none if no more games scheduled for here add days to isodate, return new isodate string return the earliest day on or after the given date having no event here. If isodate is None, use today. Returns: an isodate string fetch the giants schedule as a remote csv file, parse it, and create a list of events from it. Returns: a sorted list of event dictionaries, or None if there is a problem (eg, an http timeout. they happen.) # skip games already played | 2.865708 | 3 |
coursera 3/Triang_retangulo.py | SricardoSdSouza/Curso-da-USP | 0 | 6617773 | from math import sqrt
class Triangulo:
    """A triangle defined by its three side lengths."""

    def __init__(self, a, b, c):
        """Store the three side lengths a, b and c."""
        self.a = a
        self.b = b
        self.c = c

    def retangulo(self):
        """Return True if the triangle is a right triangle.

        The longest side is taken as the hypotenuse (sorted() also fixes
        the old tie-handling bug when two sides were equal) and the
        Pythagorean relation is checked on the squared sides with
        math.isclose, so float rounding from sqrt() cannot produce
        spurious mismatches.
        """
        from math import isclose
        cop, cad, hip = sorted((self.a, self.b, self.c))
        # compare squares instead of taking a square root: exact for
        # integer sides and numerically stabler for floats
        return isclose(hip ** 2, cop ** 2 + cad ** 2)
'''ret = Triangulo(1, 3, 5)
print(ret.retangulo())
print('=='*20)
ret1 = Triangulo(3, 4, 5)
print(ret1.retangulo())
print('=='*20)
ret2 = Triangulo(4, 2, 3)
print(ret2.retangulo())
print('=='*20)
ret3 = Triangulo(3, 4, 5)
print(ret3.retangulo())'''
| from math import sqrt
class Triangulo:
    """A triangle defined by its three side lengths."""

    def __init__(self, a, b, c):
        """Store the three side lengths a, b and c."""
        self.a = a
        self.b = b
        self.c = c

    def retangulo(self):
        """Return True if the triangle is a right triangle.

        The longest side is taken as the hypotenuse (sorted() also fixes
        the old tie-handling bug when two sides were equal) and the
        Pythagorean relation is checked on the squared sides with
        math.isclose, so float rounding from sqrt() cannot produce
        spurious mismatches.
        """
        from math import isclose
        cop, cad, hip = sorted((self.a, self.b, self.c))
        # compare squares instead of taking a square root: exact for
        # integer sides and numerically stabler for floats
        return isclose(hip ** 2, cop ** 2 + cad ** 2)
'''ret = Triangulo(1, 3, 5)
print(ret.retangulo())
print('=='*20)
ret1 = Triangulo(3, 4, 5)
print(ret1.retangulo())
print('=='*20)
ret2 = Triangulo(4, 2, 3)
print(ret2.retangulo())
print('=='*20)
ret3 = Triangulo(3, 4, 5)
print(ret3.retangulo())'''
| en | 0.269871 | ret = Triangulo(1, 3, 5) print(ret.retangulo()) print('=='*20) ret1 = Triangulo(3, 4, 5) print(ret1.retangulo()) print('=='*20) ret2 = Triangulo(4, 2, 3) print(ret2.retangulo()) print('=='*20) ret3 = Triangulo(3, 4, 5) print(ret3.retangulo()) | 4.015474 | 4 |
imm/samplers/generic.py | tscholak/imm | 9 | 6617774 | <filename>imm/samplers/generic.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import abc
import numpy as np
from collections import defaultdict
from ..utils import _logsumexp
from ..models.processes import GenericProcess
from ..models.mixtures import GenericMixture
class GenericSampler(object):
    """
    Class which encapsulates common functionality between all samplers.
    """

    __metaclass__ = abc.ABCMeta

    # Subclasses register the process/mixture model classes they support.
    compatible_process_models = set()
    compatible_mixture_models = set()

    def __init__(self, process_model, max_iter=1000, warmup=None):
        """
        Args:
            process_model: process model instance whose class appears in
                ``compatible_process_models``.
            max_iter: int > 0, maximum number of sampling iterations.
            warmup: int in [0, max_iter); defaults to max_iter // 2.
        """
        self._process_model = self._check_process_model(process_model)
        self._mixture_model = self._check_mixture_model(
                self._process_model._mixture_model)
        self._max_iter, self._warmup = self._check_max_iter(max_iter, warmup)

    @classmethod
    def _check_process_model(cls, process_model):
        """Validate that process_model is a compatible GenericProcess."""
        if isinstance(process_model, GenericProcess):
            pm_class = process_model.__class__
            if pm_class in cls.compatible_process_models:
                return process_model
            raise ValueError('A process model of type %r cannot be used with'
                             ' this sampler' % pm_class.__name__)
        raise ValueError("'process_model' must be a compatible process model"
                         " instance. Got process_model = %r" % process_model)

    @property
    def process_model(self):
        return self._process_model

    @process_model.setter
    def process_model(self, process_model):
        self._process_model = self._check_process_model(process_model)

    def _get_process_model(self, process_model):
        """Return the validated argument, or fall back to the stored model."""
        if process_model is not None:
            return self._check_process_model(process_model)
        else:
            return self._process_model

    @classmethod
    def _check_mixture_model(cls, mixture_model):
        """Validate that mixture_model is a compatible GenericMixture."""
        if isinstance(mixture_model, GenericMixture):
            mm_class = mixture_model.__class__
            if mm_class in cls.compatible_mixture_models:
                return mixture_model
            raise ValueError('A mixture model of type %r cannot be used with'
                             ' this sampler' % mm_class.__name__)
        raise ValueError("'mixture_model' must be a compatible mixture model."
                         " Got mixture_model = %r" % mixture_model)

    @property
    def mixture_model(self):
        return self._mixture_model

    @mixture_model.setter
    def mixture_model(self, mixture_model):
        self._mixture_model = self._check_mixture_model(mixture_model)

    def _get_mixture_model(self, mixture_model):
        """Return the validated argument, or fall back to the stored model."""
        if mixture_model is not None:
            return self._check_mixture_model(mixture_model)
        else:
            return self._mixture_model

    @staticmethod
    def _check_max_iter(max_iter, warmup):
        """Validate (max_iter, warmup) and apply defaults.

        Returns:
            (max_iter, warmup) with 0 <= warmup < max_iter.
        """
        if max_iter is not None:
            if not np.isscalar(max_iter):
                raise ValueError("Integer 'max_iter' must be a scalar.")
            max_iter = int(max_iter)
            if max_iter < 1:
                raise ValueError("Integer 'max_iter' must be larger than"
                                 " zero, but max_iter = %d" % max_iter)
        else:
            max_iter = 1000
        if warmup is not None:
            if not np.isscalar(warmup):
                raise ValueError("Integer 'warmup' must be a scalar.")
            warmup = int(warmup)
            if warmup < 0:
                raise ValueError("Integer 'warmup' must not be smaller than"
                                 " zero, but warmup = %d" % warmup)
            if not warmup < max_iter:
                raise ValueError("Integer 'warmup' must be smaller than"
                                 " 'max_iter', but warmup = %d" % warmup)
        else:
            # BUG FIX: floor division keeps warmup an int under Python 3
            # ('/' would yield a float here)
            warmup = max_iter // 2
        return max_iter, warmup

    @property
    def max_iter(self):
        return self._max_iter

    @max_iter.setter
    def max_iter(self, max_iter):
        self._max_iter, _ = self._check_max_iter(max_iter, self._warmup)

    def _get_max_iter(self, max_iter):
        if max_iter is not None:
            max_iter, _ = self._check_max_iter(max_iter, self._warmup)
            return max_iter
        else:
            return self._max_iter

    @property
    def warmup(self):
        return self._warmup

    @warmup.setter
    def warmup(self, warmup):
        _, self._warmup = self._check_max_iter(self._max_iter, warmup)

    def _get_warmup(self, warmup):
        if warmup is not None:
            _, warmup = self._check_max_iter(self._max_iter, warmup)
            return warmup
        else:
            return self._warmup

    @staticmethod
    def _check_examples(x_n):
        """Coerce examples to a 2-d float array of shape (n, dim).

        Returns:
            (n, x_n): the number of examples and the coerced array.
        """
        # TODO: Make this truly model-agnostic. Get rid of dtype=float
        x_n = np.asarray(x_n, dtype=float)
        if x_n.ndim == 0:
            x_n = x_n[np.newaxis, np.newaxis]
        elif x_n.ndim == 1:
            x_n = x_n[:, np.newaxis]
        elif x_n.ndim > 2:
            raise ValueError("'x_n' must be at most two-dimensional,"
                             " but x_n.ndim = %d" % x_n.ndim)
        return x_n.shape[0], x_n

    @staticmethod
    def _check_components(n, c_n):
        """Coerce component indicators to an int array of shape (n,).

        None yields the all-zeros assignment (all examples in component 0).
        """
        # BUG FIX: identity comparison with None; '== None' triggers an
        # element-wise comparison (ambiguous truth value) when c_n is
        # already an ndarray
        if c_n is None:
            c_n = np.zeros(n, dtype=int)
        else:
            c_n = np.asarray(c_n, dtype=int)
            if c_n.ndim == 0:
                c_n = c_n[np.newaxis]
            elif c_n.ndim > 1:
                raise ValueError("'c_n' must be at most one-dimensional,"
                                 " but c_n.ndim = %d" % c_n.ndim)
        if not c_n.shape == (n, ):
            raise ValueError("'c_n' has incompatible dimensions: should"
                             " be %s, got %s." % ((n,), c_n.shape))
        return c_n
class GenericGibbsSampler(GenericSampler):
    """
    Class which encapsulates common functionality between all Gibbs samplers.
    """
    def __init__(self, process_model, m=10, max_iter=1000, warmup=None):
        """
        Args:
            process_model: compatible process model instance.
            m: int > 0, number of auxiliary components proposed per
                example (Neal's algorithm 8).
            max_iter: int > 0, maximum number of iterations.
            warmup: int in [0, max_iter), number of warm-up iterations.
        """
        super(GenericGibbsSampler, self).__init__(process_model,
                max_iter=max_iter, warmup=warmup)
        self._m = self._check_m(m)
    @staticmethod
    def _check_m(m):
        """Validate the auxiliary-component count; defaults to 10."""
        if m is not None:
            if not np.isscalar(m):
                raise ValueError("Integer 'm' must be a scalar.")
            m = int(m)
            if m < 1:
                raise ValueError("Integer 'm' must be larger than"
                                 " zero, but m = %d" % m)
        else:
            m = 10
        return m
    @property
    def m(self):
        return self._m
    @m.setter
    def m(self, m):
        self._m = self._check_m(m)
    def _get_m(self, m):
        # return the validated argument, or fall back to the stored value
        if m is not None:
            return self._check_m(m)
        else:
            return self._m
    @staticmethod
    def _gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            random_state):
        """
        Performs a single iteration of Radford Neal's algorithms 3 or 8, see
        Neal (2000).

        Mutates c_n, inv_c (if given), n_c, active_components,
        inactive_components and the per-component mixture_params in place.
        """
        for i in range(n):
            prev_k = c_n[i]
            # Bookkeeping. Note that Neal's algorithms do not need inv_c to
            # work. It is used only in the split & merge algorithms
            if inv_c is not None:
                inv_c[prev_k].remove(i)
            # Downdate component counter
            n_c[prev_k] -= 1
            # Downdate model-dependent parameters
            mixture_params[prev_k].downdate(x_n[i])
            # If the previous component is empty after example i is removed,
            # recycle it and propose it as new component. If it is not empty,
            # we need to get a new component from the inactive_components set
            if n_c[prev_k] == 0:
                proposed_components = set([prev_k])
            else:
                proposed_components = set([inactive_components.pop()])
            for _ in range(1, m):
                proposed_components.add(inactive_components.pop())
            active_components |= proposed_components
            # Make sure the proposed components are not contaminated with
            # obsolete information
            for k in (proposed_components - set([prev_k])):
                mixture_params[k].iterate()
            # Initialize and populate the total log probability accumulator
            log_dist = np.empty(len(n_c), dtype=float)
            log_dist.fill(-np.inf)
            for k in active_components:
                # Calculate the process prior and mixture likelihood
                log_dist[k] = process_param.log_prior(n, n_c[k], m) + \
                        mixture_params[k].log_likelihood(x_n[i])
            # Sample from log_dist. Normalization is required
            log_dist -= _logsumexp(len(n_c), log_dist)
            # TODO: Can we expect performance improvements if we exclude those
            # elements of `log_dist` that are -inf?
            next_k = random_state.choice(a=len(n_c), p=np.exp(log_dist))
            c_n[i] = next_k
            # More bookkeeping
            if inv_c is not None:
                inv_c[next_k].add(i)
            # Update component counter
            n_c[next_k] += 1
            # Update model-dependent parameters
            mixture_params[next_k].update(x_n[i])
            # Cleanup: any proposed component that was not selected goes
            # back to the inactive pool
            proposed_components.discard(next_k)
            active_components -= proposed_components
            inactive_components |= proposed_components
    def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            random_state):
        """One full sweep: refresh per-component parameters, resample all
        assignments, then update the process parameter."""
        for k in active_components:
            mixture_params[k].iterate()
        self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
                inactive_components, process_param, mixture_params, m,
                random_state)
        process_param.iterate(n, len(active_components))
    def infer(self, x_n, c_n=None, m=None, max_iter=None, warmup=None,
            random_state=None):
        """
        Component and latent variable inference.
        Parameters
        ----------
        x_n : array-like
            Examples
        c_n : None or array-like, optional
            Vector of component indicator variables. If None, then the
            examples will be assigned to the same component initially
        m : None or int, optional
            The number of auxiliary components
        max_iter : None or int, optional
            The maximum number of iterations
        warmup: None or int, optional
            The number of warm-up iterations
        random_state : np.random.RandomState instance, optional
            Used for drawing the random variates
        Returns
        -------
        c_n : ndarray
            Inferred component vectors
        phi_c : ndarray
            Inferred latent variables
        """
        m = self._get_m(m)
        max_iter = self._get_max_iter(max_iter)
        warmup = self._get_warmup(warmup)
        pm = self.process_model
        random_state = pm._get_random_state(random_state)
        process_param = pm.InferParam(pm, random_state)
        # TODO: Move into mixture model?
        n, x_n = self._check_examples(x_n)
        c_n = self._check_components(n, c_n)
        # Maximum number of components
        c_max = n + m - 1
        # Inverse mapping from components to examples
        # TODO: Only needed for split and merge samplers
        inv_c = defaultdict(set)
        for i in range(n):
            inv_c[c_n[i]].add(i)
        # Number of examples per component
        n_c = np.bincount(c_n, minlength=c_max)
        # active_components is an unordered set of unique components
        active_components = set(np.unique(c_n))
        # inactive_components is an unordered set of currently unassigned
        # components
        inactive_components = set(range(c_max)) - active_components
        # Initialize model-dependent parameters lazily
        mm = self.mixture_model
        mixture_params = [mm.InferParam(mm, random_state)
                for _ in range(c_max)]
        for k in active_components:
            mixture_params[k].iterate()
            # TODO: Substitute for inv_c?
            for i in inv_c[k]:
                mixture_params[k].update(x_n[i])
        # post-warmup samples of the assignments and latent variables
        c_n_samples = np.empty((max_iter-warmup)*n, dtype=int).reshape(
                (max_iter-warmup,n))
        phi_c_samples = [{} for _ in range(max_iter-warmup)]
        for itn in range(max_iter):
            self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params, m,
                    random_state)
            # record only after the warm-up phase
            if not itn-warmup < 0:
                c_n_samples[(itn-warmup,)] = c_n
                for k in active_components:
                    phi_c_samples[itn-warmup][k] = mixture_params[k].phi_c()
        return c_n_samples, phi_c_samples
class GenericMSSampler(GenericGibbsSampler):
    """
    Class which encapsulates common functionality between all merge-split (MS)
    samplers.
    """

    class Launch(object):
        """
        Bookkeeping for one launch-state component: its label `c`, the set
        `inv_c` of examples currently assigned to it, their count `n_c`, and
        the component's model-dependent mixture parameters.
        """

        def __init__(self, c, g, x_g, mixture_param):
            # Set the component
            self.c = c
            # The set inv_c will contain all examples that belong to the
            # component c
            self.inv_c = set([g])
            # Number of examples in the component c
            self.n_c = 1
            # Auxiliary, model-dependent parameters
            # TODO: A less ugly way to achieve parameter initialization
            mixture_param.iterate()
            # NOTE(review): relies on InferParam.update returning the
            # parameter object itself -- confirm against the mixture models
            self.mixture_param = mixture_param.update(x_g)

        def update(self, g, x_g):
            """Assign example g (with value x_g) to this component."""
            # Add example g to component c
            self.inv_c.add(g)
            # Increment counter
            self.n_c += 1
            # Update model-dependent parameters
            self.mixture_param.update(x_g)

        def downdate(self, g, x_g):
            """Remove example g (with value x_g) from this component."""
            # Remove example g from component c
            self.inv_c.remove(g)
            # Reduce counter
            self.n_c -= 1
            # Downdate model-dependent parameters
            self.mixture_param.downdate(x_g)
@staticmethod
def _select_random_pair(n, random_state):
"""
Select two distict observations (i.e. examples), i and j, uniformly
at random
"""
i, j = random_state.choice(a=n, size=2, replace=False)
return i, j
@staticmethod
def _find_common_components(c_n, inv_c, i, j):
"""
Define a set of examples, S, that does not contain i or j, but all
other examples that belong to the same component as i or j
"""
if c_n[i] == c_n[j]:
S = inv_c[c_n[i]] - set([i, j])
else:
S = (inv_c[c_n[i]] | inv_c[c_n[j]]) - set([i, j])
return S
    def _attempt_split(self, n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
            launch_j, active_components, inactive_components, process_param,
            mixture_params, random_state):
        """
        Metropolis-Hastings accept/reject step for a split proposal.

        `log_acc` enters holding the (negative) log proposal density; the
        log prior and log likelihood quotients are added here. On acceptance
        the state held in `launch_i`/`launch_j` is committed to the global
        bookkeeping (c_n, inv_c, n_c, mixture_params); on rejection the
        provisional component `launch_i.c` is returned to the inactive pool.
        """
        pm = self.process_model
        mm = self.mixture_model
        # Logarithm of prior quotient, see Eq. (3.4) in Jain & Neal (2004) and
        # Eq. (7) in Jain & Neal (2007)
        log_acc += pm._ms_log_prior_pre(n, len(active_components),
                len(active_components)-1, process_param)
        log_acc += pm._ms_log_prior_post(launch_i.n_c, process_param)
        log_acc += pm._ms_log_prior_post(launch_j.n_c, process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_j.c], process_param)
        log_acc += mm._ms_log_prior(launch_i.mixture_param)
        log_acc += mm._ms_log_prior(launch_j.mixture_param)
        log_acc -= mm._ms_log_prior(mixture_params[launch_j.c])
        # Logarithm of likelihood quotient, see Eq. (3.8) in Jain & Neal
        # (2004) and Eq. (11) in Jain & Neal (2007)
        log_acc += mm._ms_log_likelihood(x_n, launch_i.inv_c,
                launch_i.mixture_param, random_state)
        log_acc += mm._ms_log_likelihood(x_n, launch_j.inv_c,
                launch_j.mixture_param, random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_j.c],
                mixture_params[launch_j.c], random_state)
        # Evaluate the split proposal by the MH acceptance probability
        if np.log(random_state.uniform()) < min(0.0, log_acc):
            # If the split proposal is accepted, then it becomes the next
            # state. At this point, launch_i.inv_c and launch_j.inv_c contain
            # the split proposal. Therefore, all labels are updated according
            # to the assignments in launch_i.inv_c and launch_j.inv_c
            c_n[list(launch_i.inv_c)] = launch_i.c
            c_n[list(launch_j.inv_c)] = launch_j.c
            # Update assignments in global component-example mapping
            inv_c[launch_i.c] = launch_i.inv_c
            inv_c[launch_j.c] = launch_j.inv_c
            # Update counts
            n_c[launch_i.c] = launch_i.n_c
            n_c[launch_j.c] = launch_j.n_c
            # Update mixture parameters
            mixture_params[launch_i.c] = launch_i.mixture_param
            mixture_params[launch_j.c] = launch_j.mixture_param
            # TODO: Logging
            # print "yay, accepted split with log-acc = {}".format(log_acc)
        else:
            # If the split proposal is rejected, then the old state remains as
            # the next state. Thus, remove launch_i.c from the active
            # components and put it back into the inactive components (if
            # necessary)
            active_components.remove(launch_i.c)
            inactive_components.add(launch_i.c)
            # TODO: Logging
            # print "nay, rejected split with log-acc = {}".format(log_acc)
    def _attempt_merge(self, n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
            launch_merge, active_components, inactive_components,
            process_param, mixture_params, random_state):
        """
        Metropolis-Hastings accept/reject step for a merge proposal.

        `log_acc` enters holding the log proposal density of the hypothetical
        reverse split; the log prior and log likelihood quotients are added
        here. On acceptance all examples move into `launch_merge.c` and
        `launch_i.c` is retired to the inactive pool; on rejection nothing
        changes.
        """
        pm = self.process_model
        mm = self.mixture_model
        # Logarithm of prior quotient, see Eq. (3.5) in Jain & Neal (2004) and
        # Eq. (8) in Jain & Neal (2007)
        log_acc += pm._ms_log_prior_pre(n, len(active_components),
                len(active_components)-1, process_param)
        log_acc += pm._ms_log_prior_post(launch_merge.n_c, process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_i.c], process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_merge.c], process_param)
        log_acc += mm._ms_log_prior(launch_merge.mixture_param)
        log_acc -= mm._ms_log_prior(mixture_params[launch_i.c])
        log_acc -= mm._ms_log_prior(mixture_params[launch_merge.c])
        # Logarithm of likelihood quotient, see Eq. (3.9) in Jain & Neal
        # (2004) and Eq. (12) in Jain & Neal (2007)
        log_acc += mm._ms_log_likelihood(x_n, launch_merge.inv_c,
                launch_merge.mixture_param, random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_i.c],
                mixture_params[launch_i.c], random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_merge.c],
                mixture_params[launch_merge.c], random_state)
        # Evaluate the merge proposal by the MH acceptance probability
        if np.log(random_state.uniform()) < min(0.0, log_acc):
            # If the merge proposal is accepted, then it becomes the next
            # state
            active_components.remove(launch_i.c)
            inactive_components.add(launch_i.c)
            # Assign all examples to component launch_merge.c that in the
            # proposal were assigned to launch_merge.c
            c_n[list(launch_merge.inv_c)] = launch_merge.c
            # Remove assignments to launch_i.c from global component-example
            # mapping
            inv_c[launch_i.c].clear()
            # Add assignments to launch_merge.c to global component-example
            # mapping
            inv_c[launch_merge.c] = launch_merge.inv_c
            # Update counts
            n_c[launch_i.c] = 0
            n_c[launch_merge.c] = launch_merge.n_c
            # Update mixture parameters: the retired component gets fresh,
            # unused parameters
            mixture_params[launch_i.c] = mm.InferParam(mm, random_state)
            mixture_params[launch_merge.c] = launch_merge.mixture_param
            # TODO: Logging
            # print "yay, accepted merge with log-acc = {}".format(log_acc)
        else:
            # There is nothing to do if the merge proposal is rejected
            pass
            # TODO: Logging
            # print "nay, rejected merge with log-acc = {}".format(log_acc)
class GenericRGMSSampler(GenericMSSampler):
    """
    Class which encapsulates common functionality between all restricted Gibbs
    merge-split (RGMS) samplers.
    """

    def __init__(self, process_model, m=10, scheme=None, max_iter=1000,
            warmup=None):
        """
        Parameters
        ----------
        process_model : compatible process model instance
        m : int, optional
            The number of auxiliary components
        scheme : None, int or array-like, optional
            Computation scheme; see `_check_scheme` for its layout
        max_iter : int, optional
            The maximum number of iterations
        warmup : int, optional
            The number of warm-up iterations
        """
        super(GenericRGMSSampler, self).__init__(process_model, m=m,
                max_iter=max_iter, warmup=warmup)
        # Unpack the validated scheme into its four tuning constants
        self._max_intermediate_scans_split, self._max_split_merge_moves, \
            self._max_gibbs_scans, self._max_intermediate_scans_merge = \
            self._check_scheme(scheme)
@staticmethod
def _check_scheme(scheme):
if scheme is None:
max_intermediate_scans_split = 5
max_split_merge_moves = 1
max_gibbs_scans = 1
max_intermediate_scans_merge = 5
else:
scheme = np.asarray(scheme, dtype=int)
if scheme.ndim == 0:
max_intermediate_scans_split = np.asscalar(scheme)
max_split_merge_moves = 1
max_gibbs_scans = 1
max_intermediate_scans_merge = 5
elif scheme.ndim == 1:
max_intermediate_scans_split = scheme[0]
try:
max_split_merge_moves = scheme[1]
except:
max_split_merge_moves = 1
try:
max_gibbs_scans = scheme[2]
except:
max_gibbs_scans = 1
try:
max_intermediate_scans_merge = scheme[3]
except:
max_intermediate_scans_merge = 1
elif scheme.ndim > 1:
raise ValueError('Scheme must be an integer or tuple of'
' integers; thus must have dimension <= 1.'
' Got scheme.ndim = %s' % str(tuple(scheme)))
if max_intermediate_scans_split < 1:
raise ValueError('The sampler requires at least one intermediate'
' restricted Gibbs sampling scan to reach the'
' the split launch state; thus must have'
' scheme[0] >= 1. Got scheme[0] ='
' %s' % str(max_intermediate_scans_split))
if max_split_merge_moves < 0:
raise ValueError('The number of split-merge moves per iteration'
' cannot be smaller than zero; thus must have'
' scheme[1] >= 0. Got scheme[1] ='
' %s' % str(max_split_merge_moves))
if max_gibbs_scans < 0:
raise ValueError('The number of Gibbs scans per iteration'
' cannot be smaller than zero; thus must have'
' scheme[2] >= 0. Got scheme[2] ='
' %s' % str(max_gibbs_scans))
if max_intermediate_scans_merge < 1:
raise ValueError('The sampler requires at least one intermediate'
' restricted Gibbs sampling scan to reach the'
' the merge launch state; thus must have'
' scheme[3] >= 1. Got scheme[3] ='
' %s' % str(max_intermediate_scans_merge))
return max_intermediate_scans_split, max_split_merge_moves, \
max_gibbs_scans, max_intermediate_scans_merge
@property
def scheme(self):
return self._max_intermediate_scans_split, \
self._max_split_merge_moves, self._max_gibbs_scans, \
self._max_intermediate_scans_merge
@scheme.setter
def scheme(self, scheme):
self._max_intermediate_scans_split, self._max_split_merge_moves, \
self._max_gibbs_scans, self._max_intermediate_scans_merge = \
self._check_scheme(scheme)
def _get_scheme(self, scheme):
if scheme is not None:
return self._check_scheme(scheme)
else:
return self._max_intermediate_scans_split, \
self._max_split_merge_moves, self._max_gibbs_scans, \
self._max_intermediate_scans_merge
def _init_split_launch_state(self, x_n, c_n, i, j, S, active_components,
inactive_components, random_state):
"""
Initialize the split launch state that will be used to compute the
restricted Gibbs sampling probabilities
"""
mm = self.mixture_model
Launch = self.Launch
# launch_i.c is the initial launch state component of example i
if c_n[i] == c_n[j]:
# This will be a split proposal, so let launch_i.c be a new
# component
launch_i = Launch(inactive_components.pop(), i, x_n[i],
mm.InferParam(mm, random_state))
active_components.add(launch_i.c)
else:
# This will be a merge proposal, so let launch_i.c be the current
# component of i
launch_i = Launch(c_n[i], i, x_n[i],
mm.InferParam(mm, random_state))
# launch_j.c is the initial launch state component of example j
launch_j = Launch(c_n[j], j, x_n[j], mm.InferParam(mm, random_state))
# Randomly select the launch state components, independently and with
# equal probability, for the examples in S
for l in S:
if random_state.uniform() < 0.5:
launch_i.update(l, x_n[l])
else:
launch_j.update(l, x_n[l])
return launch_i, launch_j
def _init_merge_launch_state(self, x_n, c_n, i, j, S, random_state):
"""
Initialize the merge launch state that will be used to compute the
restricted Gibbs sampling probabilities
"""
mm = self.mixture_model
Launch = self.Launch
# TODO: Should the model parameters of the merged component be set
# equal to the model parameters in the original component
# c_n[j]? According to Dahl (2005), they should. According to
# Jain & Neal (2007), they should not and instead be drawn from
# the prior distribution. Let's do the latter for now
launch_merge = Launch(c_n[j], j, x_n[j],
mm.InferParam(mm, random_state))
for l in (S | set([i])):
launch_merge.update(l, x_n[l])
return launch_merge
    @staticmethod
    def _restricted_gibbs_scans(n, x_n, c_n, i, j, S, launch_i, launch_j,
            launch_merge, process_param, mixture_params,
            max_intermediate_scans_split, max_intermediate_scans_merge,
            random_state):
        """
        Modify the initial launch state by performing intermediate restricted
        Gibbs sampling scans. The last scan in this loop leads to the proposal
        state.

        Returns
        -------
        log_acc : float
            The accumulated log proposal-density contribution to the
            Metropolis-Hastings acceptance log-probability. Whether a term
            is added or subtracted depends on whether the upcoming proposal
            is a split (c_n[i] == c_n[j]) or a merge.
        """
        # Initialize acceptance probability aggregator
        log_acc = 0.0
        # Initialize a total log probability accumulator. Since there are
        # two possibilities (either component launch_i.c or launch_j.c),
        # this is a vector of length 2
        log_dist = np.empty(2, dtype=float)
        # Modify the split launch state by performing
        # `max_intermediate_scans_split` intermediate restricted Gibbs
        # sampling scans to update `launch_i` and `launch_j`. Then, conduct
        # one final restricted Gibbs sampling scan from the split launch
        # state.
        for scan in range(max_intermediate_scans_split+1):
            if scan == max_intermediate_scans_split:
                # The last iteration of restricted Gibbs sampling leads to
                # the split or merge proposal. We keep the corresponding
                # log-likelihood, since it contributes to the proposal
                # density in the M-H acceptance log-probability acc
                if c_n[i] == c_n[j]:
                    # This is a split and there won't be a merge proposal
                    log_acc -= launch_i.mixture_param.iterate(
                            compute_log_likelihood=True)
                    log_acc -= launch_j.mixture_param.iterate(
                            compute_log_likelihood=True)
                else:
                    # This is a merge and there won't be a split proposal.
                    # Reset component parameters to initial values
                    log_acc += launch_i.mixture_param.iterate_to(
                            mixture_params[launch_i.c],
                            compute_log_likelihood=True)
                    log_acc += launch_j.mixture_param.iterate_to(
                            mixture_params[launch_j.c],
                            compute_log_likelihood=True)
            else:
                launch_i.mixture_param.iterate()
                launch_j.mixture_param.iterate()
            # These scans are restricted to the examples in S. We do not loop
            # over i and j; their launch state is kept fix!
            for l in S:
                # First, remove the current assignment of example l
                if l in launch_i.inv_c:
                    launch_i.downdate(l, x_n[l])
                else:
                    launch_j.downdate(l, x_n[l])
                # Then, calculate the full conditional log-probabilities.
                # First possibility: example l is in component launch_i.c.
                # Second possibility: example l is in component launch_j.c
                for index, launch in enumerate([launch_i, launch_j]):
                    # launch.n_c must never be zero!
                    # TODO: Make sure of that?
                    log_dist[index] = process_param.log_prior(n, launch.n_c,
                            1) + launch.mixture_param.log_likelihood(x_n[l])
                # Normalization
                log_dist -= _logsumexp(2, log_dist)
                if scan == max_intermediate_scans_split:
                    # The last iteration of restricted Gibbs sampling leads to
                    # the split or merge proposal. We keep the corresponding
                    # log-probability, since it contributes to the proposal
                    # density in the M-H acceptance log-probability acc
                    if c_n[i] == c_n[j]:
                        # This is a split and there won't be a merge proposal
                        index = random_state.choice(a=2, p=np.exp(log_dist))
                        log_acc -= log_dist[index]
                    else:
                        # This is a merge and there won't be a split proposal:
                        # the "proposal" is forced to reproduce the current
                        # assignment of example l
                        index = 0 if c_n[l] == launch_i.c else 1
                        log_acc += log_dist[index]
                else:
                    index = random_state.choice(a=2, p=np.exp(log_dist))
                # Commit the (re-)assignment of example l
                if index == 0:
                    launch_i.update(l, x_n[l])
                else:
                    launch_j.update(l, x_n[l])
        # Modify the merge launch state by performing
        # `max_intermediate_scans_merge` intermediate restricted Gibbs
        # sampling scans to update `launch_merge.mixture_param`. Then, conduct
        # one final restricted Gibbs sampling scan from the merge launch
        # state.
        for scan in range(max_intermediate_scans_merge+1):
            if scan == max_intermediate_scans_merge:
                # The last iteration of restricted Gibbs sampling leads to
                # the split or merge proposal. We keep the corresponding
                # log-likelihood, since it contributes to the proposal
                # density in the M-H acceptance log-probability acc
                if c_n[i] == c_n[j]:
                    # This is a split and there won't be a merge proposal.
                    # Reset component parameters to initial values
                    log_acc += launch_merge.mixture_param.iterate_to(
                            mixture_params[launch_merge.c],
                            compute_log_likelihood=True)
                else:
                    # This is a merge and there won't be a split proposal
                    log_acc -= launch_merge.mixture_param.iterate(
                            compute_log_likelihood=True)
            else:
                launch_merge.mixture_param.iterate()
        return log_acc
def _merge_split_iterate(self, n, x_n, c_n, inv_c, n_c,
active_components, inactive_components, process_param,
mixture_params, max_intermediate_scans_split,
max_intermediate_scans_merge, random_state):
"""
Performs a single iteration of the Split-Merge MCMC procedure for the
conjugate Dirichlet process mixture model, see Jain & Neal (2004).
"""
mm = self.mixture_model
i, j = self._select_random_pair(n, random_state)
S = self._find_common_components(c_n, inv_c, i, j)
launch_i, launch_j = self._init_split_launch_state(x_n, c_n, i, j, S,
active_components, inactive_components, random_state)
launch_merge = self._init_merge_launch_state(x_n, c_n, i, j, S,
random_state)
log_acc = self._restricted_gibbs_scans(n, x_n, c_n, i, j, S, launch_i,
launch_j, launch_merge, process_param, mixture_params,
max_intermediate_scans_split, max_intermediate_scans_merge,
random_state)
# If i and j are in the same mixture component, then we attempt to
# split
if c_n[i] == c_n[j]:
self._attempt_split(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
launch_j, active_components, inactive_components,
process_param, mixture_params, random_state)
# Otherwise, if i and j are in different mixture components, then we
# attempt to merge
else:
self._attempt_merge(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
launch_merge, active_components, inactive_components,
process_param, mixture_params, random_state)
def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, m,
max_intermediate_scans_split, max_split_merge_moves,
max_gibbs_scans, max_intermediate_scans_merge, random_state):
for _ in range(max_split_merge_moves):
self._merge_split_iterate(n, x_n, c_n, inv_c, n_c,
active_components, inactive_components, process_param,
mixture_params, max_intermediate_scans_split,
max_intermediate_scans_merge, random_state)
for _ in range(max_gibbs_scans):
for k in active_components:
mixture_params[k].iterate()
self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, m,
random_state)
process_param.iterate(n, len(active_components))
    def infer(self, x_n, c_n=None, m=None, scheme=None, max_iter=None,
            warmup=None, random_state=None):
        """
        Component and latent variable inference.

        Parameters
        ----------
        x_n : array-like
            Examples
        c_n : None or array-like, optional
            Vector of component indicator variables. If None, then the
            examples will be assigned to the same component initially
        m : None or int, optional
            The number of auxiliary components
        scheme: None or array-like, optional
            Computation scheme
        max_iter : None or int, optional
            The maximum number of iterations
        warmup: None or int, optional
            The number of warm-up iterations
        random_state : np.random.RandomState instance, optional
            Used for drawing the random variates

        Returns
        -------
        c_n : ndarray
            Inferred component vectors, one row per post-warmup iteration
        phi_c : ndarray
            Inferred latent variables, one dict per post-warmup iteration
        """
        m = self._get_m(m)
        max_intermediate_scans_split, max_split_merge_moves, \
            max_gibbs_scans, max_intermediate_scans_merge = \
            self._get_scheme(scheme)
        max_iter = self._get_max_iter(max_iter)
        warmup = self._get_warmup(warmup)
        pm = self.process_model
        random_state = pm._get_random_state(random_state)
        process_param = pm.InferParam(pm, random_state)
        # TODO: Move into mixture model?
        n, x_n = self._check_examples(x_n)
        c_n = self._check_components(n, c_n)
        # Maximum number of components
        c_max = n + m - 1
        # Inverse mapping from components to examples
        # TODO: Only needed for split and merge samplers
        inv_c = defaultdict(set)
        for i in range(n):
            inv_c[c_n[i]].add(i)
        # Number of examples per component
        n_c = np.bincount(c_n, minlength=c_max)
        # active_components is an unordered set of unique components
        active_components = set(np.unique(c_n))
        # inactive_components is an unordered set of currently unassigned
        # components
        inactive_components = set(range(c_max)) - active_components
        # Initialize model-dependent parameters lazily
        mm = self.mixture_model
        mixture_params = [mm.InferParam(mm, random_state)
                for _ in range(c_max)]
        for k in active_components:
            mixture_params[k].iterate()
            # TODO: Substitute for inv_c?
            for i in inv_c[k]:
                mixture_params[k].update(x_n[i])
            # NOTE(review): this second iterate() re-draws the component
            # parameters after the data has been absorbed; the plain Gibbs
            # and slice samplers' infer() methods omit it -- confirm intended
            mixture_params[k].iterate()
        # Storage for post-warmup samples
        c_n_samples = np.empty((max_iter-warmup)*n, dtype=int).reshape(
                (max_iter-warmup,n))
        phi_c_samples = [{} for _ in range(max_iter-warmup)]
        for itn in range(max_iter):
            self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params, m,
                    max_intermediate_scans_split, max_split_merge_moves,
                    max_gibbs_scans, max_intermediate_scans_merge,
                    random_state)
            if not itn-warmup < 0:
                # Record the state only once warm-up is over
                c_n_samples[(itn-warmup,)] = c_n
                for k in active_components:
                    phi_c_samples[itn-warmup][k] = mixture_params[k].phi_c()
        return c_n_samples, phi_c_samples
class GenericSAMSSampler(GenericMSSampler):
    """
    Class which encapsulates common functionality between all
    sequentially-allocated merge-split (SAMS) samplers.
    """

    def _sequential_allocation(self, n, x_n, c_n, i, j, S, active_components,
            inactive_components, process_param, random_state):
        """
        Proposes splits by sequentially allocating observations to one of two
        split components using allocation probabilities conditional on
        previously allocated data. Returns proposal densities for both splits
        and merges.

        Returns
        -------
        launch_i, launch_j : Launch
            The two proposed split components (anchored at i and j)
        launch_merge : Launch
            The proposed merged component (anchored at j)
        log_acc : float
            Log proposal-density contribution to the M-H acceptance ratio
        """
        mm = self.mixture_model
        Launch = self.Launch
        if c_n[i] == c_n[j]:
            # Split proposal: example i anchors a brand-new component
            launch_i = Launch(inactive_components.pop(), i, x_n[i],
                    mm.InferParam(mm, random_state))
            active_components.add(launch_i.c)
        else:
            # Merge proposal: example i keeps its current component
            launch_i = Launch(c_n[i], i, x_n[i],
                    mm.InferParam(mm, random_state))
        launch_j = Launch(c_n[j], j, x_n[j], mm.InferParam(mm, random_state))
        launch_merge = Launch(c_n[j], j, x_n[j],
                mm.InferParam(mm, random_state))
        log_acc = 0.0
        log_dist = np.empty(2, dtype=float)
        # TODO: Add code to sample component parameters (necessary for
        #       generalization to non-conjugate mixture models)
        # Visit the shared examples in random order and allocate each to one
        # of the two split components
        for l in random_state.permutation(list(S)):
            for index, launch in enumerate([launch_i, launch_j]):
                # launch.n_c must never be zero!
                # TODO: Make sure of that?
                log_dist[index] = process_param.log_prior(n, launch.n_c, 1) \
                        + launch.mixture_param.log_likelihood(x_n[l])
            # Normalization
            log_dist -= _logsumexp(2, log_dist)
            if c_n[i] == c_n[j]:
                # This is a split and there won't be a merge proposal
                index = random_state.choice(a=2, p=np.exp(log_dist))
                log_acc -= log_dist[index]
            else:
                # This is a merge and there won't be a split proposal: the
                # "proposal" is forced to reproduce the current assignment
                index = 0 if c_n[l] == launch_i.c else 1
                log_acc += log_dist[index]
            if index == 0:
                launch_i.update(l, x_n[l])
            else:
                launch_j.update(l, x_n[l])
        # The merge launch component absorbs i and every shared example
        for l in (S | set([i])):
            launch_merge.update(l, x_n[l])
        return launch_i, launch_j, launch_merge, log_acc
def _sams_iterate(self, n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, random_state):
"""
Performs a single iteration of the Sequentially-Allocated Merge-Split
procedure for the conjugate Dirichlet process mixture model, see Dahl
(2003).
"""
mm = self.mixture_model
i, j = self._select_random_pair(n, random_state)
S = self._find_common_components(c_n, inv_c, i, j)
launch_i, launch_j, launch_merge, log_acc = \
self._sequential_allocation(n, x_n, c_n, i, j, S,
active_components, inactive_components, process_param,
random_state)
# If i and j are in the same mixture component, then we attempt to
# split
if c_n[i] == c_n[j]:
self._attempt_split(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
launch_j, active_components, inactive_components,
process_param, mixture_params, random_state)
# Otherwise, if i and j are in different mixture components, then we
# attempt to merge
else:
self._attempt_merge(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
launch_merge, active_components, inactive_components,
process_param, mixture_params, random_state)
def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, m,
random_state):
self._sams_iterate(n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params,
random_state)
for k in active_components:
mixture_params[k].iterate()
self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, m,
random_state)
process_param.iterate(n, len(active_components))
class GenericSliceSampler(GenericSampler):
    """
    Class which encapsulates common functionality between all slice samplers.
    """

    def __init__(self, process_model, max_iter=1000, warmup=None):
        # NOTE(review): adds nothing over GenericSampler.__init__; kept for
        # symmetry with the other sampler classes
        super(GenericSliceSampler, self).__init__(process_model,
                max_iter=max_iter, warmup=warmup)
@staticmethod
def _slice_iterate(n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, random_state):
# For each component `k`, sample component weights:
dalpha = np.zeros(len(n_c), dtype=float)
for k in active_components:
dalpha[k] = n_c[k]
new_k = inactive_components.pop()
proposed_components = set([new_k])
dalpha[new_k] = process_param.alpha
beta = random_state.dirichlet(dalpha)
mixture_params[new_k].iterate()
beta_star = beta[new_k]
# Generate a sample of `u_star`, the minimum of the slice variables
k_star = None
for k in active_components:
u_c_star = beta[k] * random_state.beta(1.0, n_c[k])
if k_star:
if u_c_star < u_star:
k_star = k
u_star = u_c_star
else:
k_star = k
u_star = u_c_star
# Sample the index `i_star` of the slice variable that achieves
# `u_star`
i_star = random_state.choice(a=list(inv_c[k_star]))
# Create new components through stick breaking until `beta_star` <
# `u_star`
while not beta_star < u_star:
new_k = inactive_components.pop()
proposed_components.add(new_k)
nu = random_state.beta(1.0, process_param.alpha)
beta[new_k] = beta_star * nu
mixture_params[new_k].iterate()
beta_star *= 1.0 - nu
active_components |= proposed_components
# For each observation `x_n[i]`, sample the component assignment
# `c_n[i]`
for i in range(n):
# Bookkeeping: Downdate
prev_k = c_n[i]
inv_c[prev_k].remove(i)
n_c[prev_k] -= 1
mixture_params[prev_k].downdate(x_n[i])
if n_c[prev_k] == 0:
proposed_components.add(prev_k)
# Sample slice variable
if i == i_star:
u = u_star
else:
u = random_state.uniform(low=u_star, high=beta[prev_k])
# Initialize and populate the total log probability accumulator
log_dist = np.empty(len(n_c), dtype=float)
log_dist.fill(-np.inf)
# TODO: Performance improvement possible by reordering the
# clusters according to `beta[k]`
for k in active_components:
# Data items can only join clusters for which `beta[k]` is
# larger than the respective slice variable
if beta[k] > u:
log_dist[k] = mixture_params[k].log_likelihood(x_n[i])
# Sample from log_dist. Normalization is required
log_dist -= _logsumexp(len(n_c), log_dist)
# TODO: Can we expect performance improvements if we exclude those
# elements of `log_dist` that are -inf?
next_k = random_state.choice(a=len(n_c), p=np.exp(log_dist))
# Bookkeeping: Update
c_n[i] = next_k
inv_c[next_k].add(i)
n_c[next_k] += 1
mixture_params[next_k].update(x_n[i])
proposed_components.discard(next_k)
# Cleanup
active_components -= proposed_components
inactive_components |= proposed_components
def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params, random_state):
# For each active component `k`, sample component parameters
for k in active_components:
mixture_params[k].iterate()
self._slice_iterate(n, x_n, c_n, inv_c, n_c, active_components,
inactive_components, process_param, mixture_params,
random_state)
process_param.iterate(n, len(active_components))
    def infer(self, x_n, c_n=None, max_iter=None, warmup=None,
            random_state=None):
        """
        Component and latent variable inference.

        Parameters
        ----------
        x_n : array-like
            Examples
        c_n : None or array-like, optional
            Vector of component indicator variables. If None, then the
            examples will be assigned to the same component initially
        max_iter : None or int, optional
            The maximum number of iterations
        warmup: None or int, optional
            The number of warm-up iterations
        random_state : np.random.RandomState instance, optional
            Used for drawing the random variates

        Returns
        -------
        c_n : ndarray
            Inferred component vectors, one row per post-warmup iteration
        phi_c : ndarray
            Inferred latent variables, one dict per post-warmup iteration
        """
        max_iter = self._get_max_iter(max_iter)
        warmup = self._get_warmup(warmup)
        pm = self.process_model
        random_state = pm._get_random_state(random_state)
        process_param = pm.InferParam(pm, random_state)
        # TODO: Move into mixture model?
        n, x_n = self._check_examples(x_n)
        c_n = self._check_components(n, c_n)
        # Maximum number of components (no auxiliary components here, unlike
        # the Gibbs-based samplers)
        c_max = n
        # Inverse mapping from components to examples
        # TODO: Only needed for split and merge samplers
        inv_c = defaultdict(set)
        for i in range(n):
            inv_c[c_n[i]].add(i)
        # Number of examples per component
        n_c = np.bincount(c_n, minlength=c_max)
        # active_components is an unordered set of unique components
        active_components = set(np.unique(c_n))
        # inactive_components is an unordered set of currently unassigned
        # components
        inactive_components = set(range(c_max)) - active_components
        # Initialize model-dependent parameters lazily
        mm = self.mixture_model
        mixture_params = [mm.InferParam(mm, random_state)
                for _ in range(c_max)]
        for k in active_components:
            mixture_params[k].iterate()
            # TODO: Substitute for inv_c?
            for i in inv_c[k]:
                mixture_params[k].update(x_n[i])
        # Storage for post-warmup samples
        c_n_samples = np.empty((max_iter-warmup)*n, dtype=int).reshape(
                (max_iter-warmup,n))
        phi_c_samples = [{} for _ in range(max_iter-warmup)]
        for itn in range(max_iter):
            self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params,
                    random_state)
            if not itn-warmup < 0:
                # Record the state only once warm-up is over
                c_n_samples[(itn-warmup,)] = c_n
                for k in active_components:
                    phi_c_samples[itn-warmup][k] = mixture_params[k].phi_c()
        return c_n_samples, phi_c_samples
| <filename>imm/samplers/generic.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import abc
import numpy as np
from collections import defaultdict
from ..utils import _logsumexp
from ..models.processes import GenericProcess
from ..models.mixtures import GenericMixture
class GenericSampler(object):
    """
    Class which encapsulates common functionality between all samplers.
    """

    # Python 2-style ABC declaration (ignored on Python 3)
    __metaclass__ = abc.ABCMeta

    # Subclasses populate these with the process/mixture model classes they
    # support; consulted by the _check_* validators below
    compatible_process_models = set()
    compatible_mixture_models = set()

    def __init__(self, process_model, max_iter=1000, warmup=None):
        """
        Parameters
        ----------
        process_model : compatible process model instance
            The mixture model is taken from this process model
        max_iter : int, optional
            The maximum number of iterations
        warmup : int, optional
            The number of warm-up iterations
        """
        self._process_model = self._check_process_model(process_model)
        # The mixture model is owned by the process model
        self._mixture_model = self._check_mixture_model(
                self._process_model._mixture_model)
        self._max_iter, self._warmup = self._check_max_iter(max_iter, warmup)
@classmethod
def _check_process_model(cls, process_model):
if isinstance(process_model, GenericProcess):
pm_class = process_model.__class__
if pm_class in cls.compatible_process_models:
return process_model
raise ValueError('A process model of type %r cannot be used with'
' this sampler' % pm_class.__name__)
raise ValueError("'process_model' must be a compatible process model"
" instance. Got process_model = %r" % process_model)
@property
def process_model(self):
return self._process_model
@process_model.setter
def process_model(self, process_model):
self._process_model = self._check_process_model(process_model)
def _get_process_model(self, process_model):
if process_model is not None:
return self._check_process_model(process_model)
else:
return self._process_model
@classmethod
def _check_mixture_model(cls, mixture_model):
if isinstance(mixture_model, GenericMixture):
mm_class = mixture_model.__class__
if mm_class in cls.compatible_mixture_models:
return mixture_model
raise ValueError('A mixture model of type %r cannot be used with'
' this sampler' % mm_class.__name__)
raise ValueError("'mixture_model' must be a compatible mixture model."
" Got mixture_model = %r" % mixture_model)
@property
def mixture_model(self):
return self._mixture_model
@mixture_model.setter
def mixture_model(self, mixture_model):
self._mixture_model = self._check_mixture_model(mixture_model)
def _get_mixture_model(self, mixture_model):
if mixture_model is not None:
return self._check_mixture_model(mixture_model)
else:
return self._mixture_model
@staticmethod
def _check_max_iter(max_iter, warmup):
if max_iter is not None:
if not np.isscalar(max_iter):
raise ValueError("Integer 'max_iter' must be a scalar.")
max_iter = int(max_iter)
if max_iter < 1:
raise ValueError("Integer 'max_iter' must be larger than"
" zero, but max_iter = %d" % max_iter)
else:
max_iter = 1000
if warmup is not None:
if not np.isscalar(warmup):
raise ValueError("Integer 'warmup' must be a scalar.")
warmup = int(warmup)
if warmup < 0:
raise ValueError("Integer 'warmup' must not be smaller than"
" zero, but warmup = %d" % warmup)
if not warmup < max_iter:
raise ValueError("Integer 'warmup' must be smaller than"
" 'max_iter', but warmup = %d" % warmup)
else:
warmup = max_iter / 2
return max_iter, warmup
@property
def max_iter(self):
return self._max_iter
@max_iter.setter
def max_iter(self, max_iter):
self._max_iter, _ = self._check_max_iter(max_iter, self._warmup)
def _get_max_iter(self, max_iter):
if max_iter is not None:
max_iter, _ = self._check_max_iter(max_iter, self._warmup)
return max_iter
else:
return self._max_iter
@property
def warmup(self):
    """The number of warm-up (burn-in) iterations."""
    return self._warmup
@warmup.setter
def warmup(self, warmup):
    # Re-validate against the currently stored iteration cap; the cap
    # itself is left unchanged.
    _, self._warmup = self._check_max_iter(self._max_iter, warmup)
def _get_warmup(self, warmup):
if warmup is not None:
_, warmup = self._check_max_iter(self._max_iter, warmup)
return warmup
else:
return self._warmup
@staticmethod
def _check_examples(x_n):
# TODO: Make this truly model-agnostic. Get rid of dtype=float
x_n = np.asarray(x_n, dtype=float)
if x_n.ndim == 0:
x_n = x_n[np.newaxis, np.newaxis]
elif x_n.ndim == 1:
x_n = x_n[:, np.newaxis]
elif x_n.ndim > 2:
raise ValueError("'x_n' must be at most two-dimensional,"
" but x_n.ndim = %d" % x_n.ndim)
return x_n.shape[0], x_n
@staticmethod
def _check_components(n, c_n):
if c_n == None:
c_n = np.zeros(n, dtype=int)
else:
c_n = np.asarray(c_n, dtype=int)
if c_n.ndim == 0:
c_n = c_n[np.newaxis]
elif c_n.ndim > 1:
raise ValueError("'c_n' must be at most one-dimensional,"
" but c_n.ndim = %d" % c_n.ndim)
if not c_n.shape == (n, ):
raise ValueError("'c_n' has incompatible dimensions: should"
" be %s, got %s." % ((n,), c_n.shape))
return c_n
class GenericGibbsSampler(GenericSampler):
    """
    Class which encapsulates common functionality between all Gibbs samplers.
    """

    def __init__(self, process_model, m=10, max_iter=1000, warmup=None):
        # `m` is the number of auxiliary components proposed per example
        # in each Gibbs step (algorithm 8 in Neal, 2000).
        super(GenericGibbsSampler, self).__init__(process_model,
                max_iter=max_iter, warmup=warmup)
        self._m = self._check_m(m)

    @staticmethod
    def _check_m(m):
        """Validate the number of auxiliary components; None maps to the
        default of 10. Raises ValueError for non-scalar or non-positive
        values."""
        if m is not None:
            if not np.isscalar(m):
                raise ValueError("Integer 'm' must be a scalar.")
            m = int(m)
            if m < 1:
                raise ValueError("Integer 'm' must be larger than"
                                 " zero, but m = %d" % m)
        else:
            m = 10
        return m

    @property
    def m(self):
        """The number of auxiliary components per Gibbs step."""
        return self._m

    @m.setter
    def m(self, m):
        self._m = self._check_m(m)

    def _get_m(self, m):
        # Resolve a per-call override of `m`, falling back to the stored
        # value when None is given.
        if m is not None:
            return self._check_m(m)
        else:
            return self._m

    @staticmethod
    def _gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            random_state):
        """
        Performs a single iteration of Radford Neal's algorithms 3 or 8, see
        Neal (2000).

        Mutates `c_n`, `inv_c`, `n_c`, `active_components`,
        `inactive_components` and `mixture_params` in place.
        """
        for i in range(n):
            prev_k = c_n[i]
            # Bookkeeping. Note that Neal's algorithms do not need inv_c to
            # work. It is used only in the split & merge algorithms
            if inv_c is not None:
                inv_c[prev_k].remove(i)
            # Downdate component counter
            n_c[prev_k] -= 1
            # Downdate model-dependent parameters
            mixture_params[prev_k].downdate(x_n[i])
            # If the previous component is empty after example i is removed,
            # recycle it and propose it as new component. If it is not empty,
            # we need to get a new component from the inactive_components set
            if n_c[prev_k] == 0:
                proposed_components = set([prev_k])
            else:
                proposed_components = set([inactive_components.pop()])
            for _ in range(1, m):
                proposed_components.add(inactive_components.pop())
            active_components |= proposed_components
            # Make sure the proposed components are not contaminated with
            # obsolete information
            for k in (proposed_components - set([prev_k])):
                mixture_params[k].iterate()
            # Initialize and populate the total log probability accumulator
            log_dist = np.empty(len(n_c), dtype=float)
            log_dist.fill(-np.inf)
            for k in active_components:
                # Calculate the process prior and mixture likelihood
                log_dist[k] = process_param.log_prior(n, n_c[k], m) + \
                        mixture_params[k].log_likelihood(x_n[i])
            # Sample from log_dist. Normalization is required
            log_dist -= _logsumexp(len(n_c), log_dist)
            # TODO: Can we expect performance improvements if we exclude those
            # elements of `log_dist` that are -inf?
            next_k = random_state.choice(a=len(n_c), p=np.exp(log_dist))
            c_n[i] = next_k
            # More bookkeeping
            if inv_c is not None:
                inv_c[next_k].add(i)
            # Update component counter
            n_c[next_k] += 1
            # Update model-dependent parameters
            mixture_params[next_k].update(x_n[i])
            # Cleanup
            proposed_components.discard(next_k)
            active_components -= proposed_components
            inactive_components |= proposed_components

    def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            random_state):
        # One full sweep: resample the parameters of all active
        # components, reassign every example with a Gibbs scan, then
        # update the process-level parameter.
        for k in active_components:
            mixture_params[k].iterate()
        self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
                inactive_components, process_param, mixture_params, m,
                random_state)
        process_param.iterate(n, len(active_components))

    def infer(self, x_n, c_n=None, m=None, max_iter=None, warmup=None,
            random_state=None):
        """
        Component and latent variable inference.

        Parameters
        ----------
        x_n : array-like
            Examples
        c_n : None or array-like, optional
            Vector of component indicator variables. If None, then the
            examples will be assigned to the same component initially
        m : None or int, optional
            The number of auxiliary components
        max_iter : None or int, optional
            The maximum number of iterations
        warmup: None or int, optional
            The number of warm-up iterations
        random_state : np.random.RandomState instance, optional
            Used for drawing the random variates

        Returns
        -------
        c_n : ndarray
            Inferred component vectors
        phi_c : ndarray
            Inferred latent variables
        """
        m = self._get_m(m)
        max_iter = self._get_max_iter(max_iter)
        warmup = self._get_warmup(warmup)
        pm = self.process_model
        random_state = pm._get_random_state(random_state)
        process_param = pm.InferParam(pm, random_state)
        # TODO: Move into mixture model?
        n, x_n = self._check_examples(x_n)
        c_n = self._check_components(n, c_n)
        # Maximum number of components
        c_max = n + m - 1
        # Inverse mapping from components to examples
        # TODO: Only needed for split and merge samplers
        inv_c = defaultdict(set)
        for i in range(n):
            inv_c[c_n[i]].add(i)
        # Number of examples per component
        n_c = np.bincount(c_n, minlength=c_max)
        # active_components is an unordered set of unique components
        active_components = set(np.unique(c_n))
        # inactive_components is an unordered set of currently unassigned
        # components
        inactive_components = set(range(c_max)) - active_components
        # Initialize model-dependent parameters lazily
        mm = self.mixture_model
        mixture_params = [mm.InferParam(mm, random_state)
                for _ in range(c_max)]
        for k in active_components:
            mixture_params[k].iterate()
            # TODO: Substitute for inv_c?
            for i in inv_c[k]:
                mixture_params[k].update(x_n[i])
        # Post-warmup traces of the component assignments and of the
        # per-component latent variables.
        c_n_samples = np.empty((max_iter-warmup)*n, dtype=int).reshape(
                (max_iter-warmup, n))
        phi_c_samples = [{} for _ in range(max_iter-warmup)]
        for itn in range(max_iter):
            self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params, m,
                    random_state)
            if not itn-warmup < 0:
                c_n_samples[(itn-warmup,)] = c_n
                for k in active_components:
                    phi_c_samples[itn-warmup][k] = mixture_params[k].phi_c()
        return c_n_samples, phi_c_samples
class GenericMSSampler(GenericGibbsSampler):
    """
    Class which encapsulates common functionality between all merge-split (MS)
    samplers.
    """

    class Launch(object):
        """
        Book-keeping record for one launch-state component: its label
        `c`, the set `inv_c` of examples assigned to it, their count
        `n_c`, and the model-dependent parameters `mixture_param`.
        """

        def __init__(self, c, g, x_g, mixture_param):
            # Set the component
            self.c = c
            # The set inv_c will contain all examples that belong to the
            # component c
            self.inv_c = set([g])
            # Number of examples in the component c
            self.n_c = 1
            # Auxiliary, model-dependent parameters
            # TODO: A less ugly way to achieve parameter initialization
            mixture_param.iterate()
            # NOTE(review): assumes `update` returns the parameter object
            # itself (fluent style); otherwise mixture_param would be
            # None -- confirm in the InferParam implementations.
            self.mixture_param = mixture_param.update(x_g)

        def update(self, g, x_g):
            """Assign example g (with value x_g) to this component."""
            # Add example g to component c
            self.inv_c.add(g)
            # Increment counter
            self.n_c += 1
            # Update model-dependent parameters
            self.mixture_param.update(x_g)

        def downdate(self, g, x_g):
            """Remove example g (with value x_g) from this component."""
            # Remove example g from component c
            self.inv_c.remove(g)
            # Reduce counter
            self.n_c -= 1
            # Downdate model-dependent parameters
            self.mixture_param.downdate(x_g)

    @staticmethod
    def _select_random_pair(n, random_state):
        """
        Select two distinct observations (i.e. examples), i and j, uniformly
        at random
        """
        i, j = random_state.choice(a=n, size=2, replace=False)
        return i, j

    @staticmethod
    def _find_common_components(c_n, inv_c, i, j):
        """
        Define a set of examples, S, that does not contain i or j, but all
        other examples that belong to the same component as i or j
        """
        if c_n[i] == c_n[j]:
            S = inv_c[c_n[i]] - set([i, j])
        else:
            S = (inv_c[c_n[i]] | inv_c[c_n[j]]) - set([i, j])
        return S

    def _attempt_split(self, n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
            launch_j, active_components, inactive_components, process_param,
            mixture_params, random_state):
        """
        Metropolis-Hastings accept/reject step for a split proposal:
        complete the acceptance log-probability `log_acc` with the prior
        and likelihood quotients and, on acceptance, commit the launch
        state to the global bookkeeping structures (in place).
        """
        pm = self.process_model
        mm = self.mixture_model
        # Logarithm of prior quotient, see Eq. (3.4) in Jain & Neal (2004) and
        # Eq. (7) in Jain & Neal (2007)
        log_acc += pm._ms_log_prior_pre(n, len(active_components),
                len(active_components)-1, process_param)
        log_acc += pm._ms_log_prior_post(launch_i.n_c, process_param)
        log_acc += pm._ms_log_prior_post(launch_j.n_c, process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_j.c], process_param)
        log_acc += mm._ms_log_prior(launch_i.mixture_param)
        log_acc += mm._ms_log_prior(launch_j.mixture_param)
        log_acc -= mm._ms_log_prior(mixture_params[launch_j.c])
        # Logarithm of likelihood quotient, see Eq. (3.8) in Jain & Neal
        # (2004) and Eq. (11) in Jain & Neal (2007)
        log_acc += mm._ms_log_likelihood(x_n, launch_i.inv_c,
                launch_i.mixture_param, random_state)
        log_acc += mm._ms_log_likelihood(x_n, launch_j.inv_c,
                launch_j.mixture_param, random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_j.c],
                mixture_params[launch_j.c], random_state)
        # Evaluate the split proposal by the MH acceptance probability
        if np.log(random_state.uniform()) < min(0.0, log_acc):
            # If the split proposal is accepted, then it becomes the next
            # state. At this point, launch_i.inv_c and launch_j.inv_c contain
            # the split proposal. Therefore, all labels are updated according
            # to the assignments in launch_i.inv_c and launch_j.inv_c
            c_n[list(launch_i.inv_c)] = launch_i.c
            c_n[list(launch_j.inv_c)] = launch_j.c
            # Update assignments in global component-example mapping
            inv_c[launch_i.c] = launch_i.inv_c
            inv_c[launch_j.c] = launch_j.inv_c
            # Update counts
            n_c[launch_i.c] = launch_i.n_c
            n_c[launch_j.c] = launch_j.n_c
            # Update mixture parameters
            mixture_params[launch_i.c] = launch_i.mixture_param
            mixture_params[launch_j.c] = launch_j.mixture_param
            # TODO: Logging
            # print "yay, accepted split with log-acc = {}".format(log_acc)
        else:
            # If the split proposal is rejected, then the old state remains as
            # the next state. Thus, remove launch_i.c from the active
            # components and put it back into the inactive components (if
            # necessary)
            active_components.remove(launch_i.c)
            inactive_components.add(launch_i.c)
            # TODO: Logging
            # print "nay, rejected split with log-acc = {}".format(log_acc)

    def _attempt_merge(self, n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
            launch_merge, active_components, inactive_components,
            process_param, mixture_params, random_state):
        """
        Metropolis-Hastings accept/reject step for a merge proposal:
        complete the acceptance log-probability `log_acc` with the prior
        and likelihood quotients and, on acceptance, commit the merged
        launch state to the global bookkeeping structures (in place).
        """
        pm = self.process_model
        mm = self.mixture_model
        # Logarithm of prior quotient, see Eq. (3.5) in Jain & Neal (2004) and
        # Eq. (8) in Jain & Neal (2007)
        log_acc += pm._ms_log_prior_pre(n, len(active_components),
                len(active_components)-1, process_param)
        log_acc += pm._ms_log_prior_post(launch_merge.n_c, process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_i.c], process_param)
        log_acc -= pm._ms_log_prior_post(n_c[launch_merge.c], process_param)
        log_acc += mm._ms_log_prior(launch_merge.mixture_param)
        log_acc -= mm._ms_log_prior(mixture_params[launch_i.c])
        log_acc -= mm._ms_log_prior(mixture_params[launch_merge.c])
        # Logarithm of likelihood quotient, see Eq. (3.9) in Jain & Neal
        # (2004) and Eq. (12) in Jain & Neal (2007)
        log_acc += mm._ms_log_likelihood(x_n, launch_merge.inv_c,
                launch_merge.mixture_param, random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_i.c],
                mixture_params[launch_i.c], random_state)
        log_acc -= mm._ms_log_likelihood(x_n, inv_c[launch_merge.c],
                mixture_params[launch_merge.c], random_state)
        # Evaluate the split proposal by the MH acceptance probability
        if np.log(random_state.uniform()) < min(0.0, log_acc):
            # If the merge proposal is accepted, then it becomes the next
            # state
            active_components.remove(launch_i.c)
            inactive_components.add(launch_i.c)
            # Assign all examples to component launch_merge.c that in the
            # proposal were assigned to launch_merge.c
            c_n[list(launch_merge.inv_c)] = launch_merge.c
            # Remove assignments to launch_i.c from global component-example
            # mapping
            inv_c[launch_i.c].clear()
            # Add assignments to launch_merge.c to global component-example
            # mapping
            inv_c[launch_merge.c] = launch_merge.inv_c
            # Update counts
            n_c[launch_i.c] = 0
            n_c[launch_merge.c] = launch_merge.n_c
            # Update mixture parameters
            mixture_params[launch_i.c] = mm.InferParam(mm, random_state)
            mixture_params[launch_merge.c] = launch_merge.mixture_param
            # TODO: Logging
            #print "yay, accepted merge with log-acc = {}".format(log_acc)
        else:
            # There is nothing to do if the merge proposal is rejected
            pass
            # TODO: Logging
            # print "nay, rejected merge with log-acc = {}".format(log_acc)
class GenericRGMSSampler(GenericMSSampler):
    """
    Class which encapsulates common functionality between all restricted Gibbs
    merge-split (RGMS) samplers.
    """

    def __init__(self, process_model, m=10, scheme=None, max_iter=1000,
            warmup=None):
        """
        Parameters
        ----------
        process_model : compatible process model
        m : int, optional
            Number of auxiliary components for the embedded Gibbs scans
        scheme : None, int or sequence of int, optional
            Computation scheme, see `_check_scheme`
        max_iter : None or int, optional
            The maximum number of iterations
        warmup : None or int, optional
            The number of warm-up iterations
        """
        super(GenericRGMSSampler, self).__init__(process_model, m=m,
                max_iter=max_iter, warmup=warmup)
        self._max_intermediate_scans_split, self._max_split_merge_moves, \
                self._max_gibbs_scans, self._max_intermediate_scans_merge = \
                self._check_scheme(scheme)

    @staticmethod
    def _check_scheme(scheme):
        """
        Validate the computation scheme and unpack it into its four
        components: (intermediate restricted Gibbs scans towards the
        split launch state, split-merge moves per iteration, full Gibbs
        scans per iteration, intermediate scans towards the merge launch
        state). Accepts None, a scalar, or a sequence of up to four
        integers; missing entries get defaults.
        """
        if scheme is None:
            max_intermediate_scans_split = 5
            max_split_merge_moves = 1
            max_gibbs_scans = 1
            max_intermediate_scans_merge = 5
        else:
            scheme = np.asarray(scheme, dtype=int)
            if scheme.ndim == 0:
                # int(...) instead of np.asscalar, which was deprecated in
                # NumPy 1.16 and removed in NumPy 1.23.
                max_intermediate_scans_split = int(scheme)
                max_split_merge_moves = 1
                max_gibbs_scans = 1
                max_intermediate_scans_merge = 5
            elif scheme.ndim == 1:
                max_intermediate_scans_split = scheme[0]
                # Catch only IndexError (entry absent); a bare `except`
                # would also silently mask unrelated failures.
                try:
                    max_split_merge_moves = scheme[1]
                except IndexError:
                    max_split_merge_moves = 1
                try:
                    max_gibbs_scans = scheme[2]
                except IndexError:
                    max_gibbs_scans = 1
                try:
                    max_intermediate_scans_merge = scheme[3]
                except IndexError:
                    # NOTE(review): this fallback is 1 whereas every other
                    # default for the merge scans is 5 -- confirm intended.
                    max_intermediate_scans_merge = 1
            elif scheme.ndim > 1:
                # Report the actual number of dimensions (the original
                # message formatted the scheme's contents here).
                raise ValueError('Scheme must be an integer or tuple of'
                                 ' integers; thus must have dimension <= 1.'
                                 ' Got scheme.ndim = %s' % str(scheme.ndim))
        if max_intermediate_scans_split < 1:
            raise ValueError('The sampler requires at least one intermediate'
                             ' restricted Gibbs sampling scan to reach the'
                             ' the split launch state; thus must have'
                             ' scheme[0] >= 1. Got scheme[0] ='
                             ' %s' % str(max_intermediate_scans_split))
        if max_split_merge_moves < 0:
            raise ValueError('The number of split-merge moves per iteration'
                             ' cannot be smaller than zero; thus must have'
                             ' scheme[1] >= 0. Got scheme[1] ='
                             ' %s' % str(max_split_merge_moves))
        if max_gibbs_scans < 0:
            raise ValueError('The number of Gibbs scans per iteration'
                             ' cannot be smaller than zero; thus must have'
                             ' scheme[2] >= 0. Got scheme[2] ='
                             ' %s' % str(max_gibbs_scans))
        if max_intermediate_scans_merge < 1:
            raise ValueError('The sampler requires at least one intermediate'
                             ' restricted Gibbs sampling scan to reach the'
                             ' the merge launch state; thus must have'
                             ' scheme[3] >= 1. Got scheme[3] ='
                             ' %s' % str(max_intermediate_scans_merge))
        return max_intermediate_scans_split, max_split_merge_moves, \
                max_gibbs_scans, max_intermediate_scans_merge

    @property
    def scheme(self):
        """The four-component computation scheme as a tuple."""
        return self._max_intermediate_scans_split, \
                self._max_split_merge_moves, self._max_gibbs_scans, \
                self._max_intermediate_scans_merge

    @scheme.setter
    def scheme(self, scheme):
        self._max_intermediate_scans_split, self._max_split_merge_moves, \
                self._max_gibbs_scans, self._max_intermediate_scans_merge = \
                self._check_scheme(scheme)

    def _get_scheme(self, scheme):
        # Resolve a per-call override of the scheme, falling back to the
        # stored values when None is given.
        if scheme is not None:
            return self._check_scheme(scheme)
        else:
            return self._max_intermediate_scans_split, \
                    self._max_split_merge_moves, self._max_gibbs_scans, \
                    self._max_intermediate_scans_merge

    def _init_split_launch_state(self, x_n, c_n, i, j, S, active_components,
            inactive_components, random_state):
        """
        Initialize the split launch state that will be used to compute the
        restricted Gibbs sampling probabilities
        """
        mm = self.mixture_model
        Launch = self.Launch
        # launch_i.c is the initial launch state component of example i
        if c_n[i] == c_n[j]:
            # This will be a split proposal, so let launch_i.c be a new
            # component
            launch_i = Launch(inactive_components.pop(), i, x_n[i],
                    mm.InferParam(mm, random_state))
            active_components.add(launch_i.c)
        else:
            # This will be a merge proposal, so let launch_i.c be the current
            # component of i
            launch_i = Launch(c_n[i], i, x_n[i],
                    mm.InferParam(mm, random_state))
        # launch_j.c is the initial launch state component of example j
        launch_j = Launch(c_n[j], j, x_n[j], mm.InferParam(mm, random_state))
        # Randomly select the launch state components, independently and with
        # equal probability, for the examples in S
        for l in S:
            if random_state.uniform() < 0.5:
                launch_i.update(l, x_n[l])
            else:
                launch_j.update(l, x_n[l])
        return launch_i, launch_j

    def _init_merge_launch_state(self, x_n, c_n, i, j, S, random_state):
        """
        Initialize the merge launch state that will be used to compute the
        restricted Gibbs sampling probabilities
        """
        mm = self.mixture_model
        Launch = self.Launch
        # TODO: Should the model parameters of the merged component be set
        #       equal to the model parameters in the original component
        #       c_n[j]? According to Dahl (2005), they should. According to
        #       Jain & Neal (2007), they should not and instead be drawn from
        #       the prior distribution. Let's do the latter for now
        launch_merge = Launch(c_n[j], j, x_n[j],
                mm.InferParam(mm, random_state))
        for l in (S | set([i])):
            launch_merge.update(l, x_n[l])
        return launch_merge

    @staticmethod
    def _restricted_gibbs_scans(n, x_n, c_n, i, j, S, launch_i, launch_j,
            launch_merge, process_param, mixture_params,
            max_intermediate_scans_split, max_intermediate_scans_merge,
            random_state):
        """
        Modify the initial launch state by performing intermediate restricted
        Gibbs sampling scans. The last scan in this loop leads to the proposal
        state. Returns the accumulated contribution to the M-H acceptance
        log-probability.
        """
        # Initialize acceptance probability aggregator
        log_acc = 0.0
        # Initialize a total log probability accumulator. Since there are
        # two possibilities (either component launch_i.c or launch_j.c),
        # this is a vector of length 2
        log_dist = np.empty(2, dtype=float)
        # Modify the split launch state by performing
        # `max_intermediate_scans_split` intermediate restricted Gibbs
        # sampling scans to update `launch_i` and `launch_j`. Then, conduct
        # one final restricted Gibbs sampling scan from the split launch
        # state.
        for scan in range(max_intermediate_scans_split+1):
            if scan == max_intermediate_scans_split:
                # The last iteration of restricted Gibbs sampling leads to
                # the split or merge proposal. We keep the corresponding
                # log-likelihood, since it contributes to the proposal
                # density in the M-H acceptance log-probability acc
                if c_n[i] == c_n[j]:
                    # This is a split and there won't be a merge proposal
                    log_acc -= launch_i.mixture_param.iterate(
                            compute_log_likelihood=True)
                    log_acc -= launch_j.mixture_param.iterate(
                            compute_log_likelihood=True)
                else:
                    # This is a merge and there won't be a split proposal.
                    # Reset component parameters to initial values
                    log_acc += launch_i.mixture_param.iterate_to(
                            mixture_params[launch_i.c],
                            compute_log_likelihood=True)
                    log_acc += launch_j.mixture_param.iterate_to(
                            mixture_params[launch_j.c],
                            compute_log_likelihood=True)
            else:
                launch_i.mixture_param.iterate()
                launch_j.mixture_param.iterate()
            # These scans are restricted to the examples in S. We do not loop
            # over i and j; their launch state is kept fix!
            for l in S:
                # First, remove the current assignment of example l
                if l in launch_i.inv_c:
                    launch_i.downdate(l, x_n[l])
                else:
                    launch_j.downdate(l, x_n[l])
                # Then, calculate the full conditional log-probabilities.
                # First possibility: example l is in component launch_i.c.
                # Second possibility: example l is in component launch_j.c
                for index, launch in enumerate([launch_i, launch_j]):
                    # launch.n_c must never be zero!
                    # TODO: Make sure of that?
                    log_dist[index] = process_param.log_prior(n, launch.n_c,
                            1) + launch.mixture_param.log_likelihood(x_n[l])
                # Normalization
                log_dist -= _logsumexp(2, log_dist)
                if scan == max_intermediate_scans_split:
                    # The last iteration of restricted Gibbs sampling leads to
                    # the split or merge proposal. We keep the corresponding
                    # log-probability, since it contributes to the proposal
                    # density in the M-H acceptance log-probability acc
                    if c_n[i] == c_n[j]:
                        # This is a split and there won't be a merge proposal
                        index = random_state.choice(a=2, p=np.exp(log_dist))
                        log_acc -= log_dist[index]
                    else:
                        # This is a merge and there won't be a split proposal
                        index = 0 if c_n[l] == launch_i.c else 1
                        log_acc += log_dist[index]
                else:
                    index = random_state.choice(a=2, p=np.exp(log_dist))
                if index == 0:
                    launch_i.update(l, x_n[l])
                else:
                    launch_j.update(l, x_n[l])
        # Modify the merge launch state by performing
        # `max_intermediate_scans_merge` intermediate restricted Gibbs
        # sampling scans to update `launch_merge.mixture_param`. Then, conduct
        # one final restricted Gibbs sampling scan from the merge launch
        # state.
        for scan in range(max_intermediate_scans_merge+1):
            if scan == max_intermediate_scans_merge:
                # The last iteration of restricted Gibbs sampling leads to
                # the split or merge proposal. We keep the corresponding
                # log-likelihood, since it contributes to the proposal
                # density in the M-H acceptance log-probability acc
                if c_n[i] == c_n[j]:
                    # This is a split and there won't be a merge proposal.
                    # Reset component parameters to initial values
                    log_acc += launch_merge.mixture_param.iterate_to(
                            mixture_params[launch_merge.c],
                            compute_log_likelihood=True)
                else:
                    # This is a merge and there won't be a split proposal
                    log_acc -= launch_merge.mixture_param.iterate(
                            compute_log_likelihood=True)
            else:
                launch_merge.mixture_param.iterate()
        return log_acc

    def _merge_split_iterate(self, n, x_n, c_n, inv_c, n_c,
            active_components, inactive_components, process_param,
            mixture_params, max_intermediate_scans_split,
            max_intermediate_scans_merge, random_state):
        """
        Performs a single iteration of the Split-Merge MCMC procedure for the
        conjugate Dirichlet process mixture model, see Jain & Neal (2004).
        """
        i, j = self._select_random_pair(n, random_state)
        S = self._find_common_components(c_n, inv_c, i, j)
        launch_i, launch_j = self._init_split_launch_state(x_n, c_n, i, j, S,
                active_components, inactive_components, random_state)
        launch_merge = self._init_merge_launch_state(x_n, c_n, i, j, S,
                random_state)
        log_acc = self._restricted_gibbs_scans(n, x_n, c_n, i, j, S, launch_i,
                launch_j, launch_merge, process_param, mixture_params,
                max_intermediate_scans_split, max_intermediate_scans_merge,
                random_state)
        # If i and j are in the same mixture component, then we attempt to
        # split
        if c_n[i] == c_n[j]:
            self._attempt_split(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
                    launch_j, active_components, inactive_components,
                    process_param, mixture_params, random_state)
        # Otherwise, if i and j are in different mixture components, then we
        # attempt to merge
        else:
            self._attempt_merge(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
                    launch_merge, active_components, inactive_components,
                    process_param, mixture_params, random_state)

    def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            max_intermediate_scans_split, max_split_merge_moves,
            max_gibbs_scans, max_intermediate_scans_merge, random_state):
        # One sweep: a number of split-merge moves followed by a number of
        # full Gibbs scans, then an update of the process-level parameter.
        for _ in range(max_split_merge_moves):
            self._merge_split_iterate(n, x_n, c_n, inv_c, n_c,
                    active_components, inactive_components, process_param,
                    mixture_params, max_intermediate_scans_split,
                    max_intermediate_scans_merge, random_state)
        for _ in range(max_gibbs_scans):
            for k in active_components:
                mixture_params[k].iterate()
            self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params, m,
                    random_state)
        process_param.iterate(n, len(active_components))

    def infer(self, x_n, c_n=None, m=None, scheme=None, max_iter=None,
            warmup=None, random_state=None):
        """
        Component and latent variable inference.

        Parameters
        ----------
        x_n : array-like
            Examples
        c_n : None or array-like, optional
            Vector of component indicator variables. If None, then the
            examples will be assigned to the same component initially
        m : None or int, optional
            The number of auxiliary components
        scheme: None or array-like, optional
            Computation scheme
        max_iter : None or int, optional
            The maximum number of iterations
        warmup: None or int, optional
            The number of warm-up iterations
        random_state : np.random.RandomState instance, optional
            Used for drawing the random variates

        Returns
        -------
        c_n : ndarray
            Inferred component vectors
        phi_c : ndarray
            Inferred latent variables
        """
        m = self._get_m(m)
        max_intermediate_scans_split, max_split_merge_moves, \
                max_gibbs_scans, max_intermediate_scans_merge = \
                self._get_scheme(scheme)
        max_iter = self._get_max_iter(max_iter)
        warmup = self._get_warmup(warmup)
        pm = self.process_model
        random_state = pm._get_random_state(random_state)
        process_param = pm.InferParam(pm, random_state)
        # TODO: Move into mixture model?
        n, x_n = self._check_examples(x_n)
        c_n = self._check_components(n, c_n)
        # Maximum number of components
        c_max = n + m - 1
        # Inverse mapping from components to examples
        # TODO: Only needed for split and merge samplers
        inv_c = defaultdict(set)
        for i in range(n):
            inv_c[c_n[i]].add(i)
        # Number of examples per component
        n_c = np.bincount(c_n, minlength=c_max)
        # active_components is an unordered set of unique components
        active_components = set(np.unique(c_n))
        # inactive_components is an unordered set of currently unassigned
        # components
        inactive_components = set(range(c_max)) - active_components
        # Initialize model-dependent parameters lazily
        mm = self.mixture_model
        mixture_params = [mm.InferParam(mm, random_state)
                for _ in range(c_max)]
        for k in active_components:
            mixture_params[k].iterate()
            # TODO: Substitute for inv_c?
            for i in inv_c[k]:
                mixture_params[k].update(x_n[i])
            # NOTE(review): unlike the plain Gibbs sampler, the parameters
            # are iterated again after conditioning on the assigned
            # examples -- confirm this second draw is intended.
            mixture_params[k].iterate()
        c_n_samples = np.empty((max_iter-warmup)*n, dtype=int).reshape(
                (max_iter-warmup, n))
        phi_c_samples = [{} for _ in range(max_iter-warmup)]
        for itn in range(max_iter):
            self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                    inactive_components, process_param, mixture_params, m,
                    max_intermediate_scans_split, max_split_merge_moves,
                    max_gibbs_scans, max_intermediate_scans_merge,
                    random_state)
            if not itn-warmup < 0:
                c_n_samples[(itn-warmup,)] = c_n
                for k in active_components:
                    phi_c_samples[itn-warmup][k] = mixture_params[k].phi_c()
        return c_n_samples, phi_c_samples
class GenericSAMSSampler(GenericMSSampler):
    """
    Class which encapsulates common functionality between all
    sequentially-allocated merge-split (SAMS) samplers.
    """

    def _sequential_allocation(self, n, x_n, c_n, i, j, S, active_components,
            inactive_components, process_param, random_state):
        """
        Proposes splits by sequentially allocating observations to one of two
        split components using allocation probabilities conditional on
        previously allocated data. Returns proposal densities for both splits
        and merges.
        """
        mm = self.mixture_model
        Launch = self.Launch
        if c_n[i] == c_n[j]:
            # Split proposal: seed launch_i with a fresh component
            launch_i = Launch(inactive_components.pop(), i, x_n[i],
                    mm.InferParam(mm, random_state))
            active_components.add(launch_i.c)
        else:
            # Merge proposal: launch_i keeps the current component of i
            launch_i = Launch(c_n[i], i, x_n[i],
                    mm.InferParam(mm, random_state))
        launch_j = Launch(c_n[j], j, x_n[j], mm.InferParam(mm, random_state))
        launch_merge = Launch(c_n[j], j, x_n[j],
                mm.InferParam(mm, random_state))
        # Accumulator for the proposal-density part of the M-H
        # acceptance log-probability
        log_acc = 0.0
        log_dist = np.empty(2, dtype=float)
        # TODO: Add code to sample component parameters (necessary for
        #       generalization to non-conjugate mixture models)
        for l in random_state.permutation(list(S)):
            for index, launch in enumerate([launch_i, launch_j]):
                # launch.n_c must never be zero!
                # TODO: Make sure of that?
                log_dist[index] = process_param.log_prior(n, launch.n_c, 1) \
                        + launch.mixture_param.log_likelihood(x_n[l])
            # Normalization
            log_dist -= _logsumexp(2, log_dist)
            if c_n[i] == c_n[j]:
                # This is a split and there won't be a merge proposal
                index = random_state.choice(a=2, p=np.exp(log_dist))
                log_acc -= log_dist[index]
            else:
                # This is a merge and there won't be a split proposal
                index = 0 if c_n[l] == launch_i.c else 1
                log_acc += log_dist[index]
            if index == 0:
                launch_i.update(l, x_n[l])
            else:
                launch_j.update(l, x_n[l])
        for l in (S | set([i])):
            launch_merge.update(l, x_n[l])
        return launch_i, launch_j, launch_merge, log_acc

    def _sams_iterate(self, n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, random_state):
        """
        Performs a single iteration of the Sequentially-Allocated Merge-Split
        procedure for the conjugate Dirichlet process mixture model, see Dahl
        (2003).
        """
        # NOTE(review): `mm` is unused in this method.
        mm = self.mixture_model
        i, j = self._select_random_pair(n, random_state)
        S = self._find_common_components(c_n, inv_c, i, j)
        launch_i, launch_j, launch_merge, log_acc = \
                self._sequential_allocation(n, x_n, c_n, i, j, S,
                        active_components, inactive_components, process_param,
                        random_state)
        # If i and j are in the same mixture component, then we attempt to
        # split
        if c_n[i] == c_n[j]:
            self._attempt_split(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
                    launch_j, active_components, inactive_components,
                    process_param, mixture_params, random_state)
        # Otherwise, if i and j are in different mixture components, then we
        # attempt to merge
        else:
            self._attempt_merge(n, x_n, c_n, inv_c, n_c, log_acc, launch_i,
                    launch_merge, active_components, inactive_components,
                    process_param, mixture_params, random_state)

    def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
            inactive_components, process_param, mixture_params, m,
            random_state):
        # One sweep: a single SAMS move, followed by a full Gibbs scan and
        # an update of the process-level parameter.
        self._sams_iterate(n, x_n, c_n, inv_c, n_c, active_components,
                inactive_components, process_param, mixture_params,
                random_state)
        for k in active_components:
            mixture_params[k].iterate()
        self._gibbs_iterate(n, x_n, c_n, inv_c, n_c, active_components,
                inactive_components, process_param, mixture_params, m,
                random_state)
        process_param.iterate(n, len(active_components))
class GenericSliceSampler(GenericSampler):
"""
Class which encapsulates common functionality between all slice samplers.
"""
def __init__(self, process_model, max_iter=1000, warmup=None):
    # No extra configuration beyond the generic sampler settings.
    super(GenericSliceSampler, self).__init__(process_model,
            max_iter=max_iter, warmup=warmup)
@staticmethod
def _slice_iterate(n, x_n, c_n, inv_c, n_c, active_components,
        inactive_components, process_param, mixture_params, random_state):
    """
    One sweep of the slice sampler: draw component weights, draw the
    minimal slice variable, create new components by stick breaking
    until the remaining stick is below the slice threshold, then
    reassign every example among the components whose weight exceeds
    its slice variable. Mutates the bookkeeping structures in place.
    """
    # For each component `k`, sample component weights:
    dalpha = np.zeros(len(n_c), dtype=float)
    for k in active_components:
        dalpha[k] = n_c[k]
    new_k = inactive_components.pop()
    proposed_components = set([new_k])
    dalpha[new_k] = process_param.alpha
    beta = random_state.dirichlet(dalpha)
    mixture_params[new_k].iterate()
    beta_star = beta[new_k]
    # Generate a sample of `u_star`, the minimum of the slice variables
    k_star = None
    for k in active_components:
        u_c_star = beta[k] * random_state.beta(1.0, n_c[k])
        # BUGFIX: test `is not None` rather than truthiness. The original
        # `if k_star:` treats component index 0 as "unset" (0 is falsy),
        # silently discarding the running minimum whenever component 0
        # was visited first.
        if k_star is not None:
            if u_c_star < u_star:
                k_star = k
                u_star = u_c_star
        else:
            k_star = k
            u_star = u_c_star
    # Sample the index `i_star` of the slice variable that achieves
    # `u_star`
    i_star = random_state.choice(a=list(inv_c[k_star]))
    # Create new components through stick breaking until `beta_star` <
    # `u_star`
    while not beta_star < u_star:
        new_k = inactive_components.pop()
        proposed_components.add(new_k)
        nu = random_state.beta(1.0, process_param.alpha)
        beta[new_k] = beta_star * nu
        mixture_params[new_k].iterate()
        beta_star *= 1.0 - nu
    active_components |= proposed_components
    # For each observation `x_n[i]`, sample the component assignment
    # `c_n[i]`
    for i in range(n):
        # Bookkeeping: Downdate
        prev_k = c_n[i]
        inv_c[prev_k].remove(i)
        n_c[prev_k] -= 1
        mixture_params[prev_k].downdate(x_n[i])
        if n_c[prev_k] == 0:
            proposed_components.add(prev_k)
        # Sample slice variable
        # NOTE(review): drawn on [u_star, beta[prev_k]) -- confirm this
        # matches the intended minimal-slice construction.
        if i == i_star:
            u = u_star
        else:
            u = random_state.uniform(low=u_star, high=beta[prev_k])
        # Initialize and populate the total log probability accumulator
        log_dist = np.empty(len(n_c), dtype=float)
        log_dist.fill(-np.inf)
        # TODO: Performance improvement possible by reordering the
        #       clusters according to `beta[k]`
        for k in active_components:
            # Data items can only join clusters for which `beta[k]` is
            # larger than the respective slice variable
            if beta[k] > u:
                log_dist[k] = mixture_params[k].log_likelihood(x_n[i])
        # Sample from log_dist. Normalization is required
        log_dist -= _logsumexp(len(n_c), log_dist)
        # TODO: Can we expect performance improvements if we exclude those
        #       elements of `log_dist` that are -inf?
        next_k = random_state.choice(a=len(n_c), p=np.exp(log_dist))
        # Bookkeeping: Update
        c_n[i] = next_k
        inv_c[next_k].add(i)
        n_c[next_k] += 1
        mixture_params[next_k].update(x_n[i])
        proposed_components.discard(next_k)
    # Cleanup
    active_components -= proposed_components
    inactive_components |= proposed_components
def _inference_step(self, n, x_n, c_n, inv_c, n_c, active_components,
        inactive_components, process_param, mixture_params, random_state):
    """Perform one complete sweep of the slice sampler.

    Resamples (1) the parameters of every active mixture component,
    (2) the slice variables and component assignments of all examples,
    and (3) the process-level parameter.
    """
    # (1) Draw fresh component parameters for each currently active component.
    for component in active_components:
        mixture_params[component].iterate()
    # (2) Resample slice variables and assignments; this may activate new
    # components or retire empty ones as a side effect.
    self._slice_iterate(
        n, x_n, c_n, inv_c, n_c, active_components, inactive_components,
        process_param, mixture_params, random_state)
    # (3) Update the process parameter given the number of examples and the
    # current number of active components.
    process_param.iterate(n, len(active_components))
def infer(self, x_n, c_n=None, max_iter=None, warmup=None,
          random_state=None):
    """
    Component and latent variable inference.

    Parameters
    ----------
    x_n : array-like
        Examples
    c_n : None or array-like, optional
        Vector of component indicator variables. If None, then the
        examples will be assigned to the same component initially
    max_iter : None or int, optional
        The maximum number of iterations
    warmup : None or int, optional
        The number of warm-up iterations
    random_state : np.random.RandomState instance, optional
        Used for drawing the random variates

    Returns
    -------
    c_n_samples : ndarray
        Inferred component vectors, one row per post-warmup iteration
    phi_c_samples : list of dict
        Inferred latent variables; one dict (component index -> phi_c)
        per post-warmup iteration
    """
    max_iter = self._get_max_iter(max_iter)
    warmup = self._get_warmup(warmup)
    pm = self.process_model
    random_state = pm._get_random_state(random_state)
    process_param = pm.InferParam(pm, random_state)
    # TODO: Move into mixture model?
    n, x_n = self._check_examples(x_n)
    c_n = self._check_components(n, c_n)
    # Maximum number of components (one per example in the worst case)
    c_max = n
    # Inverse mapping from components to examples
    # TODO: Only needed for split and merge samplers
    inv_c = defaultdict(set)
    for i in range(n):
        inv_c[c_n[i]].add(i)
    # Number of examples per component
    n_c = np.bincount(c_n, minlength=c_max)
    # active_components is an unordered set of unique components
    active_components = set(np.unique(c_n))
    # inactive_components is an unordered set of currently unassigned
    # components
    inactive_components = set(range(c_max)) - active_components
    # Initialize model-dependent parameters lazily
    mm = self.mixture_model
    mixture_params = [mm.InferParam(mm, random_state)
                      for _ in range(c_max)]
    for k in active_components:
        mixture_params[k].iterate()
        # TODO: Substitute for inv_c?
        for i in inv_c[k]:
            mixture_params[k].update(x_n[i])
    # Allocate the sample storage directly in its final 2-D shape instead
    # of the original flat-allocate-then-reshape round trip.
    c_n_samples = np.empty((max_iter - warmup, n), dtype=int)
    phi_c_samples = [{} for _ in range(max_iter - warmup)]
    for itn in range(max_iter):
        self._inference_step(n, x_n, c_n, inv_c, n_c, active_components,
                inactive_components, process_param, mixture_params,
                random_state)
        # Record samples only once the warm-up phase is over (clearer than
        # the original `if not itn-warmup < 0`).
        if itn >= warmup:
            c_n_samples[itn - warmup] = c_n
            for k in active_components:
                phi_c_samples[itn - warmup][k] = mixture_params[k].phi_c()
    return c_n_samples, phi_c_samples
| en | 0.805123 | # -*- coding: utf-8 -*- Class which encapsulates common functionality between all samplers. # TODO: Make this truly model-agnostic. Get rid of dtype=float Class which encapsulates common functionality between all Gibbs samplers. Performs a single iteration of Radford Neal's algorithms 3 or 8, see Neal (2000). # Bookkeeping. Note that Neal's algorithms do not need inv_c to # work. It is used only in the split & merge algorithms # Downdate component counter # Downdate model-dependent parameters # If the previous component is empty after example i is removed, # recycle it and propose it as new component. If it is not empty, # we need to get a new component from the inactive_components set # Make sure the proposed components are not contaminated with # obsolete information # Initialize and populate the total log probability accumulator # Calculate the process prior and mixture likelihood # Sample from log_dist. Normalization is required # TODO: Can we expect performance improvements if we exclude those # elements of `log_dist` that are -inf? # More bookkeeping # Update component counter # Update model-dependent parameters # Cleanup Component and latent variable inference. Parameters ---------- x_n : array-like Examples c_n : None or array-like, optional Vector of component indicator variables. If None, then the examples will be assigned to the same component initially m : None or int, optional The number of auxiliary components max_iter : None or int, optional The maximum number of iterations warmup: None or int, optional The number of warm-up iterations random_state : np.random.RandomState instance, optional Used for drawing the random variates Returns ------- c_n : ndarray Inferred component vectors phi_c : ndarray Inferred latent variables # TODO: Move into mixture model? 
# Maximum number of components # Inverse mapping from components to examples # TODO: Only needed for split and merge samplers # Number of examples per component # active_components is an unordered set of unique components # inactive_components is an unordered set of currently unassigned # components # Initialize model-dependent parameters lazily # TODO: Substitute for inv_c? Class which encapsulates common functionality between all merge-split (MS) samplers. # Set the component # The set inv_c will contain all examples that belong to the # component c # Number of examples in the component c # Auxiliary, model-dependent parameters # TODO: A less ugly way to achieve parameter initialization # Add example g to component c # Increment counter # Update model-dependent parameters # Remove example g from component c # Reduce counter # Downdate model-dependent parameters Select two distict observations (i.e. examples), i and j, uniformly at random Define a set of examples, S, that does not contain i or j, but all other examples that belong to the same component as i or j # Logarithm of prior quotient, see Eq. (3.4) in Jain & Neal (2004) and # Eq. (7) in Jain & Neal (2007) # Logarithm of likelihood quotient, see Eq. (3.8) in Jain & Neal # (2004) and Eq. (11) in Jain & Neal (2007) # Evaluate the split proposal by the MH acceptance probability # If the split proposal is accepted, then it becomes the next # state. At this point, launch_i.inv_c and launch_j.inv_c contain # the split proposal. Therefore, all labels are updated according # to the assignments in launch_i.inv_c and launch_j.inv_c # Update assignments in global component-example mapping # Update counts # Update mixture parameters # TODO: Logging # print "yay, accepted split with log-acc = {}".format(log_acc) # If the split proposal is rejected, then the old state remains as # the next state. 
Thus, remove launch_i.c from the active # components and put it back into the inactive components (if # necessary) # TODO: Logging # print "nay, rejected split with log-acc = {}".format(log_acc) # Logarithm of prior quotient, see Eq. (3.5) in Jain & Neal (2004) and # Eq. (8) in Jain & Neal (2007) # Logarithm of likelihood quotient, see Eq. (3.9) in Jain & Neal # (2004) and Eq. (12) in Jain & Neal (2007) # Evaluate the split proposal by the MH acceptance probability # If the merge proposal is accepted, then it becomes the next # state # Assign all examples to component launch_merge.c that in the # proposal were assigned to launch_merge.c # Remove assignments to launch_i.c from global component-example # mapping # Add assignments to launch_merge.c to global component-example # mapping # Update counts # Update mixture parameters # TODO: Logging #print "yay, accepted merge with log-acc = {}".format(log_acc) # There is nothing to do if the merge proposal is rejected # TODO: Logging # print "nay, rejected merge with log-acc = {}".format(log_acc) Class which encapsulates common functionality between all restricted Gibbs merge-split (RGMS) samplers. Initialize the split launch state that will be used to compute the restricted Gibbs sampling probabilities # launch_i.c is the initial launch state component of example i # This will be a split proposal, so let launch_i.c be a new # component # This will be a merge proposal, so let launch_i.c be the current # component of i # launch_j.c is the initial launch state component of example j # Randomly select the launch state components, independently and with # equal probability, for the examples in S Initialize the merge launch state that will be used to compute the restricted Gibbs sampling probabilities # TODO: Should the model parameters of the merged component be set # equal to the model parameters in the original component # c_n[j]? According to Dahl (2005), they should. 
According to # Jain & Neal (2007), they should not and instead be drawn from # the prior distribution. Let's do the latter for now Modify the initial launch state by performing intermediate restricted Gibbs sampling scans. The last scan in this loop leads to the proposal state. # Initialize acceptance probability aggregator # Initialize a total log probability accumulator. Since there are # two possibilities (either component launch_i.c or launch_j.c), # this is a vector of length 2 # Modify the split launch state by performing # `max_intermediate_scans_split` intermediate restricted Gibbs # sampling scans to update `launch_i` and `launch_j`. Then, conduct # one final restricted Gibbs sampling scan from the split launch # state. # The last iteration of restricted Gibbs sampling leads to # the split or merge proposal. We keep the corresponding # log-likelihood, since it contributes to the proposal # density in the M-H acceptance log-probability acc # This is a split and there won't be a merge proposal # This is a merge and there won't be a split proposal. # Reset component parameters to initial values # These scans are restricted to the examples in S. We do not loop # over i and j; their launch state is kept fix! # First, remove the current assignment of example l # Then, calculate the full conditional log-probabilities. # First possibility: example l is in component launch_i.c. # Second possibility: example l is in component launch_j.c # launch.n_c must never be zero! # TODO: Make sure of that? # Normalization # The last iteration of restricted Gibbs sampling leads to # the split or merge proposal. 
We keep the corresponding # log-probability, since it contributes to the proposal # density in the M-H acceptance log-probability acc # This is a split and there won't be a merge proposal # This is a merge and there won't be a split proposal # Modify the merge launch state by performing # `max_intermediate_scans_merge` intermediate restricted Gibbs # sampling scans to update `launch_merge.mixture_param`. Then, conduct # one final restricted Gibbs sampling scan from the merge launch # state. # The last iteration of restricted Gibbs sampling leads to # the split or merge proposal. We keep the corresponding # log-likelihood, since it contributes to the proposal # density in the M-H acceptance log-probability acc # This is a split and there won't be a merge proposal. # Reset component parameters to initial values # This is a merge and there won't be a split proposal Performs a single iteration of the Split-Merge MCMC procedure for the conjugate Dirichlet process mixture model, see Jain & Neal (2004). # If i and j are in the same mixture component, then we attempt to # split # Otherwise, if i and j are in different mixture components, then we # attempt to merge Component and latent variable inference. Parameters ---------- x_n : array-like Examples c_n : None or array-like, optional Vector of component indicator variables. If None, then the examples will be assigned to the same component initially m : None or int, optional The number of auxiliary components scheme: None or array-like, optional Computation scheme max_iter : None or int, optional The maximum number of iterations warmup: None or int, optional The number of warm-up iterations random_state : np.random.RandomState instance, optional Used for drawing the random variates Returns ------- c_n : ndarray Inferred component vectors phi_c : ndarray Inferred latent variables # TODO: Move into mixture model? 
# Maximum number of components # Inverse mapping from components to examples # TODO: Only needed for split and merge samplers # Number of examples per component # active_components is an unordered set of unique components # inactive_components is an unordered set of currently unassigned # components # Initialize model-dependent parameters lazily # TODO: Substitute for inv_c? Class which encapsulates common functionality between all sequentially-allocated merge-split (SAMS) samplers. Proposes splits by sequentially allocating observations to one of two split components using allocation probabilities conditional on previously allocated data. Returns proposal densities for both splits and merges. # TODO: Add code to sample component parameters (necessary for # generalization to non-conjugate mixture models) # launch.n_c must never be zero! # TODO: Make sure of that? # Normalization # This is a split and there won't be a merge proposal # This is a merge and there won't be a split proposal Performs a single iteration of the Sequentially-Allocated Merge-Split procedure for the conjugate Dirichlet process mixture model, see Dahl (2003). # If i and j are in the same mixture component, then we attempt to # split # Otherwise, if i and j are in different mixture components, then we # attempt to merge Class which encapsulates common functionality between all slice samplers. 
# For each component `k`, sample component weights: # Generate a sample of `u_star`, the minimum of the slice variables # Sample the index `i_star` of the slice variable that achieves # `u_star` # Create new components through stick breaking until `beta_star` < # `u_star` # For each observation `x_n[i]`, sample the component assignment # `c_n[i]` # Bookkeeping: Downdate # Sample slice variable # Initialize and populate the total log probability accumulator # TODO: Performance improvement possible by reordering the # clusters according to `beta[k]` # Data items can only join clusters for which `beta[k]` is # larger than the respective slice variable # Sample from log_dist. Normalization is required # TODO: Can we expect performance improvements if we exclude those # elements of `log_dist` that are -inf? # Bookkeeping: Update # Cleanup # For each active component `k`, sample component parameters Component and latent variable inference. Parameters ---------- x_n : array-like Examples c_n : None or array-like, optional Vector of component indicator variables. If None, then the examples will be assigned to the same component initially max_iter : None or int, optional The maximum number of iterations warmup: None or int, optional The number of warm-up iterations random_state : np.random.RandomState instance, optional Used for drawing the random variates Returns ------- c_n : ndarray Inferred component vectors phi_c : ndarray Inferred latent variables # TODO: Move into mixture model? # Maximum number of components # Inverse mapping from components to examples # TODO: Only needed for split and merge samplers # Number of examples per component # active_components is an unordered set of unique components # inactive_components is an unordered set of currently unassigned # components # Initialize model-dependent parameters lazily # TODO: Substitute for inv_c? | 2.120149 | 2 |
diventi/brave/__init__.py | flavoi/diven | 2 | 6617775 | default_app_config = 'diventi.brave.apps.BraveConfig' | default_app_config = 'diventi.brave.apps.BraveConfig' | none | 1 | 0.959475 | 1 | |
test_gtimeit.py | Chthi/Bench | 0 | 6617776 | <filename>test_gtimeit.py
"""Tests for gtimeit package."""
import pytest
from gtimeit.benchmark import *
from gtimeit.perf_tracker import *
def test__benchmark_plot_percent(capsys):
    """plot_percent() on a freshly constructed Benchmark is expected to
    print a "Not available" message (presumably because no timing data has
    been recorded yet) instead of producing a plot."""
    bm = Benchmark()
    bm.plot_percent()
    # Capture what was written to stdout via pytest's capsys fixture.
    captured = capsys.readouterr()
    assert "Not available" in captured.out
| <filename>test_gtimeit.py
"""Tests for gtimeit package."""
import pytest
from gtimeit.benchmark import *
from gtimeit.perf_tracker import *
def test__benchmark_plot_percent(capsys):
"""First test"""
bm = Benchmark()
bm.plot_percent()
captured = capsys.readouterr()
assert "Not available" in captured.out
| en | 0.688296 | Tests for gtimeit package. First test | 2.018069 | 2 |
testing/mpy_dummy/uasyncio.py | dpm76/Microvacbot | 1 | 6617777 | import asyncio
from asyncio import Event, wait_for, run, create_task, Lock,StreamReader
async def sleep(delay):
    """Suspend the current task for `delay` seconds.

    Fixed: the original called asyncio.sleep() without `await`, so the
    coroutine was created but never run and the call returned immediately.
    """
    await asyncio.sleep(delay)
async def sleep_ms(delay):
    """Suspend the current task for `delay` milliseconds (MicroPython API)."""
    # Same missing-await fix as sleep(); convert ms -> s for CPython asyncio.
    await asyncio.sleep(delay / 1e3)
def get_event_loop():
    """Return the current asyncio event loop.

    Fixed: the original dropped the return value and always returned None.
    """
    return asyncio.get_event_loop()
Event=Event
wait_for=wait_for
run=run
create_task=create_task
Lock=Lock
StreamReader=StreamReader | import asyncio
from asyncio import Event, wait_for, run, create_task, Lock,StreamReader
async def sleep(delay):
asyncio.sleep(delay)
async def sleep_ms(delay):
asyncio.sleep(delay/1e3)
def get_event_loop():
asyncio.get_event_loop()
Event=Event
wait_for=wait_for
run=run
create_task=create_task
Lock=Lock
StreamReader=StreamReader | none | 1 | 3.183572 | 3 | |
src/test/python/mmlspark/vw/test_vw.py | JoanFM/mmlspark | 2 | 6617778 | <filename>src/test/python/mmlspark/vw/test_vw.py
# Prepare training and test data.
import os
import unittest
import tempfile
import pyspark
from mmlspark.vw.VowpalWabbitClassifier import VowpalWabbitClassifier
from mmlspark.vw.VowpalWabbitRegressor import VowpalWabbitRegressor
from mmlspark.vw.VowpalWabbitFeaturizer import VowpalWabbitFeaturizer
from pyspark.sql.types import *
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.master("local[*]") \
.appName("_VW") \
.config("spark.jars.packages", "com.microsoft.ml.spark:mmlspark_2.11:" + os.environ["MML_VERSION"]) \
.config("spark.executor.heartbeatInterval", "60s") \
.getOrCreate()
sc = spark.sparkContext
class VowpalWabbitSpec(unittest.TestCase):
def save_model(self, estimator):
# create sample data
schema = StructType([StructField("label", DoubleType()),
StructField("text", StringType())])
data = pyspark.sql.SparkSession.builder.getOrCreate().createDataFrame([
(-1.0, "mountains are nice"),
( 1.0, "do you have the TPS reports ready?")], schema)
# featurize data
featurizer = VowpalWabbitFeaturizer(stringSplitInputCols=['text'])
featurized_data = featurizer.transform(data)
# train model
model = estimator.fit(featurized_data)
# write model to file and validate it's there
with tempfile.TemporaryDirectory() as tmpdirname:
modelFile = '{}/model'.format(tmpdirname)
model.saveNativeModel(modelFile)
self.assertTrue(os.stat(modelFile).st_size > 0)
def test_save_model_classification(self):
self.save_model(VowpalWabbitClassifier())
def test_save_model_regression(self):
self.save_model(VowpalWabbitRegressor())
if __name__ == "__main__":
result = unittest.main()
| <filename>src/test/python/mmlspark/vw/test_vw.py
# Prepare training and test data.
import os
import unittest
import tempfile
import pyspark
from mmlspark.vw.VowpalWabbitClassifier import VowpalWabbitClassifier
from mmlspark.vw.VowpalWabbitRegressor import VowpalWabbitRegressor
from mmlspark.vw.VowpalWabbitFeaturizer import VowpalWabbitFeaturizer
from pyspark.sql.types import *
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.master("local[*]") \
.appName("_VW") \
.config("spark.jars.packages", "com.microsoft.ml.spark:mmlspark_2.11:" + os.environ["MML_VERSION"]) \
.config("spark.executor.heartbeatInterval", "60s") \
.getOrCreate()
sc = spark.sparkContext
class VowpalWabbitSpec(unittest.TestCase):
def save_model(self, estimator):
# create sample data
schema = StructType([StructField("label", DoubleType()),
StructField("text", StringType())])
data = pyspark.sql.SparkSession.builder.getOrCreate().createDataFrame([
(-1.0, "mountains are nice"),
( 1.0, "do you have the TPS reports ready?")], schema)
# featurize data
featurizer = VowpalWabbitFeaturizer(stringSplitInputCols=['text'])
featurized_data = featurizer.transform(data)
# train model
model = estimator.fit(featurized_data)
# write model to file and validate it's there
with tempfile.TemporaryDirectory() as tmpdirname:
modelFile = '{}/model'.format(tmpdirname)
model.saveNativeModel(modelFile)
self.assertTrue(os.stat(modelFile).st_size > 0)
def test_save_model_classification(self):
self.save_model(VowpalWabbitClassifier())
def test_save_model_regression(self):
self.save_model(VowpalWabbitRegressor())
if __name__ == "__main__":
result = unittest.main()
| en | 0.886629 | # Prepare training and test data. # create sample data # featurize data # train model # write model to file and validate it's there | 2.520932 | 3 |
history_generator/internal/event_analysis.py | ReedOei/History-Generator | 19 | 6617779 | <filename>history_generator/internal/event_analysis.py
import re
from tkinter import *

import internal.events
class Result:
def __init__(self, event_list):
self.event_list = event_list
def multi_search(self, datums, regexes):
if len(datums) > 1:
return self.search(datums[0], regexes[0])
else:
return self.search(datums[0], regexes[0]).multi_search(datums[1:], regexes[1:])
# Only for event data
def search_or(self, data_names, search_regex=r'.*'):
results = set()
for data_name in data_names:
results = results.union(self.search(data_name, search_regex).event_list)
return Result(list(results))
def search(self, data_name, search_regex=r'.*', comp=lambda a, b: a > b, date=(0,)):
if not data_name in ['name', 'date']:
return Result(list(filter(
lambda e: data_name in e.event_data and len(re.findall(search_regex, e.event_data[data_name])) > 0,
self.event_list)))
elif data_name == 'name':
return Result(list(filter(lambda e: len(re.findall(search_regex, e.name)) > 0, self.event_list)))
elif data_name == 'date':
return Result(list(filter(lambda e: comp(date, e.date), self.event_list)))
def __repr__(self):
return str(self.event_list)
class Analyser:
    """Thin convenience wrapper exposing `Result` searches over an event log."""
    def __init__(self, event_log):
        # Keep the log's raw event list; each search wraps it in a `Result`.
        self.event_list = event_log.events
    # Only for event data
    def search_or(self, data_names, search_regex=r'.*'):
        """Return events matching the regex under ANY field in `data_names`."""
        return Result(self.event_list).search_or(data_names, search_regex)
    def search(self, data_name, search_regex=r'.*', comp=lambda a, b: a > b, date=(0,)):
        """Filter events by one field; see `Result.search` for field semantics."""
        return Result(self.event_list).search(data_name, search_regex, comp, date)
class HistoryWindow:
    """Tkinter window listing logged events, filterable by event type."""
    def __init__(self, event_log, title, event_types):
        self.title = title
        self.event_types = event_types
        # Initially every event type is shown; narrowed by the listbox later.
        self.display_event_types = event_types
        # Pre-filter the log to only the event types this window cares about.
        self.event_log = Analyser(event_log).search('name', '|'.join(self.event_types))
        self.show_information_gui()
    def show_information_gui(self):
        """Build the window: a type-selection listbox above the event list."""
        self.gui_window = Tk()
        self.gui_window.title(self.title)
        self.gui_window.geometry("800x400+0+0")
        self.gui_window.config(background='white')
        self.gui_window.columnconfigure(1, weight=1)
        self.gui_window.rowconfigure(3, weight=1)
        self.event_check_buttons = []
        # Multi-select listbox of event types; selection drives the filter.
        self.event_check_display = Listbox(self.gui_window, selectmode=MULTIPLE)
        self.event_check_display.bind('<<ListboxSelect>>', self.change_selection)
        for (i, event_type) in enumerate(self.event_types):
            self.event_check_display.insert(END, event_type)
        self.event_check_display.grid(row=0, sticky=W + E)
        # Listbox showing the (filtered, chronologically sorted) events.
        self.event_log_display = Listbox(self.gui_window)
        self.event_log_display.grid(row=2, columnspan=2, rowspan=2, sticky=N + S + W + E)
        self.refresh_event_log_display()
    def change_selection(self, event):
        # Listbox selection changed -> recompute the displayed events.
        self.refresh_event_log_display()
    def refresh_event_log_display(self):
        """Re-filter by the currently selected types and redraw the list."""
        self.display_event_types = list(
            [self.event_check_display.get(i) for i in self.event_check_display.curselection()])
        all_events = self.get_sorted_events(self.event_log.search('name', '|'.join(self.display_event_types)))
        self.event_log_display.delete(0, END)
        for event in all_events:
            self.event_log_display.insert(END, event.text_version())
    def get_sorted_events(self, analyser):
        """Return the result's events in chronological (date) order."""
        return sorted(analyser.event_list, key=lambda event: event.date)
def find_nation_mentions(event_log, name):
    """Return events where `name` matches either nation field."""
    return Analyser(event_log).search_or(['nation_a', 'nation_b'], name)
def find_city_mentions(event_log, names):
    """Return events mentioning any of `names` (a sequence) in either city field."""
    return Analyser(event_log).search_or(['city_a', 'city_b'], '|'.join(names))
def find_religion_mentions(event_log, name):
    """Return events where `name` matches either religion field."""
    return Analyser(event_log).search_or(['religion_a', 'religion_b'], name)
| <filename>history_generator/internal/event_analysis.py
import internal.events
from tkinter import *
class Result:
def __init__(self, event_list):
self.event_list = event_list
def multi_search(self, datums, regexes):
if len(datums) > 1:
return self.search(datums[0], regexes[0])
else:
return self.search(datums[0], regexes[0]).multi_search(datums[1:], regexes[1:])
# Only for event data
def search_or(self, data_names, search_regex=r'.*'):
results = set()
for data_name in data_names:
results = results.union(self.search(data_name, search_regex).event_list)
return Result(list(results))
def search(self, data_name, search_regex=r'.*', comp=lambda a, b: a > b, date=(0,)):
if not data_name in ['name', 'date']:
return Result(list(filter(
lambda e: data_name in e.event_data and len(re.findall(search_regex, e.event_data[data_name])) > 0,
self.event_list)))
elif data_name == 'name':
return Result(list(filter(lambda e: len(re.findall(search_regex, e.name)) > 0, self.event_list)))
elif data_name == 'date':
return Result(list(filter(lambda e: comp(date, e.date), self.event_list)))
def __repr__(self):
return str(self.event_list)
class Analyser:
def __init__(self, event_log):
self.event_list = event_log.events
# Only for event data
def search_or(self, data_names, search_regex=r'.*'):
return Result(self.event_list).search_or(data_names, search_regex)
def search(self, data_name, search_regex=r'.*', comp=lambda a, b: a > b, date=(0,)):
return Result(self.event_list).search(data_name, search_regex, comp, date)
class HistoryWindow:
def __init__(self, event_log, title, event_types):
self.title = title
self.event_types = event_types
self.display_event_types = event_types
self.event_log = Analyser(event_log).search('name', '|'.join(self.event_types))
self.show_information_gui()
def show_information_gui(self):
self.gui_window = Tk()
self.gui_window.title(self.title)
self.gui_window.geometry("800x400+0+0")
self.gui_window.config(background='white')
self.gui_window.columnconfigure(1, weight=1)
self.gui_window.rowconfigure(3, weight=1)
self.event_check_buttons = []
self.event_check_display = Listbox(self.gui_window, selectmode=MULTIPLE)
self.event_check_display.bind('<<ListboxSelect>>', self.change_selection)
for (i, event_type) in enumerate(self.event_types):
self.event_check_display.insert(END, event_type)
self.event_check_display.grid(row=0, sticky=W + E)
self.event_log_display = Listbox(self.gui_window)
self.event_log_display.grid(row=2, columnspan=2, rowspan=2, sticky=N + S + W + E)
self.refresh_event_log_display()
def change_selection(self, event):
self.refresh_event_log_display()
def refresh_event_log_display(self):
self.display_event_types = list(
[self.event_check_display.get(i) for i in self.event_check_display.curselection()])
all_events = self.get_sorted_events(self.event_log.search('name', '|'.join(self.display_event_types)))
self.event_log_display.delete(0, END)
for event in all_events:
self.event_log_display.insert(END, event.text_version())
def get_sorted_events(self, analyser):
return sorted(analyser.event_list, key=lambda event: event.date)
def find_nation_mentions(event_log, name):
return Analyser(event_log).search_or(['nation_a', 'nation_b'], name)
def find_city_mentions(event_log, names):
return Analyser(event_log).search_or(['city_a', 'city_b'], '|'.join(names))
def find_religion_mentions(event_log, name):
return Analyser(event_log).search_or(['religion_a', 'religion_b'], name)
| en | 0.819054 | # Only for event data # Only for event data | 2.837215 | 3 |
dostaweemvse/dostaweemvse/models/route.py | ale3otik/DostaweemWse | 3 | 6617780 | <filename>dostaweemvse/dostaweemvse/models/route.py
from django.db import models
from .edge import Edge
class Route(models.Model):
edges = models.ManyToManyField(Edge)
active_edge_index = models.IntegerField()
def go_to_next_edge(self):
self.active_edge_index += 1
self.save(update_fields=["active_edge_index"])
def get_active_edge(self):
return self.edges.all()[self.active_edge_index]
| <filename>dostaweemvse/dostaweemvse/models/route.py
from django.db import models
from .edge import Edge
class Route(models.Model):
edges = models.ManyToManyField(Edge)
active_edge_index = models.IntegerField()
def go_to_next_edge(self):
self.active_edge_index += 1
self.save(update_fields=["active_edge_index"])
def get_active_edge(self):
return self.edges.all()[self.active_edge_index]
| none | 1 | 2.053485 | 2 | |
PhononModes/atomic_participation_ratio.py | Huaguiyuan/PhononModes | 9 | 6617781 | <filename>PhononModes/atomic_participation_ratio.py<gh_stars>1-10
#!/usr/bin/env python
"""
Script to calculate atom-decomposed participation ratios of a phonon
calculation.
Assumes the phonon mode file is named 'phonons.out', the atomic mass file is
named 'apos.dat', and both are located in the directory where this script is
run.
Script takes 1 command line argument, the index of the phonon mode for which to
calculate the atom-decomposed participation ratio.
Prints atom-decomposed participation ratio to standard output.
"""
import sys
from phononmodes import *
modes = PhononModes()
index = int(sys.argv[1])
aprs = modes.atomic_participation_ratio(index)
print "Number of atoms:", modes.num_atoms
print "Number of modes:", modes.num_modes
print "Participation Ratio from Eq. (1):"
print modes.participation_ratio(index)
print "Participation Ratio from Sum over Eq. (2):"
print aprs.sum()**2
print
print "Mode_# Atom_# Atomic_Participation_Ratio"
for j in range(modes.num_atoms):
print index, j+1, aprs[j]
| <filename>PhononModes/atomic_participation_ratio.py<gh_stars>1-10
#!/usr/bin/env python
"""
Script to calculate atom-decomposed participation ratios of a phonon
calculation.
Assumes the phonon mode file is named 'phonons.out', the atomic mass file is
named 'apos.dat', and both are located in the directory where this script is
run.
Script takes 1 command line argument, the index of the phonon mode for which to
calculate the atom-decomposed participation ratio.
Prints atom-decomposed participation ratio to standard output.
"""
import sys
from phononmodes import *
modes = PhononModes()
index = int(sys.argv[1])
aprs = modes.atomic_participation_ratio(index)
print "Number of atoms:", modes.num_atoms
print "Number of modes:", modes.num_modes
print "Participation Ratio from Eq. (1):"
print modes.participation_ratio(index)
print "Participation Ratio from Sum over Eq. (2):"
print aprs.sum()**2
print
print "Mode_# Atom_# Atomic_Participation_Ratio"
for j in range(modes.num_atoms):
print index, j+1, aprs[j]
| en | 0.843284 | #!/usr/bin/env python Script to calculate atom-decomposed participation ratios of a phonon calculation. Assumes the phonon mode file is named 'phonons.out', the atomic mass file is named 'apos.dat', and both are located in the directory where this script is run. Script takes 1 command line argument, the index of the phonon mode for which to calculate the atom-decomposed participation ratio. Prints atom-decomposed participation ratio to standard output. # Atom_# Atomic_Participation_Ratio" | 3.639539 | 4 |
tests/test_pep8.py | wiccy46/stockmanager | 1 | 6617782 | import pycodestyle
def test_conformance():
"""Test that we conform to PEP-8."""
# E731 ignores lamda, W291 trailing whitespace
# W391 blank line at end of file
# W292 no newline at end of file
# E722 bare except
# W293 blank line white space
style = pycodestyle.StyleGuide(quiet=False,
ignore=['E501', 'E731', 'W291',
'W391', 'W292', 'E722',
'W293'])
# style.input_dir('../../pya')
style.input_dir('./src/')
# style.input_dir('tests')
result = style.check_files()
assert result.total_errors == 0 | import pycodestyle
def test_conformance():
"""Test that we conform to PEP-8."""
# E731 ignores lamda, W291 trailing whitespace
# W391 blank line at end of file
# W292 no newline at end of file
# E722 bare except
# W293 blank line white space
style = pycodestyle.StyleGuide(quiet=False,
ignore=['E501', 'E731', 'W291',
'W391', 'W292', 'E722',
'W293'])
# style.input_dir('../../pya')
style.input_dir('./src/')
# style.input_dir('tests')
result = style.check_files()
assert result.total_errors == 0 | en | 0.475166 | Test that we conform to PEP-8. # E731 ignores lamda, W291 trailing whitespace # W391 blank line at end of file # W292 no newline at end of file # E722 bare except # W293 blank line white space # style.input_dir('../../pya') # style.input_dir('tests') | 2.609947 | 3 |
app.py | SimoneCff/TW6-PJ-PAPC | 2 | 6617783 | <gh_stars>1-10
from flask import Flask, render_template, request
from complements.config import Config
from complements.trolley import Trolley

app = Flask(__name__, static_url_path='', template_folder='templates', static_folder='static')
app.config.from_object(Config)

# Shared shopping-cart instance used by the route modules.
Carrello = Trolley()

# Route modules register their handlers on `app` at import time, so this
# import must come after `app` is created.
import routes.case, routes.checkout, routes.cooling, routes.cpu, routes.idex, routes.memory, routes.mobo, routes.psu,\
    routes.ram, routes.gpu

# BUG FIX: the guard compared against the literal string '__name__', which is
# never the value of __name__, so the server never started when run directly.
if __name__ == '__main__':
    # BUG FIX: the keyword is `ssl_context` (was misspelled `ssl_contex`);
    # 'adhoc' makes Flask serve HTTPS with a self-signed certificate.
    app.run(debug=True, ssl_context='adhoc')
| from flask import Flask, render_template, request
from complements.config import Config
from complements.trolley import Trolley
app = Flask(__name__, static_url_path='', template_folder='templates', static_folder='static')
app.config.from_object(Config)
Carrello = Trolley()
import routes.case, routes.checkout, routes.cooling, routes.cpu, routes.idex, routes.memory, routes.mobo, routes.psu,\
routes.ram, routes.gpu
if __name__ == '__name__':
app.run(debug=True, ssl_contex='adhoc') | none | 1 | 1.996684 | 2 | |
py/test/pytests/thunderbolt_loopback.py | arccode/factory | 3 | 6617784 | # Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests thunderbolt port with a loopback card.
Description
-----------
Verifies the thunderbolt port with a loopback card.
Test Procedure
--------------
1. Operator inserts the loopback card.
2. The tool sends payloads to the loopback card.
3. The tool receives payloads from the loopback card and checks correctness.
4. The tool collects lane margining data and uploads it to server.
5. Operator removes the loopback card.
Dependency
----------
- Loopback card driver.
- tdtl tool if we want to test lane margining.
- Write serial number to device data before the test for data collecting.
- The DUT must be able to connect factory server when running the test.
Examples
--------
The minimal working example::
{
"pytest_name": "thunderbolt_loopback",
"args": {
"usbpd_spec": {
"port": 0
}
}
}
Test specific controller and test lane margining with 60 seconds timeout::
{
"pytest_name": "thunderbolt_loopback"
"args": {
"usbpd_spec": {
"port": 0
},
"timeout_secs": 60,
"controller_port": "0-1.*",
"lane_margining": true
}
}
Test controller 0-3 with CC1 port 1 with 60 seconds timeout::
{
"pytest_name": "thunderbolt_loopback"
"args": {
"usbpd_spec": {
"port": 1,
"polarity": 1
},
"timeout_secs": 60,
"controller_port": "0-3.*"
}
}
"""
import logging
import os
import re
import subprocess
import time
from cros.factory.device import device_utils
from cros.factory.device import usb_c
from cros.factory.test import device_data
from cros.factory.test.env import paths
from cros.factory.test.i18n import _
from cros.factory.testlog import testlog
from cros.factory.test import server_proxy
from cros.factory.test import session
from cros.factory.test import test_case
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import sync_utils
from cros.factory.utils import type_utils
# debugfs root under which the thunderbolt DMA-test knobs are exposed.
_LOOPBACK_TEST_PATH = '/sys/kernel/debug/thunderbolt'
# Glob patterns ("<domain>-<adapter>.*") for the supported controller ports.
_CONTROLLER_PORTS = ('0-1.*', '0-3.*', '1-1.*', '1-3.*')
# Extracts the domain and adapter numbers from a loopback device path.
_RE_ADP_DOMAIN = re.compile(r'^.*(?P<domain>\d+)-(?P<adapter>\d+)\.\d+$')
# Parses one result line of `cli.py margin_loopback`,
# e.g. "RT1 L0 BOTTOM,TOP = 56,54".
_RE_MARGIN_LOOPBACK = re.compile(
    r'(RT\d+ L\d+ )(BOTTOM|LEFT),(TOP|RIGHT) = (\d+),(\d+)')
# Name of the debugfs subdirectory holding the DMA-test attributes.
_DMA_TEST = 'dma_test'
# Kernel module that provides the DMA test.
_TEST_MODULE = 'thunderbolt_dma_test'
LINK_WIDTH_TYPE = type_utils.Enum(['Single', 'Dual'])
LINK_SPEED_TYPE = type_utils.Enum(['Slow', 'Fast'])
# String values written to the debugfs 'lanes' attribute.
ENCODE_LINK_WIDTH = {
    LINK_WIDTH_TYPE.Single: '1',
    LINK_WIDTH_TYPE.Dual: '2'
}
# String values written to the debugfs 'speed' attribute (Gb/s).
ENCODE_LINK_SPEED = {
    LINK_SPEED_TYPE.Slow: '10',
    LINK_SPEED_TYPE.Fast: '20'
}
# Extracts the "result: ..." line from the debugfs 'status' attribute.
_RE_STATUS = re.compile(r'^result: (.+)\n(?:.|\n)*$')
_CARD_STATE = type_utils.Enum(['Absent', 'Multiple', 'Wrong'])
# Checkout location of the tdtl tool used for lane margining.
_TDTL_PATH = os.path.join(paths.FACTORY_DIR, 'tdtl-master')
class ThunderboltLoopbackTest(test_case.TestCase):
  """Thunderbolt loopback card factory test."""
  LOG_GROUP_NAME = 'usb4_lane_margining_log'
  LOG_KEYS = [
      'DOMAIN',
      'ADP',
      'RT1 L0 BOTTOM',
      'RT1 L0 TOP',
      'RT1 L0 LEFT',
      'RT1 L0 RIGHT',
      'RT1 L1 BOTTOM',
      'RT1 L1 TOP',
      'RT1 L1 LEFT',
      'RT1 L1 RIGHT',
      'RT2 L0 BOTTOM',
      'RT2 L0 TOP',
      'RT2 L0 LEFT',
      'RT2 L0 RIGHT',
      'RT2 L1 BOTTOM',
      'RT2 L1 TOP',
      'RT2 L1 LEFT',
      'RT2 L1 RIGHT',
  ]
  ARGS = [
      Arg('timeout_secs', int, 'Timeout value for the test.', default=None),
      Arg('expected_link_speed', LINK_SPEED_TYPE, 'Link speed.',
          default=LINK_SPEED_TYPE.Fast),
      Arg('expected_link_width', LINK_WIDTH_TYPE, 'Link width.',
          default=LINK_WIDTH_TYPE.Dual),
      Arg('packets_to_send', int, 'Amount of packets to be sent.',
          default=1000),
      Arg('packets_to_receive', int, 'Amount of packets to be received.',
          default=1000),
      Arg('debugfs_path', str, 'The path of debugfs to test.', default=None),
      Arg('controller_port', str, 'The name of the controller port to test.',
          default=None),
      Arg('usbpd_spec', dict,
          ('A dict which must contain "port" and optionally specify "polarity".'
           ' For example, `{"port": 1, "polarity": 1}`.'),
          schema=usb_c.USB_PD_SPEC_SCHEMA, _transform=usb_c.MigrateUSBPDSpec),
      Arg('load_module', bool, 'Load test module.', default=True),
      Arg('check_muxinfo_only', bool, 'Check muxinfo only.', default=False),
      Arg('lane_margining', bool, 'Collet lane margining data.', default=False),
      Arg('lane_margining_timeout_secs', (int, float),
          'Timeout for colleting lane margining data.', default=10),
      Arg('check_card_removal', bool,
          'If set, require removing the card after DMA test.', default=True),
  ]

  def setUp(self):
    """Initializes DUT handles, per-run state and Testlog groups."""
    self.ui.ToggleTemplateClass('font-large', True)
    self._dut = device_utils.CreateDUTInterface()
    self._usbpd_port = self.args.usbpd_spec['port']
    self._usbpd_polarity = {
        1: 'NORMAL',
        2: 'INVERTED'
    }.get(self.args.usbpd_spec.get('polarity'))
    self._remove_module = False
    self._card_state = None
    self._muxinfo = {}
    self._first_typec_control = True
    self._first_check_mux_info = True
    self._group_checker = None
    if self.args.lane_margining:
      # Group checker and details for Testlog.
      self._group_checker = testlog.GroupParam(self.LOG_GROUP_NAME,
                                               self.LOG_KEYS)
      testlog.UpdateParam('ADP', param_type=testlog.PARAM_TYPE.argument)
      testlog.UpdateParam('DOMAIN', param_type=testlog.PARAM_TYPE.argument)
    self._errors = []

  def tearDown(self):
    """Unloads the test module if this test was the one that loaded it."""
    if self._remove_module:
      self._dut.CheckCall(['modprobe', '-r', _TEST_MODULE], log=True)

  def _GlobLoopbackPath(self, controller_ports):
    """Returns debugfs device dirs with a dma_test node for the given ports."""
    devices = []
    for name in controller_ports:
      device_path = self._dut.path.join(_LOOPBACK_TEST_PATH, name, _DMA_TEST)
      devices.extend(
          self._dut.path.dirname(path) for path in self._dut.Glob(device_path))
    return devices

  def _SetCardState(self, state):
    """Updates the cached card state; returns True if the state changed."""
    if self._card_state == state:
      return False
    self._card_state = state
    return True

  def _SendTypecControl(self):
    """Send typeccontrol control command."""
    _first_typec_control = self._first_typec_control
    self._first_typec_control = False
    mode = {
        'DP': '0',
        'TBT': '1',
        'USB4': '2'
    }
    try:
      self._dut.CheckCall(
          ['ectool', 'typeccontrol',
           str(self._usbpd_port), '2', mode['TBT']], log=_first_typec_control)
    except Exception:
      # Best-effort: the EC may reject the mode switch; _CheckMuxinfo will
      # simply retry on the next poll.
      pass

  def _CheckMuxinfo(self):
    """Returns True if TBT=1."""
    fail_tag = 'GetPDMuxInfo'
    _first_check_mux_info = self._first_check_mux_info
    self._first_check_mux_info = False
    try:
      outputs = self._dut.usb_c.GetPDMuxInfo(self._usbpd_port,
                                             log=_first_check_mux_info)
    except Exception:
      # Only log and refresh the UI on a state change to avoid spamming.
      if self._muxinfo.get(fail_tag) != 1:
        logging.exception('%s failed', fail_tag)
        self.ui.SetState(_('Please unplug and replug.'))
        self._muxinfo = {
            fail_tag: 1
        }
      return False
    else:
      if self._muxinfo != outputs:
        logging.info('%s %r', fail_tag, outputs)
        self.ui.SetState(
            'Port %d<br>%s %r' % (self._usbpd_port, fail_tag, outputs))
        self._muxinfo = outputs
      if self._usbpd_polarity:
        if outputs['POLARITY'] != self._usbpd_polarity:
          self.ui.SetInstruction(
              _('Wrong USB side, please flip over {media}.',
                media='Loopback card'))
          return False
        self.ui.SetInstruction('')
      if outputs['TBT']:
        return True
      if outputs['USB']:
        # Card enumerated as plain USB; ask the EC to enter TBT mode.
        self._SendTypecControl()
      return False

  def _FindLoopbackPath(self):
    """Returns the loopback card's debugfs dir, or None if not (yet) valid."""
    if self.args.debugfs_path:
      if self._dut.path.exists(self.args.debugfs_path):
        return self.args.debugfs_path
      if self._SetCardState(_CARD_STATE.Absent):
        logging.info('No loopback card exists.')
      return None
    controller_ports = set([self.args.controller_port] if self.args
                           .controller_port else _CONTROLLER_PORTS)
    devices = self._GlobLoopbackPath(controller_ports)
    if len(devices) > 1:
      if self._SetCardState(_CARD_STATE.Multiple):
        self.ui.SetState(_('Do not insert more than one loopback card.'))
        logging.info('Multiple loopback cards exist: %r. controller_ports: %r',
                     devices, controller_ports)
      return None
    wrong_controller_ports = set(_CONTROLLER_PORTS) - controller_ports
    wrong_devices = self._GlobLoopbackPath(wrong_controller_ports)
    if wrong_devices:
      if self._SetCardState(_CARD_STATE.Wrong):
        self.ui.SetState(
            _('The loopback card is inserted into the wrong port.'))
        logging.info(('Wrong loopback cards exist: %r. '
                      'wrong_controller_ports: %r'), wrong_devices,
                     wrong_controller_ports)
      return None
    if not devices:
      if self._SetCardState(_CARD_STATE.Absent):
        self.ui.SetState(_('Insert the loopback card.'))
        logging.info('No loopback card exists. controller_ports: %r',
                     controller_ports)
      return None
    return devices[0]

  def _LogAndWriteFile(self, filename, content):
    """Writes `content` to `filename` on the DUT, logging the operation."""
    logging.info('echo %s > %s', content, filename)
    self._dut.WriteFile(filename, content)

  def _TestLaneMargining(self, domain: str, adapter: str):
    """Uses tdtl tool to collect lane margining data.

    Args:
      domain: A string we pass to tdtl tool.
      adapter: A string we pass to tdtl tool.

    Returns:
      log_result: A dict to save the result.
    """
    session.console.info('Start collecting lane margining data.')
    # Log 0 when failed.
    # Log -1 when timeout.
    log_result = dict.fromkeys(self.LOG_KEYS, None)
    log_result.update({
        'ADP': int(adapter),
        'DOMAIN': int(domain),
    })
    # self._dut.CheckOutput do not support env and timeout
    # process_utils.Spawn do not support timeout
    cmd = [
        'cli.py', 'margin_loopback', '-d', domain, '-a', adapter, '-r', '0',
        '-i', '1'
    ]
    env = {
        'ADP': adapter,
        'LC_ALL': 'en_US.utf-8',
    }
    logging.info('env: %r, cmd: %r, cwd: %r', env, cmd, _TDTL_PATH)
    stop_timer = self.ui.StartCountdownTimer(
        self.args.lane_margining_timeout_secs)
    try:
      result = subprocess.run(cmd, env=env, cwd=_TDTL_PATH,
                              timeout=self.args.lane_margining_timeout_secs,
                              encoding='utf-8', stdout=subprocess.PIPE,
                              check=False)
    except subprocess.TimeoutExpired:
      logging.exception('_TestLaneMargining timeout')
      self._errors.append('_TestLaneMargining timeout')
      for key, value in log_result.items():
        if value is None:
          log_result[key] = -1
      return log_result
    finally:
      stop_timer.set()
    try:
      logging.info('stdout:\n%s', result.stdout)
      result.check_returncode()
    except Exception:
      logging.exception('_TestLaneMargining failed')
      self._errors.append('_TestLaneMargining failed')
      for key, value in log_result.items():
        if value is None:
          log_result[key] = 0
    # The output of `cli.py margin_loopback` looks like below.
    #
    # RT1 L0 BOTTOM,TOP = 56,54
    # RT2 L0 BOTTOM,TOP = 56,62
    # RT1 L0 LEFT,RIGHT = 20,17
    # RT2 L0 LEFT,RIGHT = 22,24
    # RT1 L1 BOTTOM,TOP = 62,70
    # RT2 L1 BOTTOM,TOP = 60,68
    # RT1 L1 LEFT,RIGHT = 21,22
    # RT2 L1 LEFT,RIGHT = 17,16
    for line in result.stdout.splitlines():
      match = _RE_MARGIN_LOOPBACK.match(line)
      if not match:
        continue
      for index in range(2, 4):
        log_result.update(
            {match.group(1) + match.group(index): int(match.group(index + 2))})
    return log_result

  def _GetUITimer(self):
    """Returns the stop event flag of the timer or None if no timeout."""
    if self.args.timeout_secs:
      return self.ui.StartFailingCountdownTimer(self.args.timeout_secs)
    return None

  def _UploadLaneMargining(self, log_result: dict):
    """Uploads the result of lane margining."""
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
    csv_entries = [device_data.GetSerialNumber(), timestamp]
    csv_entries.extend(log_result[key] for key in self.LOG_KEYS)
    self.ui.SetState(_('Trying to check server protocol...'))
    try:
      server = server_proxy.GetServerProxy(timeout=5)
      server.Ping()
      server.UploadCSVEntry(self.LOG_GROUP_NAME, csv_entries)
    except server_proxy.Fault:
      messages = 'Server fault %s' % server_proxy.GetServerURL()
      logging.exception(messages)
      self._errors.append(messages)
    except Exception:
      messages = 'Unable to sync with server %s' % server_proxy.GetServerURL()
      logging.exception(messages)
      self._errors.append(messages)
    with self._group_checker:
      for key, value in log_result.items():
        testlog.LogParam(key, value)

  def _WaitMuxInfoBecomingTBT(self):
    """Waits until Mux info becomes TBT=1."""
    stop_timer = self._GetUITimer()
    self.ui.SetState(_('Insert the loopback card.'))
    sync_utils.WaitFor(self._CheckMuxinfo, self.args.timeout_secs,
                       poll_interval=0.5)
    if stop_timer:
      stop_timer.set()

  def _WaitForLoopbackCardInsertion(self):
    """Waits until device node appears."""
    stop_timer = self._GetUITimer()
    self.ui.SetState(_('Insert the loopback card.'))
    device_path = sync_utils.WaitFor(self._FindLoopbackPath,
                                     self.args.timeout_secs, poll_interval=0.5)
    match = _RE_ADP_DOMAIN.match(device_path)
    if not match:
      raise Exception('device_path is not in expected format.')
    adapter = match.group('adapter')
    domain = match.group('domain')
    session.console.info('The ADP is at %r, domain is %r.', adapter, domain)
    if stop_timer:
      stop_timer.set()
    return device_path, domain, adapter

  def _WaitForLoopbackCardRemoval(self, device_path):
    """Waits until device node disappears."""
    stop_timer = self._GetUITimer()
    self.ui.SetState(_('Remove the loopback card.'))
    sync_utils.WaitFor(lambda: not self._dut.path.exists(device_path),
                       self.args.timeout_secs, poll_interval=0.5)
    if stop_timer:
      stop_timer.set()

  def _TestDMA(self, device_path):
    """Performs DMA test."""
    stop_timer = self._GetUITimer()
    self.ui.SetState(_('Test is in progress, please do not move the device.'))
    session.console.info('The loopback card path is at %r.', device_path)
    device_test_path = self._dut.path.join(device_path, _DMA_TEST)
    # Configure the test
    self._LogAndWriteFile(
        self._dut.path.join(device_test_path, 'speed'),
        ENCODE_LINK_SPEED[self.args.expected_link_speed])
    self._LogAndWriteFile(
        self._dut.path.join(device_test_path, 'lanes'),
        ENCODE_LINK_WIDTH[self.args.expected_link_width])
    self._LogAndWriteFile(
        self._dut.path.join(device_test_path, 'packets_to_send'),
        str(self.args.packets_to_send))
    self._LogAndWriteFile(
        self._dut.path.join(device_test_path, 'packets_to_receive'),
        str(self.args.packets_to_receive))
    # Run the test.
    self._LogAndWriteFile(self._dut.path.join(device_test_path, 'test'), '1')
    if stop_timer:
      stop_timer.set()
    # Check the result.
    status_path = self._dut.path.join(device_test_path, 'status')
    logging.info('cat %s', status_path)
    output = self._dut.ReadFile(status_path)
    logging.info('output:\n%s', output)
    match = _RE_STATUS.match(output)
    if not match:
      self._errors.append('Output format of status is changed.')
      # BUG FIX: must return here; `match` is None, so falling through to
      # `match.group(1)` raised AttributeError instead of recording the error.
      return
    result = match.group(1)
    if result == 'success':
      return
    if result in ('fail', 'failed', 'not run'):
      self._errors.append('result: %s' % result)
    else:
      self._errors.append('Unknown result: %r' % result)

  def runTest(self):
    """Runs the full loopback flow: mux check, DMA test, margining, removal."""
    self._WaitMuxInfoBecomingTBT()
    if self.args.check_muxinfo_only:
      self.PassTask()
    if self.args.load_module:
      # Fail the test if the module doesn't exist.
      self._dut.CheckCall(['modinfo', _TEST_MODULE])
      # If the module is loaded before the test then do not remove it.
      loaded = self._dut.Call(['modprobe', '--first-time', _TEST_MODULE],
                              log=True)
      self._remove_module = not loaded
    device_path, domain, adapter = self._WaitForLoopbackCardInsertion()
    self._TestDMA(device_path)
    if self.args.lane_margining:
      log_result = self._TestLaneMargining(domain, adapter)
      self._UploadLaneMargining(log_result)
    if self.args.check_card_removal:
      self._WaitForLoopbackCardRemoval(device_path)
    if self._errors:
      self.FailTask('\n'.join(self._errors))
| # Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests thunderbolt port with a loopback card.
Description
-----------
Verifies the thunderbolt port with a loopback card.
Test Procedure
--------------
1. Operator inserts the loopback card.
2. The tool sends payloads to the loopback card.
3. The tool receives payloads from the loopback card and checks correctness.
4. The tool collects lane margining data and uploads it to server.
5. Operator removes the loopback card.
Dependency
----------
- Loopback card driver.
- tdtl tool if we want to test lane margining.
- Write serial number to device data before the test for data collecting.
- The DUT must be able to connect factory server when running the test.
Examples
--------
The minimal working example::
{
"pytest_name": "thunderbolt_loopback",
"args": {
"usbpd_spec": {
"port": 0
}
}
}
Test specific controller and test lane margining with 60 seconds timeout::
{
"pytest_name": "thunderbolt_loopback"
"args": {
"usbpd_spec": {
"port": 0
},
"timeout_secs": 60,
"controller_port": "0-1.*",
"lane_margining": true
}
}
Test controller 0-3 with CC1 port 1 with 60 seconds timeout::
{
"pytest_name": "thunderbolt_loopback"
"args": {
"usbpd_spec": {
"port": 1,
"polarity": 1
},
"timeout_secs": 60,
"controller_port": "0-3.*"
}
}
"""
import logging
import os
import re
import subprocess
import time
from cros.factory.device import device_utils
from cros.factory.device import usb_c
from cros.factory.test import device_data
from cros.factory.test.env import paths
from cros.factory.test.i18n import _
from cros.factory.testlog import testlog
from cros.factory.test import server_proxy
from cros.factory.test import session
from cros.factory.test import test_case
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import sync_utils
from cros.factory.utils import type_utils
_LOOPBACK_TEST_PATH = '/sys/kernel/debug/thunderbolt'
_CONTROLLER_PORTS = ('0-1.*', '0-3.*', '1-1.*', '1-3.*')
_RE_ADP_DOMAIN = re.compile(r'^.*(?P<domain>\d+)-(?P<adapter>\d+)\.\d+$')
_RE_MARGIN_LOOPBACK = re.compile(
r'(RT\d+ L\d+ )(BOTTOM|LEFT),(TOP|RIGHT) = (\d+),(\d+)')
_DMA_TEST = 'dma_test'
_TEST_MODULE = 'thunderbolt_dma_test'
LINK_WIDTH_TYPE = type_utils.Enum(['Single', 'Dual'])
LINK_SPEED_TYPE = type_utils.Enum(['Slow', 'Fast'])
ENCODE_LINK_WIDTH = {
LINK_WIDTH_TYPE.Single: '1',
LINK_WIDTH_TYPE.Dual: '2'
}
ENCODE_LINK_SPEED = {
LINK_SPEED_TYPE.Slow: '10',
LINK_SPEED_TYPE.Fast: '20'
}
_RE_STATUS = re.compile(r'^result: (.+)\n(?:.|\n)*$')
_CARD_STATE = type_utils.Enum(['Absent', 'Multiple', 'Wrong'])
_TDTL_PATH = os.path.join(paths.FACTORY_DIR, 'tdtl-master')
class ThunderboltLoopbackTest(test_case.TestCase):
"""Thunderbolt loopback card factory test."""
LOG_GROUP_NAME = 'usb4_lane_margining_log'
LOG_KEYS = [
'DOMAIN',
'ADP',
'RT1 L0 BOTTOM',
'RT1 L0 TOP',
'RT1 L0 LEFT',
'RT1 L0 RIGHT',
'RT1 L1 BOTTOM',
'RT1 L1 TOP',
'RT1 L1 LEFT',
'RT1 L1 RIGHT',
'RT2 L0 BOTTOM',
'RT2 L0 TOP',
'RT2 L0 LEFT',
'RT2 L0 RIGHT',
'RT2 L1 BOTTOM',
'RT2 L1 TOP',
'RT2 L1 LEFT',
'RT2 L1 RIGHT',
]
ARGS = [
Arg('timeout_secs', int, 'Timeout value for the test.', default=None),
Arg('expected_link_speed', LINK_SPEED_TYPE, 'Link speed.',
default=LINK_SPEED_TYPE.Fast),
Arg('expected_link_width', LINK_WIDTH_TYPE, 'Link width.',
default=LINK_WIDTH_TYPE.Dual),
Arg('packets_to_send', int, 'Amount of packets to be sent.',
default=1000),
Arg('packets_to_receive', int, 'Amount of packets to be received.',
default=1000),
Arg('debugfs_path', str, 'The path of debugfs to test.', default=None),
Arg('controller_port', str, 'The name of the controller port to test.',
default=None),
Arg('usbpd_spec', dict,
('A dict which must contain "port" and optionally specify "polarity".'
' For example, `{"port": 1, "polarity": 1}`.'),
schema=usb_c.USB_PD_SPEC_SCHEMA, _transform=usb_c.MigrateUSBPDSpec),
Arg('load_module', bool, 'Load test module.', default=True),
Arg('check_muxinfo_only', bool, 'Check muxinfo only.', default=False),
Arg('lane_margining', bool, 'Collet lane margining data.', default=False),
Arg('lane_margining_timeout_secs', (int, float),
'Timeout for colleting lane margining data.', default=10),
Arg('check_card_removal', bool,
'If set, require removing the card after DMA test.', default=True),
]
def setUp(self):
self.ui.ToggleTemplateClass('font-large', True)
self._dut = device_utils.CreateDUTInterface()
self._usbpd_port = self.args.usbpd_spec['port']
self._usbpd_polarity = {
1: 'NORMAL',
2: 'INVERTED'
}.get(self.args.usbpd_spec.get('polarity'))
self._remove_module = False
self._card_state = None
self._muxinfo = {}
self._first_typec_control = True
self._first_check_mux_info = True
self._group_checker = None
if self.args.lane_margining:
# Group checker and details for Testlog.
self._group_checker = testlog.GroupParam(self.LOG_GROUP_NAME,
self.LOG_KEYS)
testlog.UpdateParam('ADP', param_type=testlog.PARAM_TYPE.argument)
testlog.UpdateParam('DOMAIN', param_type=testlog.PARAM_TYPE.argument)
self._errors = []
def tearDown(self):
if self._remove_module:
self._dut.CheckCall(['modprobe', '-r', _TEST_MODULE], log=True)
def _GlobLoopbackPath(self, controller_ports):
devices = []
for name in controller_ports:
device_path = self._dut.path.join(_LOOPBACK_TEST_PATH, name, _DMA_TEST)
devices.extend(
self._dut.path.dirname(path) for path in self._dut.Glob(device_path))
return devices
def _SetCardState(self, state):
if self._card_state == state:
return False
self._card_state = state
return True
def _SendTypecControl(self):
"""Send typeccontrol control command."""
_first_typec_control = self._first_typec_control
self._first_typec_control = False
mode = {
'DP': '0',
'TBT': '1',
'USB4': '2'
}
try:
self._dut.CheckCall(
['ectool', 'typeccontrol',
str(self._usbpd_port), '2', mode['TBT']], log=_first_typec_control)
except Exception:
pass
def _CheckMuxinfo(self):
"""Returns True if TBT=1."""
fail_tag = 'GetPDMuxInfo'
_first_check_mux_info = self._first_check_mux_info
self._first_check_mux_info = False
try:
outputs = self._dut.usb_c.GetPDMuxInfo(self._usbpd_port,
log=_first_check_mux_info)
except Exception:
if self._muxinfo.get(fail_tag) != 1:
logging.exception('%s failed', fail_tag)
self.ui.SetState(_('Please unplug and replug.'))
self._muxinfo = {
fail_tag: 1
}
return False
else:
if self._muxinfo != outputs:
logging.info('%s %r', fail_tag, outputs)
self.ui.SetState(
'Port %d<br>%s %r' % (self._usbpd_port, fail_tag, outputs))
self._muxinfo = outputs
if self._usbpd_polarity:
if outputs['POLARITY'] != self._usbpd_polarity:
self.ui.SetInstruction(
_('Wrong USB side, please flip over {media}.',
media='Loopback card'))
return False
self.ui.SetInstruction('')
if outputs['TBT']:
return True
if outputs['USB']:
self._SendTypecControl()
return False
def _FindLoopbackPath(self):
if self.args.debugfs_path:
if self._dut.path.exists(self.args.debugfs_path):
return self.args.debugfs_path
if self._SetCardState(_CARD_STATE.Absent):
logging.info('No loopback card exists.')
return None
controller_ports = set([self.args.controller_port] if self.args
.controller_port else _CONTROLLER_PORTS)
devices = self._GlobLoopbackPath(controller_ports)
if len(devices) > 1:
if self._SetCardState(_CARD_STATE.Multiple):
self.ui.SetState(_('Do not insert more than one loopback card.'))
logging.info('Multiple loopback cards exist: %r. controller_ports: %r',
devices, controller_ports)
return None
wrong_controller_ports = set(_CONTROLLER_PORTS) - controller_ports
wrong_devices = self._GlobLoopbackPath(wrong_controller_ports)
if wrong_devices:
if self._SetCardState(_CARD_STATE.Wrong):
self.ui.SetState(
_('The loopback card is inserted into the wrong port.'))
logging.info(('Wrong loopback cards exist: %r. '
'wrong_controller_ports: %r'), wrong_devices,
wrong_controller_ports)
return None
if not devices:
if self._SetCardState(_CARD_STATE.Absent):
self.ui.SetState(_('Insert the loopback card.'))
logging.info('No loopback card exists. controller_ports: %r',
controller_ports)
return None
return devices[0]
def _LogAndWriteFile(self, filename, content):
logging.info('echo %s > %s', content, filename)
self._dut.WriteFile(filename, content)
def _TestLaneMargining(self, domain: str, adapter: str):
"""Uses tdtl tool to collect lane margining data.
Args:
domain: A string we pass to tdtl tool.
adapter: A string we pass to tdtl tool.
Returns:
log_result: A dict to save the result.
"""
session.console.info('Start collecting lane margining data.')
# Log 0 when failed.
# Log -1 when timeout.
log_result = dict.fromkeys(self.LOG_KEYS, None)
log_result.update({
'ADP': int(adapter),
'DOMAIN': int(domain),
})
# self._dut.CheckOutput do not support env and timeout
# process_utils.Spawn do not support timeout
cmd = [
'cli.py', 'margin_loopback', '-d', domain, '-a', adapter, '-r', '0',
'-i', '1'
]
env = {
'ADP': adapter,
'LC_ALL': 'en_US.utf-8',
}
logging.info('env: %r, cmd: %r, cwd: %r', env, cmd, _TDTL_PATH)
stop_timer = self.ui.StartCountdownTimer(
self.args.lane_margining_timeout_secs)
try:
result = subprocess.run(cmd, env=env, cwd=_TDTL_PATH,
timeout=self.args.lane_margining_timeout_secs,
encoding='utf-8', stdout=subprocess.PIPE,
check=False)
except subprocess.TimeoutExpired:
logging.exception('_TestLaneMargining timeout')
self._errors.append('_TestLaneMargining timeout')
for key, value in log_result.items():
if value is None:
log_result[key] = -1
return log_result
finally:
stop_timer.set()
try:
logging.info('stdout:\n%s', result.stdout)
result.check_returncode()
except Exception:
logging.exception('_TestLaneMargining failed')
self._errors.append('_TestLaneMargining failed')
for key, value in log_result.items():
if value is None:
log_result[key] = 0
# The output of `cli.py margin_loopback` looks like below.
#
# RT1 L0 BOTTOM,TOP = 56,54
# RT2 L0 BOTTOM,TOP = 56,62
# RT1 L0 LEFT,RIGHT = 20,17
# RT2 L0 LEFT,RIGHT = 22,24
# RT1 L1 BOTTOM,TOP = 62,70
# RT2 L1 BOTTOM,TOP = 60,68
# RT1 L1 LEFT,RIGHT = 21,22
# RT2 L1 LEFT,RIGHT = 17,16
for line in result.stdout.splitlines():
match = _RE_MARGIN_LOOPBACK.match(line)
if not match:
continue
for index in range(2, 4):
log_result.update(
{match.group(1) + match.group(index): int(match.group(index + 2))})
return log_result
def _GetUITimer(self):
"""Returns the stop event flag of the timer or None if no timeout."""
if self.args.timeout_secs:
return self.ui.StartFailingCountdownTimer(self.args.timeout_secs)
return None
def _UploadLaneMargining(self, log_result: dict):
"""Uploads the result of lane margining."""
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
csv_entries = [device_data.GetSerialNumber(), timestamp]
csv_entries.extend(log_result[key] for key in self.LOG_KEYS)
self.ui.SetState(_('Trying to check server protocol...'))
try:
server = server_proxy.GetServerProxy(timeout=5)
server.Ping()
server.UploadCSVEntry(self.LOG_GROUP_NAME, csv_entries)
except server_proxy.Fault:
messages = 'Server fault %s' % server_proxy.GetServerURL()
logging.exception(messages)
self._errors.append(messages)
except Exception:
messages = 'Unable to sync with server %s' % server_proxy.GetServerURL()
logging.exception(messages)
self._errors.append(messages)
with self._group_checker:
for key, value in log_result.items():
testlog.LogParam(key, value)
def _WaitMuxInfoBecomingTBT(self):
"""Waits until Mux info becomes TBT=1."""
stop_timer = self._GetUITimer()
self.ui.SetState(_('Insert the loopback card.'))
sync_utils.WaitFor(self._CheckMuxinfo, self.args.timeout_secs,
poll_interval=0.5)
if stop_timer:
stop_timer.set()
def _WaitForLoopbackCardInsertion(self):
"""Waits until device node appears."""
stop_timer = self._GetUITimer()
self.ui.SetState(_('Insert the loopback card.'))
device_path = sync_utils.WaitFor(self._FindLoopbackPath,
self.args.timeout_secs, poll_interval=0.5)
match = _RE_ADP_DOMAIN.match(device_path)
if not match:
raise Exception('device_path is not in expected format.')
adapter = match.group('adapter')
domain = match.group('domain')
session.console.info('The ADP is at %r, domain is %r.', adapter, domain)
if stop_timer:
stop_timer.set()
return device_path, domain, adapter
def _WaitForLoopbackCardRemoval(self, device_path):
"""Waits until device node disappears."""
stop_timer = self._GetUITimer()
self.ui.SetState(_('Remove the loopback card.'))
sync_utils.WaitFor(lambda: not self._dut.path.exists(device_path),
self.args.timeout_secs, poll_interval=0.5)
if stop_timer:
stop_timer.set()
def _TestDMA(self, device_path):
"""Performs DMA test."""
stop_timer = self._GetUITimer()
self.ui.SetState(_('Test is in progress, please do not move the device.'))
session.console.info('The loopback card path is at %r.', device_path)
device_test_path = self._dut.path.join(device_path, _DMA_TEST)
# Configure the test
self._LogAndWriteFile(
self._dut.path.join(device_test_path, 'speed'),
ENCODE_LINK_SPEED[self.args.expected_link_speed])
self._LogAndWriteFile(
self._dut.path.join(device_test_path, 'lanes'),
ENCODE_LINK_WIDTH[self.args.expected_link_width])
self._LogAndWriteFile(
self._dut.path.join(device_test_path, 'packets_to_send'),
str(self.args.packets_to_send))
self._LogAndWriteFile(
self._dut.path.join(device_test_path, 'packets_to_receive'),
str(self.args.packets_to_receive))
# Run the test.
self._LogAndWriteFile(self._dut.path.join(device_test_path, 'test'), '1')
if stop_timer:
stop_timer.set()
# Check the result.
status_path = self._dut.path.join(device_test_path, 'status')
logging.info('cat %s', status_path)
output = self._dut.ReadFile(status_path)
logging.info('output:\n%s', output)
match = _RE_STATUS.match(output)
if not match:
self._errors.append('Output format of status is changed.')
result = match.group(1)
if result == 'success':
return
if result in ('fail', 'failed', 'not run'):
self._errors.append('result: %s' % result)
else:
self._errors.append('Unknown result: %r' % result)
def runTest(self):
self._WaitMuxInfoBecomingTBT()
if self.args.check_muxinfo_only:
self.PassTask()
if self.args.load_module:
# Fail the test if the module doesn't exist.
self._dut.CheckCall(['modinfo', _TEST_MODULE])
# If the module is loaded before the test then do not remove it.
loaded = self._dut.Call(['modprobe', '--first-time', _TEST_MODULE],
log=True)
self._remove_module = not loaded
device_path, domain, adapter = self._WaitForLoopbackCardInsertion()
self._TestDMA(device_path)
if self.args.lane_margining:
log_result = self._TestLaneMargining(domain, adapter)
self._UploadLaneMargining(log_result)
if self.args.check_card_removal:
self._WaitForLoopbackCardRemoval(device_path)
if self._errors:
self.FailTask('\n'.join(self._errors))
| en | 0.631146 | # Copyright 2021 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Tests thunderbolt port with a loopback card. Description ----------- Verifies the thunderbolt port with a loopback card. Test Procedure -------------- 1. Operator inserts the loopback card. 2. The tool sends payloads to the loopback card. 3. The tool receives payloads from the loopback card and checks correctness. 4. The tool collects lane margining data and uploads it to server. 5. Operator removes the loopback card. Dependency ---------- - Loopback card driver. - tdtl tool if we want to test lane margining. - Write serial number to device data before the test for data collecting. - The DUT must be able to connect factory server when running the test. Examples -------- The minimal working example:: { "pytest_name": "thunderbolt_loopback", "args": { "usbpd_spec": { "port": 0 } } } Test specific controller and test lane margining with 60 seconds timeout:: { "pytest_name": "thunderbolt_loopback" "args": { "usbpd_spec": { "port": 0 }, "timeout_secs": 60, "controller_port": "0-1.*", "lane_margining": true } } Test controller 0-3 with CC1 port 1 with 60 seconds timeout:: { "pytest_name": "thunderbolt_loopback" "args": { "usbpd_spec": { "port": 1, "polarity": 1 }, "timeout_secs": 60, "controller_port": "0-3.*" } } Thunderbolt loopback card factory test. # Group checker and details for Testlog. Send typeccontrol control command. Returns True if TBT=1. Uses tdtl tool to collect lane margining data. Args: domain: A string we pass to tdtl tool. adapter: A string we pass to tdtl tool. Returns: log_result: A dict to save the result. # Log 0 when failed. # Log -1 when timeout. # self._dut.CheckOutput do not support env and timeout # process_utils.Spawn do not support timeout # The output of `cli.py margin_loopback` looks like below. 
# # RT1 L0 BOTTOM,TOP = 56,54 # RT2 L0 BOTTOM,TOP = 56,62 # RT1 L0 LEFT,RIGHT = 20,17 # RT2 L0 LEFT,RIGHT = 22,24 # RT1 L1 BOTTOM,TOP = 62,70 # RT2 L1 BOTTOM,TOP = 60,68 # RT1 L1 LEFT,RIGHT = 21,22 # RT2 L1 LEFT,RIGHT = 17,16 Returns the stop event flag of the timer or None if no timeout. Uploads the result of lane margining. Waits until Mux info becomes TBT=1. Waits until device node appears. Waits until device node disappears. Performs DMA test. # Configure the test # Run the test. # Check the result. # Fail the test if the module doesn't exist. # If the module is loaded before the test then do not remove it. | 2.454435 | 2 |
archiv/migrations/0013_hapaplacename_alternative_names.py | acdh-oeaw/hapa | 0 | 6617785 | <gh_stars>0
# Generated by Django 3.1.6 on 2021-04-08 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archiv', '0012_auto_20210408_1035'),
]
operations = [
migrations.AddField(
model_name='hapaplacename',
name='alternative_names',
field=models.TextField(blank=True, help_text="Alternative Namen, verwende '; ' als Trennzeichen", verbose_name='Alternative Namen'),
),
]
| # Generated by Django 3.1.6 on 2021-04-08 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archiv', '0012_auto_20210408_1035'),
]
operations = [
migrations.AddField(
model_name='hapaplacename',
name='alternative_names',
field=models.TextField(blank=True, help_text="Alternative Namen, verwende '; ' als Trennzeichen", verbose_name='Alternative Namen'),
),
] | en | 0.812333 | # Generated by Django 3.1.6 on 2021-04-08 10:46 | 1.542982 | 2 |
model_multilabel_mprover.py | swarnaHub/multiPRover | 6 | 6617786 | from pytorch_transformers import BertPreTrainedModel, RobertaConfig, \
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, RobertaModel
from pytorch_transformers.modeling_roberta import RobertaClassificationHead
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
import torch
import torch.nn as nn
import numpy as np
from scipy.optimize import linear_sum_assignment
class RobertaForRR(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForRR, self).__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.classifier = RobertaClassificationHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, position_ids=None,
head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:]
if labels is not None:
loss_fct = CrossEntropyLoss()
qa_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (qa_loss,) + outputs
return outputs # qa_loss, logits, (hidden_states), (attentions)
class NodeClassificationHead(nn.Module):
def __init__(self, config, num_proof):
super(NodeClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, num_proof)
def forward(self, features, **kwargs):
x = self.dropout(features)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class EdgeClassificationHead(nn.Module):
def __init__(self, config, num_proof):
super(EdgeClassificationHead, self).__init__()
self.dense = nn.Linear(3 * config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, num_proof)
def forward(self, features, **kwargs):
x = self.dropout(features)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class RobertaForRRMMultilabelMprover(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config, num_proof):
super(RobertaForRRMMultilabelMprover, self).__init__(config)
self.num_labels = config.num_labels
self.num_proof = num_proof
self.roberta = RobertaModel(config)
self.naf_layer = nn.Linear(config.hidden_size, config.hidden_size)
self.classifier = RobertaClassificationHead(config)
self.classifier_node = NodeClassificationHead(config, num_proof=num_proof)
self.classifier_edge = EdgeClassificationHead(config, num_proof=num_proof)
self.apply(self.init_weights)
def _get_hungarian_loss(self, loss_map):
cost_matrix = np.zeros((self.num_proof, self.num_proof))
for i in range(self.num_proof):
for j in range(self.num_proof):
cost_matrix[i][j] = loss_map[(i, j)]
row_ind, col_ind = linear_sum_assignment(cost_matrix)
hungarian_loss = None
for (pred_id, gold_id) in zip(row_ind, col_ind):
if hungarian_loss is None:
hungarian_loss = loss_map[(pred_id, gold_id)]
else:
hungarian_loss += loss_map[(pred_id, gold_id)]
return hungarian_loss
def forward(self, input_ids, token_type_ids=None, attention_mask=None, proof_offset=None, node_label=None,
edge_label=None, labels=None, proof_count=None, position_ids=None, head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
cls_output = sequence_output[:, 0, :]
naf_output = self.naf_layer(cls_output)
logits = self.classifier(sequence_output)
max_node_length = node_label.shape[1]
max_edge_length = edge_label.shape[1]
batch_size = node_label.shape[0]
embedding_dim = sequence_output.shape[2]
batch_node_embedding = torch.zeros((batch_size, max_node_length, embedding_dim)).to("cuda")
batch_edge_embedding = torch.zeros((batch_size, max_edge_length, 3 * embedding_dim)).to("cuda")
for batch_index in range(batch_size):
prev_index = 1
sample_node_embedding = None
count = 0
for offset in proof_offset[batch_index]:
if offset == 0:
break
else:
rf_embedding = torch.mean(sequence_output[batch_index, prev_index:(offset + 1), :],
dim=0).unsqueeze(0)
prev_index = offset + 1
count += 1
if sample_node_embedding is None:
sample_node_embedding = rf_embedding
else:
sample_node_embedding = torch.cat((sample_node_embedding, rf_embedding), dim=0)
# Add the NAF output at the end
sample_node_embedding = torch.cat((sample_node_embedding, naf_output[batch_index].unsqueeze(0)), dim=0)
repeat1 = sample_node_embedding.unsqueeze(0).repeat(len(sample_node_embedding), 1, 1)
repeat2 = sample_node_embedding.unsqueeze(1).repeat(1, len(sample_node_embedding), 1)
sample_edge_embedding = torch.cat((repeat1, repeat2, (repeat1 - repeat2)), dim=2)
sample_edge_embedding = sample_edge_embedding.view(-1, sample_edge_embedding.shape[-1])
# Append 0s at the end (these will be ignored for loss)
sample_node_embedding = torch.cat((sample_node_embedding,
torch.zeros((max_node_length - count - 1, embedding_dim)).to("cuda")),
dim=0)
sample_edge_embedding = torch.cat((sample_edge_embedding,
torch.zeros((max_edge_length - len(sample_edge_embedding),
3 * embedding_dim)).to("cuda")), dim=0)
batch_node_embedding[batch_index, :, :] = sample_node_embedding
batch_edge_embedding[batch_index, :, :] = sample_edge_embedding
node_logits = self.classifier_node(batch_node_embedding)
edge_logits = self.classifier_edge(batch_edge_embedding)
outputs = (logits, node_logits, edge_logits) + outputs[2:]
if labels is not None:
qa_loss_fct = CrossEntropyLoss()
qa_loss = qa_loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
proof_loss = None
proof_loss_fct = BCEWithLogitsLoss()
for batch_index in range(batch_size):
loss_map = {}
sample_node_logits = node_logits[batch_index, :, :]
sample_edge_logits = edge_logits[batch_index, :, :]
sample_node_label = node_label[batch_index, :, :]
sample_edge_label = edge_label[batch_index, :, :]
for i in range(self.num_proof):
for j in range(self.num_proof):
temp_node_logits = sample_node_logits[:, i]
temp_node_label = sample_node_label.double()[:, j]
temp_node_logits = temp_node_logits[(temp_node_label != -100.)]
temp_node_label = temp_node_label[(temp_node_label != -100.)]
temp_edge_logits = sample_edge_logits[:, i]
temp_edge_label = sample_edge_label.double()[:, j]
temp_edge_logits = temp_edge_logits[(temp_edge_label != -100.)]
temp_edge_label = temp_edge_label[(temp_edge_label != -100.)]
if temp_edge_label.shape[0] != 0:
loss_map[(i, j)] = proof_loss_fct(temp_node_logits, temp_node_label) \
+ proof_loss_fct(temp_edge_logits, temp_edge_label)
else:
loss_map[(i, j)] = proof_loss_fct(temp_node_logits, temp_node_label)
hungarian_loss = self._get_hungarian_loss(loss_map)
if proof_loss is None:
proof_loss = hungarian_loss
else:
proof_loss += hungarian_loss
outputs = (qa_loss+proof_loss, qa_loss, proof_loss) + outputs
return outputs # (total_loss), qa_loss, proof_loss, logits, node_logits, edge_logits, (hidden_states), (attentions)
| from pytorch_transformers import BertPreTrainedModel, RobertaConfig, \
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, RobertaModel
from pytorch_transformers.modeling_roberta import RobertaClassificationHead
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
import torch
import torch.nn as nn
import numpy as np
from scipy.optimize import linear_sum_assignment
class RobertaForRR(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForRR, self).__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.classifier = RobertaClassificationHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, position_ids=None,
head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:]
if labels is not None:
loss_fct = CrossEntropyLoss()
qa_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (qa_loss,) + outputs
return outputs # qa_loss, logits, (hidden_states), (attentions)
class NodeClassificationHead(nn.Module):
def __init__(self, config, num_proof):
super(NodeClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, num_proof)
def forward(self, features, **kwargs):
x = self.dropout(features)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class EdgeClassificationHead(nn.Module):
def __init__(self, config, num_proof):
super(EdgeClassificationHead, self).__init__()
self.dense = nn.Linear(3 * config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, num_proof)
def forward(self, features, **kwargs):
x = self.dropout(features)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class RobertaForRRMMultilabelMprover(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config, num_proof):
super(RobertaForRRMMultilabelMprover, self).__init__(config)
self.num_labels = config.num_labels
self.num_proof = num_proof
self.roberta = RobertaModel(config)
self.naf_layer = nn.Linear(config.hidden_size, config.hidden_size)
self.classifier = RobertaClassificationHead(config)
self.classifier_node = NodeClassificationHead(config, num_proof=num_proof)
self.classifier_edge = EdgeClassificationHead(config, num_proof=num_proof)
self.apply(self.init_weights)
def _get_hungarian_loss(self, loss_map):
cost_matrix = np.zeros((self.num_proof, self.num_proof))
for i in range(self.num_proof):
for j in range(self.num_proof):
cost_matrix[i][j] = loss_map[(i, j)]
row_ind, col_ind = linear_sum_assignment(cost_matrix)
hungarian_loss = None
for (pred_id, gold_id) in zip(row_ind, col_ind):
if hungarian_loss is None:
hungarian_loss = loss_map[(pred_id, gold_id)]
else:
hungarian_loss += loss_map[(pred_id, gold_id)]
return hungarian_loss
def forward(self, input_ids, token_type_ids=None, attention_mask=None, proof_offset=None, node_label=None,
edge_label=None, labels=None, proof_count=None, position_ids=None, head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
cls_output = sequence_output[:, 0, :]
naf_output = self.naf_layer(cls_output)
logits = self.classifier(sequence_output)
max_node_length = node_label.shape[1]
max_edge_length = edge_label.shape[1]
batch_size = node_label.shape[0]
embedding_dim = sequence_output.shape[2]
batch_node_embedding = torch.zeros((batch_size, max_node_length, embedding_dim)).to("cuda")
batch_edge_embedding = torch.zeros((batch_size, max_edge_length, 3 * embedding_dim)).to("cuda")
for batch_index in range(batch_size):
prev_index = 1
sample_node_embedding = None
count = 0
for offset in proof_offset[batch_index]:
if offset == 0:
break
else:
rf_embedding = torch.mean(sequence_output[batch_index, prev_index:(offset + 1), :],
dim=0).unsqueeze(0)
prev_index = offset + 1
count += 1
if sample_node_embedding is None:
sample_node_embedding = rf_embedding
else:
sample_node_embedding = torch.cat((sample_node_embedding, rf_embedding), dim=0)
# Add the NAF output at the end
sample_node_embedding = torch.cat((sample_node_embedding, naf_output[batch_index].unsqueeze(0)), dim=0)
repeat1 = sample_node_embedding.unsqueeze(0).repeat(len(sample_node_embedding), 1, 1)
repeat2 = sample_node_embedding.unsqueeze(1).repeat(1, len(sample_node_embedding), 1)
sample_edge_embedding = torch.cat((repeat1, repeat2, (repeat1 - repeat2)), dim=2)
sample_edge_embedding = sample_edge_embedding.view(-1, sample_edge_embedding.shape[-1])
# Append 0s at the end (these will be ignored for loss)
sample_node_embedding = torch.cat((sample_node_embedding,
torch.zeros((max_node_length - count - 1, embedding_dim)).to("cuda")),
dim=0)
sample_edge_embedding = torch.cat((sample_edge_embedding,
torch.zeros((max_edge_length - len(sample_edge_embedding),
3 * embedding_dim)).to("cuda")), dim=0)
batch_node_embedding[batch_index, :, :] = sample_node_embedding
batch_edge_embedding[batch_index, :, :] = sample_edge_embedding
node_logits = self.classifier_node(batch_node_embedding)
edge_logits = self.classifier_edge(batch_edge_embedding)
outputs = (logits, node_logits, edge_logits) + outputs[2:]
if labels is not None:
qa_loss_fct = CrossEntropyLoss()
qa_loss = qa_loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
proof_loss = None
proof_loss_fct = BCEWithLogitsLoss()
for batch_index in range(batch_size):
loss_map = {}
sample_node_logits = node_logits[batch_index, :, :]
sample_edge_logits = edge_logits[batch_index, :, :]
sample_node_label = node_label[batch_index, :, :]
sample_edge_label = edge_label[batch_index, :, :]
for i in range(self.num_proof):
for j in range(self.num_proof):
temp_node_logits = sample_node_logits[:, i]
temp_node_label = sample_node_label.double()[:, j]
temp_node_logits = temp_node_logits[(temp_node_label != -100.)]
temp_node_label = temp_node_label[(temp_node_label != -100.)]
temp_edge_logits = sample_edge_logits[:, i]
temp_edge_label = sample_edge_label.double()[:, j]
temp_edge_logits = temp_edge_logits[(temp_edge_label != -100.)]
temp_edge_label = temp_edge_label[(temp_edge_label != -100.)]
if temp_edge_label.shape[0] != 0:
loss_map[(i, j)] = proof_loss_fct(temp_node_logits, temp_node_label) \
+ proof_loss_fct(temp_edge_logits, temp_edge_label)
else:
loss_map[(i, j)] = proof_loss_fct(temp_node_logits, temp_node_label)
hungarian_loss = self._get_hungarian_loss(loss_map)
if proof_loss is None:
proof_loss = hungarian_loss
else:
proof_loss += hungarian_loss
outputs = (qa_loss+proof_loss, qa_loss, proof_loss) + outputs
return outputs # (total_loss), qa_loss, proof_loss, logits, node_logits, edge_logits, (hidden_states), (attentions)
| en | 0.779755 | # qa_loss, logits, (hidden_states), (attentions) # Add the NAF output at the end # Append 0s at the end (these will be ignored for loss) # (total_loss), qa_loss, proof_loss, logits, node_logits, edge_logits, (hidden_states), (attentions) | 2.176196 | 2 |
myblog/branch/works.py | HkangHero/Student-information | 0 | 6617787 | <gh_stars>0
from django.shortcuts import render,HttpResponse
from branch import models
from branch.models import bill,Cash,Teacher
from branch.views import Password_module
import requests
from bs4 import BeautifulSoup
from branch.teacher import return_id
#义工通知查询 报名
#创建义工发布单
def teacher_work(request):
if request.method=='POST':
token=request.POST.get('token')#获取老师token
title=request.POST.get('title')#内容标题
demand=request.POST.get('demand')#男/女/无要求
Time=request.POST.get('time')
work_time=request.POST.get('work_time')
Impatient=request.POST.get('Impatient')
photos=request.POST.get('photos')
address=request.POST.get('address')
phone_number=request.POST.get('phone_number')
peoples=request.POST.get('peoples')
teacher_id=return_id(token)
if teacher_id == 'error':
return HttpResponse('error')
work_tepy=models.Teacher.objects.filter(teacher_id=teacher_id)[0].teacher_work
models.bill.objects.create(title=title,demand=demand,time=Time,
work_tepy=work_tepy,work_time=work_time,Impatient=Impatient,photos=photos,
address=address,phone_number=phone_number,peoples=int(peoples),teacher_name=teacher_id)
##以后加订阅号通知
return HttpResponse('ok')
#查找出今天或者今天以后的招募义工返回json
#没有便返回None
#bill与Case之间进行对比 如果该学生已经报名了 那么在界面上应该变成灰色 不可报名 正常则是可以报名
#
import time
import json
from django.db.models import F #排序
def find_work(request):
if request.method=='GET':
token=request.GET.get('token')
stu_id=return_id(token)
if stu_id == 'error':
return HttpResponse('error')
date=str(time.strftime("%Y-%m-%d", time.localtime()))#获取当时的日期
#筛选出大于等于日期 并且人数没有报完。
content=[]
is_null=models.bill.objects.filter(time__gte=date).order_by(F('time')) #大于等于
if is_null.exists():
for i in is_null:
stu=models.Cash.objects.filter(cid=i.ud,pass_time=stu_id) #在CASE中查找该任务的id与该学生的id 如果有则说明该学生已经报名了
if stu.exists():
c=1
else:
c=0
if i.number_peoplr<i.peoples :#如果bill中的报名人数小于选择需要的人数 则显示
number={'title':i.title,'demand':i.demand,'time':i.time,'typt':i.work_tepy,'needtime':i.work_time,'name':i.teacher_name,
'Impatient':i.Impatient,'photos':i.photos,'address':i.address,
'phone_number':i.phone_number,'peoples':i.peoples,'number_peoplr':i.number_peoplr,'c':c}
content.append(number)
return HttpResponse(json.dumps(content))
else:
return HttpResponse('None')
#学生报名义工
#先判断Bill中记录 如果报名的人数=需要人数 则人数满
#如果CASH已经存在了 就是报名重复
#否则 记录
def signworks(request):
if request.method=='POST':
token=request.POST.get('token')#学生
uid=request.POST.get('id')#bill账单中的ud
students=return_id(token)
if students == 'error':
return HttpResponse('学生token错误')
se_sql=models.bill.objects.filter(ud=uid)
number=se_sql[0].number_peoplr
if number==se_sql[0].peoples:
return HttpResponse('overflow') #人数已经满
sta=models.Cash.objects.filter(ud=uid,student_id=students)
if sta.exists():
return HttpResponse('你已参加报名')
#先将bill中的保名人数加一
date=str(time.strftime("%Y-%m-%d", time.localtime()))
models.bill.objects.filter(ud=uid).update(number_peoplr=number+1)
models.Cash.objects.create(cid=uid,student_id=students,pass_time=date)
return HttpResponse('ok')
#查询自己正在接到的任务
#要判断返回的不为空
#查找Cash中 所有事件的时间大于等于当天时间
#也没有必要 因为在定时任务的时候要都会删除
#
def process(request):
if request.method=='POST':
token=request.POST.get('token')
students=return_id(token)
content=[]
if students == 'error':
return HttpResponse('学生token错误')
else:
date=str(time.strftime("%Y-%m-%d", time.localtime()))
data=models.Cash.objects.filter(student_id=students,pass_time__gte=date)
if data.exists():
for i in data:
ex=models.bill.objects.filter(ud=i.cid)
#Case中筛选出 符合条件的Ud 在bill中查找出便可
if ex.exists():
lis={'phone':ex[0].photos,'time':ex[0].time,'address':ex[0].address,'id':ex[0].ud}
content.append(lis)
return HttpResponse(json.dumps(content))
#取消报名义工
#先将bill中的报名人数减一
#然后在Case中删除该学生的记录
def del_process(request):
if request.method=='POST':
ud=request.POST.get('id')
token=request.POST.get('token')
stu_id=return_id(token)
if stu_id == 'error':
return HttpResponse('error')
date=models.bill.objects.filter(ud=ud)
if date.exists():
num=date[0].number_peoplr-1
models.bill.objects.filter(ud=ud).update(number_peoplr=num)
#
models.Case.objects.filter(cid=ud,student_id=stu_id).delete()
return HttpResponse('ok')
else:
return HttpResponse('no') | from django.shortcuts import render,HttpResponse
from branch import models
from branch.models import bill,Cash,Teacher
from branch.views import Password_module
import requests
from bs4 import BeautifulSoup
from branch.teacher import return_id
#义工通知查询 报名
#创建义工发布单
def teacher_work(request):
if request.method=='POST':
token=request.POST.get('token')#获取老师token
title=request.POST.get('title')#内容标题
demand=request.POST.get('demand')#男/女/无要求
Time=request.POST.get('time')
work_time=request.POST.get('work_time')
Impatient=request.POST.get('Impatient')
photos=request.POST.get('photos')
address=request.POST.get('address')
phone_number=request.POST.get('phone_number')
peoples=request.POST.get('peoples')
teacher_id=return_id(token)
if teacher_id == 'error':
return HttpResponse('error')
work_tepy=models.Teacher.objects.filter(teacher_id=teacher_id)[0].teacher_work
models.bill.objects.create(title=title,demand=demand,time=Time,
work_tepy=work_tepy,work_time=work_time,Impatient=Impatient,photos=photos,
address=address,phone_number=phone_number,peoples=int(peoples),teacher_name=teacher_id)
##以后加订阅号通知
return HttpResponse('ok')
#查找出今天或者今天以后的招募义工返回json
#没有便返回None
#bill与Case之间进行对比 如果该学生已经报名了 那么在界面上应该变成灰色 不可报名 正常则是可以报名
#
import time
import json
from django.db.models import F #排序
def find_work(request):
if request.method=='GET':
token=request.GET.get('token')
stu_id=return_id(token)
if stu_id == 'error':
return HttpResponse('error')
date=str(time.strftime("%Y-%m-%d", time.localtime()))#获取当时的日期
#筛选出大于等于日期 并且人数没有报完。
content=[]
is_null=models.bill.objects.filter(time__gte=date).order_by(F('time')) #大于等于
if is_null.exists():
for i in is_null:
stu=models.Cash.objects.filter(cid=i.ud,pass_time=stu_id) #在CASE中查找该任务的id与该学生的id 如果有则说明该学生已经报名了
if stu.exists():
c=1
else:
c=0
if i.number_peoplr<i.peoples :#如果bill中的报名人数小于选择需要的人数 则显示
number={'title':i.title,'demand':i.demand,'time':i.time,'typt':i.work_tepy,'needtime':i.work_time,'name':i.teacher_name,
'Impatient':i.Impatient,'photos':i.photos,'address':i.address,
'phone_number':i.phone_number,'peoples':i.peoples,'number_peoplr':i.number_peoplr,'c':c}
content.append(number)
return HttpResponse(json.dumps(content))
else:
return HttpResponse('None')
#学生报名义工
#先判断Bill中记录 如果报名的人数=需要人数 则人数满
#如果CASH已经存在了 就是报名重复
#否则 记录
def signworks(request):
if request.method=='POST':
token=request.POST.get('token')#学生
uid=request.POST.get('id')#bill账单中的ud
students=return_id(token)
if students == 'error':
return HttpResponse('学生token错误')
se_sql=models.bill.objects.filter(ud=uid)
number=se_sql[0].number_peoplr
if number==se_sql[0].peoples:
return HttpResponse('overflow') #人数已经满
sta=models.Cash.objects.filter(ud=uid,student_id=students)
if sta.exists():
return HttpResponse('你已参加报名')
#先将bill中的保名人数加一
date=str(time.strftime("%Y-%m-%d", time.localtime()))
models.bill.objects.filter(ud=uid).update(number_peoplr=number+1)
models.Cash.objects.create(cid=uid,student_id=students,pass_time=date)
return HttpResponse('ok')
#查询自己正在接到的任务
#要判断返回的不为空
#查找Cash中 所有事件的时间大于等于当天时间
#也没有必要 因为在定时任务的时候要都会删除
#
def process(request):
if request.method=='POST':
token=request.POST.get('token')
students=return_id(token)
content=[]
if students == 'error':
return HttpResponse('学生token错误')
else:
date=str(time.strftime("%Y-%m-%d", time.localtime()))
data=models.Cash.objects.filter(student_id=students,pass_time__gte=date)
if data.exists():
for i in data:
ex=models.bill.objects.filter(ud=i.cid)
#Case中筛选出 符合条件的Ud 在bill中查找出便可
if ex.exists():
lis={'phone':ex[0].photos,'time':ex[0].time,'address':ex[0].address,'id':ex[0].ud}
content.append(lis)
return HttpResponse(json.dumps(content))
#取消报名义工
#先将bill中的报名人数减一
#然后在Case中删除该学生的记录
def del_process(request):
if request.method=='POST':
ud=request.POST.get('id')
token=request.POST.get('token')
stu_id=return_id(token)
if stu_id == 'error':
return HttpResponse('error')
date=models.bill.objects.filter(ud=ud)
if date.exists():
num=date[0].number_peoplr-1
models.bill.objects.filter(ud=ud).update(number_peoplr=num)
#
models.Case.objects.filter(cid=ud,student_id=stu_id).delete()
return HttpResponse('ok')
else:
return HttpResponse('no') | zh | 0.993168 | #义工通知查询 报名 #创建义工发布单 #获取老师token #内容标题 #男/女/无要求 ##以后加订阅号通知 #查找出今天或者今天以后的招募义工返回json #没有便返回None #bill与Case之间进行对比 如果该学生已经报名了 那么在界面上应该变成灰色 不可报名 正常则是可以报名 # #排序 #获取当时的日期 #筛选出大于等于日期 并且人数没有报完。 #大于等于 #在CASE中查找该任务的id与该学生的id 如果有则说明该学生已经报名了 #如果bill中的报名人数小于选择需要的人数 则显示 #学生报名义工 #先判断Bill中记录 如果报名的人数=需要人数 则人数满 #如果CASH已经存在了 就是报名重复 #否则 记录 #学生 #bill账单中的ud #人数已经满 #先将bill中的保名人数加一 #查询自己正在接到的任务 #要判断返回的不为空 #查找Cash中 所有事件的时间大于等于当天时间 #也没有必要 因为在定时任务的时候要都会删除 # #Case中筛选出 符合条件的Ud 在bill中查找出便可 #取消报名义工 #先将bill中的报名人数减一 #然后在Case中删除该学生的记录 # | 2.162621 | 2 |
Python/List/program to clone or copy a list.py | aarti835/Number-Theory | 9 | 6617788 | <filename>Python/List/program to clone or copy a list.py
# Write a Python program to clone or copy a list
list = ['abc', 'xyz', 'aba', '1221']
print(list)
l = list.copy()
print(l) | <filename>Python/List/program to clone or copy a list.py
# Write a Python program to clone or copy a list
list = ['abc', 'xyz', 'aba', '1221']
print(list)
l = list.copy()
print(l) | en | 0.775588 | # Write a Python program to clone or copy a list | 3.745885 | 4 |
downloadsupport/models.py | uktrade/fadmin2 | 3 | 6617789 | <reponame>uktrade/fadmin2
from django.db import models
from core.metamodels import (
BaseModel,
)
class DownloadLog(BaseModel):
CC_AT = "CC"
NAC_H_AT = "NAC H"
CC_TRAVEL = "CC TRAV"
DOWNLOAD_CHOICE = (
(CC_TRAVEL, "Cost Centre for Trainline"),
(CC_AT, "Cost Centre Hierarchy for Admin Tool"),
(NAC_H_AT, "NAC Hierarchy for Admin Tool"),
)
download_type = models.CharField(
"Download Type", choices=DOWNLOAD_CHOICE, max_length=300
)
downloader = models.CharField("Download by", max_length=300)
download_time = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Download Log"
verbose_name_plural = "Download Logs"
ordering = ["-download_time"]
| from django.db import models
from core.metamodels import (
BaseModel,
)
class DownloadLog(BaseModel):
CC_AT = "CC"
NAC_H_AT = "NAC H"
CC_TRAVEL = "CC TRAV"
DOWNLOAD_CHOICE = (
(CC_TRAVEL, "Cost Centre for Trainline"),
(CC_AT, "Cost Centre Hierarchy for Admin Tool"),
(NAC_H_AT, "NAC Hierarchy for Admin Tool"),
)
download_type = models.CharField(
"Download Type", choices=DOWNLOAD_CHOICE, max_length=300
)
downloader = models.CharField("Download by", max_length=300)
download_time = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Download Log"
verbose_name_plural = "Download Logs"
ordering = ["-download_time"] | none | 1 | 2.116419 | 2 | |
trafficgraphnn/genconfig/tls_config.py | arsenious/trafficgraphnn | 0 | 6617790 | <reponame>arsenious/trafficgraphnn
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 2 11:42:42 2018
To generate traffic light phases where every lane is prioritized.
Just for 3 lanes per edge!
@author: simon
"""
import os, sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import xml.etree.cElementTree as et
def tls_config(network_path):
#Attention: This is the configuration for tl for 3 lane grid network!
phase_duration = ['33', '3', '33', '3', '33', '3', '33', '3']
phase_state = ['GGGGGGrrrrrrrrrrrrrrrrrr', 'yyyyyyrrrrrrrrrrrrrrrrrr',
'rrrrrrrrrrrrGGGGGGrrrrrr', 'rrrrrrrrrrrryyyyyyrrrrrr',
'rrrrrrrrrrrrrrrrrrGGGGGG', 'rrrrrrrrrrrrrrrrrryyyyyy',
'rrrrrrGGGGGGrrrrrrrrrrrr', 'rrrrrryyyyyyrrrrrrrrrrrr'
]
parsed_net_tree = et.parse(network_path)
root = parsed_net_tree.getroot()
for child in root:
if child.tag == 'tlLogic':
grandchildren = child.getchildren()
if len(grandchildren) == 8:
for phase, cnt in zip(grandchildren, range(0, 8)):
child.remove(phase)
attribute = {}
attribute['duration'] = phase_duration[cnt]
attribute['state'] = phase_state[cnt]
et.SubElement(child, 'phase', attribute)
parsed_net_tree.write(network_path)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 2 11:42:42 2018
To generate traffic light phases where every lane is prioritized.
Just for 3 lanes per edge!
@author: simon
"""
import os, sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import xml.etree.cElementTree as et
def tls_config(network_path):
#Attention: This is the configuration for tl for 3 lane grid network!
phase_duration = ['33', '3', '33', '3', '33', '3', '33', '3']
phase_state = ['GGGGGGrrrrrrrrrrrrrrrrrr', 'yyyyyyrrrrrrrrrrrrrrrrrr',
'rrrrrrrrrrrrGGGGGGrrrrrr', 'rrrrrrrrrrrryyyyyyrrrrrr',
'rrrrrrrrrrrrrrrrrrGGGGGG', 'rrrrrrrrrrrrrrrrrryyyyyy',
'rrrrrrGGGGGGrrrrrrrrrrrr', 'rrrrrryyyyyyrrrrrrrrrrrr'
]
parsed_net_tree = et.parse(network_path)
root = parsed_net_tree.getroot()
for child in root:
if child.tag == 'tlLogic':
grandchildren = child.getchildren()
if len(grandchildren) == 8:
for phase, cnt in zip(grandchildren, range(0, 8)):
child.remove(phase)
attribute = {}
attribute['duration'] = phase_duration[cnt]
attribute['state'] = phase_state[cnt]
et.SubElement(child, 'phase', attribute)
parsed_net_tree.write(network_path) | en | 0.760819 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Jul 2 11:42:42 2018 To generate traffic light phases where every lane is prioritized. Just for 3 lanes per edge! @author: simon #Attention: This is the configuration for tl for 3 lane grid network! | 2.830525 | 3 |
cactusbot/commands/magic/__init__.py | CactusBot/CactusBot | 23 | 6617791 | <gh_stars>10-100
"""Define custom magic commands."""
from .. import Command
from .alias import Alias
from .cactus import Cactus
from .command import Meta
from .config import Config
from .cube import Cube, Temmie
from .quote import Quote
from .repeat import Repeat
from .social import Social
from .trust import Trust
from .uptime import Uptime
from .multi import Multi
COMMANDS = (Alias, Cactus, Meta, Config, Cube, Temmie,
Quote, Repeat, Social, Trust, Uptime, Multi)
__all__ = ("Alias", "Command", "Cactus", "Meta", "Config", "Cube",
"Temmie", "Quote", "Repeat", "Social", "Trust", "Uptime",
"Multi")
| """Define custom magic commands."""
from .. import Command
from .alias import Alias
from .cactus import Cactus
from .command import Meta
from .config import Config
from .cube import Cube, Temmie
from .quote import Quote
from .repeat import Repeat
from .social import Social
from .trust import Trust
from .uptime import Uptime
from .multi import Multi
COMMANDS = (Alias, Cactus, Meta, Config, Cube, Temmie,
Quote, Repeat, Social, Trust, Uptime, Multi)
__all__ = ("Alias", "Command", "Cactus", "Meta", "Config", "Cube",
"Temmie", "Quote", "Repeat", "Social", "Trust", "Uptime",
"Multi") | en | 0.75508 | Define custom magic commands. | 1.607236 | 2 |
nma_algo_char/common.py | victor-gil-sepulveda/PhD-ANMPythonHelpers | 1 | 6617792 | <gh_stars>1-10
"""
Created on Nov 21, 2015
@author: victor
"""
import os
import json
import time
import sys
import re
import errno
import numbers
import numpy
from anmichelpers.tools.tools import norm
from pyRMSD.RMSDCalculator import RMSDCalculator
import random
from math import exp
from _collections import defaultdict
import matplotlib.pyplot as plt
import math
class LineParser:
def __init__(self, tag, position, conversion, split_token = None):
self.data = []
self.tag = tag
self.tag_len = len(tag)
self.position = position
self.split_token = split_token
self.conversion_function = conversion
def parse_line(self, line):
if line[0:self.tag_len] == self.tag:
try:
self.data.append(self.conversion_function(line.split(self.split_token)[self.position]))
except Exception, e:
print line
raise e
class LineCounter:
def __init__(self, tag):
self.data = []
self.tag = tag
self.counter = 0
def parse_line(self, line):
if self.tag in line:
self.counter = self.counter + 1
class MetropolisMCSimulator:
BOLTZMANN_KCAL_MOL = 0.001987207
def __init__(self, energy_increments):
self.energy_increments = energy_increments
def expected_probability(self, temperature):
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
experiment_acceptance = []
for inc_u in self.energy_increments:
experiment_acceptance.append(min(1.0,max(0.0,exp(-(beta*inc_u)))))
return numpy.mean(experiment_acceptance), numpy.std(experiment_acceptance)
@classmethod
def energy_for_probability(cls, prob, temperature):
beta = MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature
return -math.log(prob)*beta
def perform_simulation(self, number_of_samples, number_of_tries, temperature):
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
experiment_acceptance = []
for _ in range(number_of_tries):
energy_bootstrap = random.sample(self.energy_increments, number_of_samples)
num_accepted = 0
for energy_diff in energy_bootstrap:
if energy_diff <= 0:
num_accepted += 1
else:
prob = exp(-(beta*energy_diff))
if prob > random.random():
num_accepted += 1
experiment_acceptance.append(float(num_accepted)/number_of_samples)
return numpy.mean(experiment_acceptance), numpy.std(experiment_acceptance)
def who_is_accepted(self, temperature):
accepted = []
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
for i,energy_diff in enumerate(self.energy_increments):
prob = exp(-(beta*energy_diff))
if prob > random.random():
accepted.append(i)
return accepted
def pair_parameter_values(parameter_keys, parameters):
key1, key2 = parameter_keys[0], parameter_keys[1]
for vals1 in parameters[key1]:
for vals2 in parameters[key2]:
yield ((key1, vals1),(key2,vals2))
def set_parameter_value(key_description, param_dict, value):
keys = key_description.split(".")
tmp_dic = param_dict
for i, k in enumerate(keys[:-1]):
val = tmp_dic[k]
if isinstance(val, list):
# Change in all the items (all items must have same structure)
sub_search = ".".join(keys[i+1:])
for item in val:
try:
set_parameter_value(sub_search, item, value)
except:
pass
return
else:
tmp_dic = val
tmp_dic[keys[-1]] = value
def prepare_workspace(base_data):
create_directory(base_data["workspace"])
os.system("cp -r %s %s"%(base_data["initial_structure"], base_data["workspace"]))
if base_data["PELE_data"]["action"] == "COPY":
os.system("cp -r %s %s"%(base_data["PELE_data"]["path"], base_data["workspace"]))
if base_data["PELE_data"]["action"] == "LINK":
os.system("ln -s %s %s"%(base_data["PELE_data"]["path"], base_data["workspace"]))
if base_data["PELE_docs"]["action"] == "COPY":
os.system("cp -r %s %s"%(base_data["PELE_docs"]["path"],
os.path.join(base_data["workspace"],"Documents")))
if base_data["PELE_docs"]["action"] == "LINK":
os.system("ln -s %s %s"%(base_data["PELE_docs"]["path"],
os.path.join(base_data["workspace"],"Documents")))
def run_pele_in_folder( control_file_dict, folder, experiment_data, test = False, sleep_time = 10, return_command = False):
current_dir = os.getcwd()
os.chdir(experiment_data["workspace"])
create_directory(folder)
create_directory(os.path.join(folder,"info"))
control_file_path = os.path.join(folder,'control.json')
out_file_path = os.path.join(folder,'out')
with open(control_file_path, 'w') as outfile:
json.dump(control_file_dict, outfile, indent=4)
cmd = "%s %s > %s"%(experiment_data["PELE_exec_cmd"],control_file_path,out_file_path)
print cmd
sys.stdout.flush()
if not test:
os.system(cmd)
else:
time.sleep(sleep_time)
os.chdir(current_dir)
if return_command:
return cmd
def change_output_path_parameters(control_dict, params_dict, folder):
for complete_key in params_dict:
set_parameter_value(complete_key, control_dict, os.path.join(folder, params_dict[complete_key]))
def wait_for_results_and_close(pool, results, query_time):
finished = 0
while finished != len(results):
finished = 0
for result in results:
if result.ready():
finished += 1
time.sleep(query_time)
print "All processes have finished"
pool.close()
def load_control_json(json_script):
json_string = remove_comments(open(json_script).read())
return convert_to_utf8(json.loads(json_string))
def remove_comments(string):
"""
Removes /**/ and // comments from a string (used with the control script).
From http://stackoverflow.com/questions/2319019/using-regex-to-remove-comments-from-source-files
"""
string = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,string) # remove all occurance streamed comments (/*COMMENT */) from string
string = re.sub(re.compile("//.*?\n" ) ,"" ,string) # remove all occurance singleline comments (//COMMENT\n ) from string
return string
def convert_to_utf8(my_input):
"""
Recursively encodes all strings of an input dictionary as UTF-8. Useful to eliminate unicode strings.
@param my_input: A dictionary object.
@return: Encoded dictionary.
"""
if isinstance(my_input, dict):
return {convert_to_utf8(key): convert_to_utf8(value) for key, value in my_input.iteritems()}
elif isinstance(my_input, list):
return [convert_to_utf8(element) for element in my_input]
elif isinstance(my_input, unicode):
return my_input.encode('utf-8')
else:
return my_input
def create_directory(directory_path, ensure_writability = False):
"""
Creates a directory (with subdirs) if it doesn't exist.
@param directory_path: the path of the directory and subdirectories to be created.
"""
if ensure_writability:
if not os.access(os.path.dirname(directory_path), os.W_OK):
return False
try:
os.makedirs(directory_path)
return True
except OSError, e:
if e.errno != errno.EEXIST:
raise
return False
def parameter_value_to_string(val):
if isinstance(val, basestring):
return val
else:
if isinstance(val, numbers.Integral):
return "%d"%val
else:
return "%.2f"%val
def process_after_perturb_max_and_mean_disp(data):
number_of_sets = len(data["coords_before"])
num_coords = len(data["coords_before"][0])
coordsets_before = numpy.array(data["coords_before"])
coordsets_before = numpy.reshape(coordsets_before, (number_of_sets, num_coords/3, 3))
coordsets_after = numpy.array(data["coords_after"])
coordsets_after = numpy.reshape(coordsets_after, (number_of_sets, num_coords/3, 3))
superimposed_translations = []
for i in range(number_of_sets):
coords = numpy.array([coordsets_before[i], coordsets_after[i]])
calculator = RMSDCalculator(calculatorType = "QTRFIT_OMP_CALCULATOR",
fittingCoordsets = coords)
_, rearranged_coords = calculator.oneVsTheOthers(0, get_superposed_coordinates = True)
superimposed_translations.append(rearranged_coords[1]-rearranged_coords[0])
translations = numpy.array(superimposed_translations)
norms = numpy.array([norm(t) for t in translations])
return numpy.max(norms, axis = 1), numpy.mean(norms, axis = 1)
def get_values_by_hue(val_x, val_y, hue_vals):
# assert all 3 have the same length
if len(val_x) == len(val_y) and len(val_y)== len(hue_vals):
vals_by_hue = defaultdict(lambda: {"x":[],"y":[]})
for i in range(len(val_x)):
vals_by_hue[hue_vals[i]]["x"].append(val_x[i])
vals_by_hue[hue_vals[i]]["y"].append(val_y[i])
return vals_by_hue
else:
print "Error::get_values_by_hue arays have not the same length (%d, %d, %d)"%(len(val_x), len(val_y), len(hue_vals))
exit(-1)
def scatter_plot_by_hue(x, y, hue, colors):
vals_by_hue = get_values_by_hue(x, y, hue)
for i,hue_key in enumerate(vals_by_hue):
plt.scatter(vals_by_hue[hue_key]["x"],vals_by_hue[hue_key]["y"], label = str(hue_key), color = colors[i], alpha = 0.6)
def prepare_subplots(row_len, col_len):
if row_len > 1 or col_len > 1:
f, axes = plt.subplots( col_len, row_len, sharey='row', sharex='col')
f.subplots_adjust(hspace=0.4, wspace=0.3 )
f.set_size_inches(12, 12, forward=True)
else:
f = plt.gcf()
axes = {(0,0): plt.gca()}
return f, axes
| """
Created on Nov 21, 2015
@author: victor
"""
import os
import json
import time
import sys
import re
import errno
import numbers
import numpy
from anmichelpers.tools.tools import norm
from pyRMSD.RMSDCalculator import RMSDCalculator
import random
from math import exp
from _collections import defaultdict
import matplotlib.pyplot as plt
import math
class LineParser:
def __init__(self, tag, position, conversion, split_token = None):
self.data = []
self.tag = tag
self.tag_len = len(tag)
self.position = position
self.split_token = split_token
self.conversion_function = conversion
def parse_line(self, line):
if line[0:self.tag_len] == self.tag:
try:
self.data.append(self.conversion_function(line.split(self.split_token)[self.position]))
except Exception, e:
print line
raise e
class LineCounter:
def __init__(self, tag):
self.data = []
self.tag = tag
self.counter = 0
def parse_line(self, line):
if self.tag in line:
self.counter = self.counter + 1
class MetropolisMCSimulator:
BOLTZMANN_KCAL_MOL = 0.001987207
def __init__(self, energy_increments):
self.energy_increments = energy_increments
def expected_probability(self, temperature):
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
experiment_acceptance = []
for inc_u in self.energy_increments:
experiment_acceptance.append(min(1.0,max(0.0,exp(-(beta*inc_u)))))
return numpy.mean(experiment_acceptance), numpy.std(experiment_acceptance)
@classmethod
def energy_for_probability(cls, prob, temperature):
beta = MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature
return -math.log(prob)*beta
def perform_simulation(self, number_of_samples, number_of_tries, temperature):
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
experiment_acceptance = []
for _ in range(number_of_tries):
energy_bootstrap = random.sample(self.energy_increments, number_of_samples)
num_accepted = 0
for energy_diff in energy_bootstrap:
if energy_diff <= 0:
num_accepted += 1
else:
prob = exp(-(beta*energy_diff))
if prob > random.random():
num_accepted += 1
experiment_acceptance.append(float(num_accepted)/number_of_samples)
return numpy.mean(experiment_acceptance), numpy.std(experiment_acceptance)
def who_is_accepted(self, temperature):
accepted = []
beta = 1.0/(MetropolisMCSimulator.BOLTZMANN_KCAL_MOL * temperature)
for i,energy_diff in enumerate(self.energy_increments):
prob = exp(-(beta*energy_diff))
if prob > random.random():
accepted.append(i)
return accepted
def pair_parameter_values(parameter_keys, parameters):
key1, key2 = parameter_keys[0], parameter_keys[1]
for vals1 in parameters[key1]:
for vals2 in parameters[key2]:
yield ((key1, vals1),(key2,vals2))
def set_parameter_value(key_description, param_dict, value):
keys = key_description.split(".")
tmp_dic = param_dict
for i, k in enumerate(keys[:-1]):
val = tmp_dic[k]
if isinstance(val, list):
# Change in all the items (all items must have same structure)
sub_search = ".".join(keys[i+1:])
for item in val:
try:
set_parameter_value(sub_search, item, value)
except:
pass
return
else:
tmp_dic = val
tmp_dic[keys[-1]] = value
def prepare_workspace(base_data):
create_directory(base_data["workspace"])
os.system("cp -r %s %s"%(base_data["initial_structure"], base_data["workspace"]))
if base_data["PELE_data"]["action"] == "COPY":
os.system("cp -r %s %s"%(base_data["PELE_data"]["path"], base_data["workspace"]))
if base_data["PELE_data"]["action"] == "LINK":
os.system("ln -s %s %s"%(base_data["PELE_data"]["path"], base_data["workspace"]))
if base_data["PELE_docs"]["action"] == "COPY":
os.system("cp -r %s %s"%(base_data["PELE_docs"]["path"],
os.path.join(base_data["workspace"],"Documents")))
if base_data["PELE_docs"]["action"] == "LINK":
os.system("ln -s %s %s"%(base_data["PELE_docs"]["path"],
os.path.join(base_data["workspace"],"Documents")))
def run_pele_in_folder( control_file_dict, folder, experiment_data, test = False, sleep_time = 10, return_command = False):
current_dir = os.getcwd()
os.chdir(experiment_data["workspace"])
create_directory(folder)
create_directory(os.path.join(folder,"info"))
control_file_path = os.path.join(folder,'control.json')
out_file_path = os.path.join(folder,'out')
with open(control_file_path, 'w') as outfile:
json.dump(control_file_dict, outfile, indent=4)
cmd = "%s %s > %s"%(experiment_data["PELE_exec_cmd"],control_file_path,out_file_path)
print cmd
sys.stdout.flush()
if not test:
os.system(cmd)
else:
time.sleep(sleep_time)
os.chdir(current_dir)
if return_command:
return cmd
def change_output_path_parameters(control_dict, params_dict, folder):
for complete_key in params_dict:
set_parameter_value(complete_key, control_dict, os.path.join(folder, params_dict[complete_key]))
def wait_for_results_and_close(pool, results, query_time):
finished = 0
while finished != len(results):
finished = 0
for result in results:
if result.ready():
finished += 1
time.sleep(query_time)
print "All processes have finished"
pool.close()
def load_control_json(json_script):
json_string = remove_comments(open(json_script).read())
return convert_to_utf8(json.loads(json_string))
def remove_comments(string):
"""
Removes /**/ and // comments from a string (used with the control script).
From http://stackoverflow.com/questions/2319019/using-regex-to-remove-comments-from-source-files
"""
string = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,string) # remove all occurance streamed comments (/*COMMENT */) from string
string = re.sub(re.compile("//.*?\n" ) ,"" ,string) # remove all occurance singleline comments (//COMMENT\n ) from string
return string
def convert_to_utf8(my_input):
"""
Recursively encodes all strings of an input dictionary as UTF-8. Useful to eliminate unicode strings.
@param my_input: A dictionary object.
@return: Encoded dictionary.
"""
if isinstance(my_input, dict):
return {convert_to_utf8(key): convert_to_utf8(value) for key, value in my_input.iteritems()}
elif isinstance(my_input, list):
return [convert_to_utf8(element) for element in my_input]
elif isinstance(my_input, unicode):
return my_input.encode('utf-8')
else:
return my_input
def create_directory(directory_path, ensure_writability = False):
"""
Creates a directory (with subdirs) if it doesn't exist.
@param directory_path: the path of the directory and subdirectories to be created.
"""
if ensure_writability:
if not os.access(os.path.dirname(directory_path), os.W_OK):
return False
try:
os.makedirs(directory_path)
return True
except OSError, e:
if e.errno != errno.EEXIST:
raise
return False
def parameter_value_to_string(val):
if isinstance(val, basestring):
return val
else:
if isinstance(val, numbers.Integral):
return "%d"%val
else:
return "%.2f"%val
def process_after_perturb_max_and_mean_disp(data):
number_of_sets = len(data["coords_before"])
num_coords = len(data["coords_before"][0])
coordsets_before = numpy.array(data["coords_before"])
coordsets_before = numpy.reshape(coordsets_before, (number_of_sets, num_coords/3, 3))
coordsets_after = numpy.array(data["coords_after"])
coordsets_after = numpy.reshape(coordsets_after, (number_of_sets, num_coords/3, 3))
superimposed_translations = []
for i in range(number_of_sets):
coords = numpy.array([coordsets_before[i], coordsets_after[i]])
calculator = RMSDCalculator(calculatorType = "QTRFIT_OMP_CALCULATOR",
fittingCoordsets = coords)
_, rearranged_coords = calculator.oneVsTheOthers(0, get_superposed_coordinates = True)
superimposed_translations.append(rearranged_coords[1]-rearranged_coords[0])
translations = numpy.array(superimposed_translations)
norms = numpy.array([norm(t) for t in translations])
return numpy.max(norms, axis = 1), numpy.mean(norms, axis = 1)
def get_values_by_hue(val_x, val_y, hue_vals):
# assert all 3 have the same length
if len(val_x) == len(val_y) and len(val_y)== len(hue_vals):
vals_by_hue = defaultdict(lambda: {"x":[],"y":[]})
for i in range(len(val_x)):
vals_by_hue[hue_vals[i]]["x"].append(val_x[i])
vals_by_hue[hue_vals[i]]["y"].append(val_y[i])
return vals_by_hue
else:
print "Error::get_values_by_hue arays have not the same length (%d, %d, %d)"%(len(val_x), len(val_y), len(hue_vals))
exit(-1)
def scatter_plot_by_hue(x, y, hue, colors):
vals_by_hue = get_values_by_hue(x, y, hue)
for i,hue_key in enumerate(vals_by_hue):
plt.scatter(vals_by_hue[hue_key]["x"],vals_by_hue[hue_key]["y"], label = str(hue_key), color = colors[i], alpha = 0.6)
def prepare_subplots(row_len, col_len):
if row_len > 1 or col_len > 1:
f, axes = plt.subplots( col_len, row_len, sharey='row', sharex='col')
f.subplots_adjust(hspace=0.4, wspace=0.3 )
f.set_size_inches(12, 12, forward=True)
else:
f = plt.gcf()
axes = {(0,0): plt.gca()}
return f, axes | en | 0.762312 | Created on Nov 21, 2015 @author: victor # Change in all the items (all items must have same structure) Removes /**/ and // comments from a string (used with the control script). From http://stackoverflow.com/questions/2319019/using-regex-to-remove-comments-from-source-files # remove all occurance streamed comments (/*COMMENT */) from string # remove all occurance singleline comments (//COMMENT\n ) from string Recursively encodes all strings of an input dictionary as UTF-8. Useful to eliminate unicode strings. @param my_input: A dictionary object. @return: Encoded dictionary. Creates a directory (with subdirs) if it doesn't exist. @param directory_path: the path of the directory and subdirectories to be created. # assert all 3 have the same length | 2.545945 | 3 |
src/ui/core/table.py | temportalflux/MelodyBot | 0 | 6617793 | <reponame>temportalflux/MelodyBot
import tkinter as tk
import tkinter.ttk as ttk
class Table():
def __init__(self, parent, logger):
self.logger = logger
self.frame = tk.Frame(parent)
self.column_list = []
self.treeview = None
self.scrollbar = None
self.height_in_rows = None
self.on_selected_callbacks = []
def get_item_list(self):
# STUB: Override by subclasses
return []
def create_column_values_for(self, item):
# STUB: Override by subclasses
return ()
def on_click_header(self, column, event):
# STUB: Overriden in subclasses
pass
def on_click_item(self, event):
pass
def on_double_click_item(self, event):
# STUB: Overriden by subclasses
pass
def display_context_menu(self, event):
# STUB: Override by subclasses
pass
def pack(self, fill=None, side=None, expand=False, padx=None, pady=None):
self.frame.pack(fill=fill, side=side, expand=expand, padx=padx, pady=pady)
def add_column(self, name):
self.column_list.append(name)
def init_treeview(self):
self.treeview = ttk.Treeview(master=self.frame, columns=self.column_list, height=self.height_in_rows)
self.treeview.column('#0', width=0, stretch=tk.NO)
for column_name in self.column_list:
self.treeview.heading(column_name, text=column_name, anchor=tk.CENTER)
self.populate_view()
self.treeview.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
# https://www.python-course.eu/tkinter_events_binds.php
self.treeview.bind('<Button-1>', self.on_click_mouse_left)
self.treeview.bind('<Double-Button-1>', self.on_double_mouse_left)
self.treeview.bind('<Button-3>', self.on_click_mouse_right)
self.scrollbar = tk.Scrollbar(master=self.frame, orient=tk.VERTICAL)
self.scrollbar.pack(fill=tk.Y, side=tk.RIGHT)
self.treeview.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.treeview.yview)
def identify_column(self, event):
column = self.treeview.identify('column', event.x, event.y)
column = int(column[1:]) - 1
return column
def get_idenfitied_column(self, event):
return self.column_list[self.identify_column(event)]
def get_selected_item(self):
selection = self.treeview.selection()
if len(selection) > 0:
iid = selection[0]
return self.items_by_iid[iid]
return None
def get_selected_iid(self):
selection = self.treeview.selection()
if len(selection) > 0:
return selection[0]
return None
def clear_view(self):
self.treeview.delete(*self.treeview.get_children())
def populate_view(self):
items = self.get_item_list()
self.clear_item_iids()
for i in range(0, len(items)):
self.prepopulate_item(items[i])
values = self.create_column_values_for(items[i])
iid = self.treeview.insert(
parent='', index=i, text='',
values=values
)
self.iids.append(iid)
self.set_item_iid(items[i], iid)
def prepopulate_item(self, item):
pass
def clear_item_iids(self):
self.iids = []
self.items_by_iid = {}
def set_item_iid(self, item, iid):
self.items_by_iid[iid] = item
def refresh(self):
self.clear_view()
self.populate_view()
def refresh_selected_row(self):
self.treeview.item(self.get_selected_iid(), values=self.create_column_values_for(self.get_selected_item()))
def add_on_selected_callback(self, callback):
self.on_selected_callbacks.append(callback)
def remove_on_selected_callback(self, callback):
self.on_selected_callbacks.remove(callback)
def dispatch_on_selected(self):
for callback in self.on_selected_callbacks:
callback(self.get_selected_item())
def set_selected_item(self, iid):
self.treeview.selection_set(iid)
self.dispatch_on_selected()
def select_item_by_index(self, index):
self.set_selected_item(self.iids[index])
def on_click_mouse_left(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'heading':
self.on_click_header(self.identify_column(event), event)
elif region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.on_click_item(event)
def on_double_mouse_left(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.on_double_click_item(event)
def on_click_mouse_right(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.display_context_menu(event)
| import tkinter as tk
import tkinter.ttk as ttk
class Table():
def __init__(self, parent, logger):
self.logger = logger
self.frame = tk.Frame(parent)
self.column_list = []
self.treeview = None
self.scrollbar = None
self.height_in_rows = None
self.on_selected_callbacks = []
def get_item_list(self):
# STUB: Override by subclasses
return []
def create_column_values_for(self, item):
# STUB: Override by subclasses
return ()
def on_click_header(self, column, event):
# STUB: Overriden in subclasses
pass
def on_click_item(self, event):
pass
def on_double_click_item(self, event):
# STUB: Overriden by subclasses
pass
def display_context_menu(self, event):
# STUB: Override by subclasses
pass
def pack(self, fill=None, side=None, expand=False, padx=None, pady=None):
self.frame.pack(fill=fill, side=side, expand=expand, padx=padx, pady=pady)
def add_column(self, name):
self.column_list.append(name)
def init_treeview(self):
self.treeview = ttk.Treeview(master=self.frame, columns=self.column_list, height=self.height_in_rows)
self.treeview.column('#0', width=0, stretch=tk.NO)
for column_name in self.column_list:
self.treeview.heading(column_name, text=column_name, anchor=tk.CENTER)
self.populate_view()
self.treeview.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
# https://www.python-course.eu/tkinter_events_binds.php
self.treeview.bind('<Button-1>', self.on_click_mouse_left)
self.treeview.bind('<Double-Button-1>', self.on_double_mouse_left)
self.treeview.bind('<Button-3>', self.on_click_mouse_right)
self.scrollbar = tk.Scrollbar(master=self.frame, orient=tk.VERTICAL)
self.scrollbar.pack(fill=tk.Y, side=tk.RIGHT)
self.treeview.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.treeview.yview)
def identify_column(self, event):
column = self.treeview.identify('column', event.x, event.y)
column = int(column[1:]) - 1
return column
def get_idenfitied_column(self, event):
return self.column_list[self.identify_column(event)]
def get_selected_item(self):
selection = self.treeview.selection()
if len(selection) > 0:
iid = selection[0]
return self.items_by_iid[iid]
return None
def get_selected_iid(self):
selection = self.treeview.selection()
if len(selection) > 0:
return selection[0]
return None
def clear_view(self):
self.treeview.delete(*self.treeview.get_children())
def populate_view(self):
items = self.get_item_list()
self.clear_item_iids()
for i in range(0, len(items)):
self.prepopulate_item(items[i])
values = self.create_column_values_for(items[i])
iid = self.treeview.insert(
parent='', index=i, text='',
values=values
)
self.iids.append(iid)
self.set_item_iid(items[i], iid)
def prepopulate_item(self, item):
pass
def clear_item_iids(self):
self.iids = []
self.items_by_iid = {}
def set_item_iid(self, item, iid):
self.items_by_iid[iid] = item
def refresh(self):
self.clear_view()
self.populate_view()
def refresh_selected_row(self):
self.treeview.item(self.get_selected_iid(), values=self.create_column_values_for(self.get_selected_item()))
def add_on_selected_callback(self, callback):
self.on_selected_callbacks.append(callback)
def remove_on_selected_callback(self, callback):
self.on_selected_callbacks.remove(callback)
def dispatch_on_selected(self):
for callback in self.on_selected_callbacks:
callback(self.get_selected_item())
def set_selected_item(self, iid):
self.treeview.selection_set(iid)
self.dispatch_on_selected()
def select_item_by_index(self, index):
self.set_selected_item(self.iids[index])
def on_click_mouse_left(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'heading':
self.on_click_header(self.identify_column(event), event)
elif region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.on_click_item(event)
def on_double_mouse_left(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.on_double_click_item(event)
def on_click_mouse_right(self, event):
region = self.treeview.identify('region', event.x, event.y)
if region == 'cell':
iid = self.treeview.identify('row', event.x, event.y)
self.set_selected_item(iid)
self.display_context_menu(event) | en | 0.74686 | # STUB: Override by subclasses # STUB: Override by subclasses # STUB: Overriden in subclasses # STUB: Overriden by subclasses # STUB: Override by subclasses # https://www.python-course.eu/tkinter_events_binds.php | 3.202282 | 3 |
training/utilities/utils.py | ndrpnt/gpt-code-clippy | 435 | 6617794 | <reponame>ndrpnt/gpt-code-clippy
import numpy as np
import threading
import queue
import multiprocessing
from collections import defaultdict
import jax
import jax.numpy as jnp
def make_batch(samples):
batch = {k:jnp.array(v) for k,v in samples.items()}
batch['labels'] = batch['input_ids'].copy()
return batch
class PrefetchDataloaderTread(threading.Thread):
"Prefetch dataloader for IterableDataset"
def __init__(self, dataset, max_steps, batch_size, sequence_length, prefetch_buffer=1, shuffle=True, shuffle_buffer=1000, seed=0):
super().__init__(daemon=True)
self.max_steps = max_steps
self.bs = batch_size
self.seq_len = sequence_length
self.max_length = batch_size * sequence_length
self.prefetch_buffer = prefetch_buffer
self.shuffle = shuffle
self.shuffle_buffer = shuffle_buffer
self.seed = seed
self.dataset = dataset
if shuffle:
shuffled_dataset = dataset.shuffle(shuffle_buffer, seed=self.seed)
self.seed += 1
self.ds_iter = iter(shuffled_dataset)
else:
self.ds_iter = iter(dataset)
self.queue = queue.Queue(prefetch_buffer)
self.rem = defaultdict(list)
self.start()
def make_iter(self):
if self.shuffle:
shuffled_dataset = self.dataset.shuffle(self.shuffle_buffer, seed=self.seed)
self.seed += 1
self.ds_iter = iter(shuffled_dataset)
else:
self.ds_iter = iter(self.dataset)
def __next__(self):
batch = self.queue.get()
return batch
def run(self):
i = 0
while True and i < self.max_steps:
i += 1
# prepair next batch
sample = self.rem.copy()
l = len(sample["input_ids"])
max_length = self.max_length
while l < max_length:
try:
next_sample = next(self.ds_iter)
except StopIteration:
# reset generator if a pass through dataset is completed
self.make_iter()
next_sample = next(self.ds_iter)
l += len(next_sample["input_ids"])
sample = {k:sample[k]+next_sample[k] for k in next_sample.keys()}
self.rem = {k:v[max_length:] for k,v in sample.items()}
sample = {k:v[:max_length] for k,v in sample.items()}
# regroup to shape [bs x seq_len]
samples = {k:np.array([v[i*self.seq_len:(i+1)*self.seq_len] for i in range(self.bs)]) for k,v in sample.items()}
self.queue.put(make_batch(samples))
self.queue.put(None)
def __iter__(self):
return self
class PrefetchDataloader(multiprocessing.Process):
"Prefetch dataloader for IterableDataset"
def __init__(self, dataset, max_steps, batch_size, sequence_length, prefetch_buffer=1, shuffle=True, shuffle_buffer=1000, seed=0):
super().__init__(daemon=True)
self.max_steps = max_steps
self.bs = batch_size
self.seq_len = sequence_length
self.max_length = batch_size * sequence_length
self.prefetch_buffer = prefetch_buffer
self.shuffle = shuffle
self.shuffle_buffer = shuffle_buffer
self.seed = seed
self.dataset = dataset
self.make_iter()
self.queue = multiprocessing.Queue(prefetch_buffer)
self.rem = defaultdict(list)
self.start()
def make_iter(self):
if self.shuffle:
shuffled_dataset = self.dataset.shuffle(self.shuffle_buffer, seed=self.seed)
self.seed += 1
self.ds_iter = iter(shuffled_dataset)
else:
self.ds_iter = iter(self.dataset)
def __next__(self):
return make_batch(self.queue.get())
def run(self):
i = 0
while True and i < self.max_steps:
# prepair next batch
sample = self.rem.copy()
l = len(sample["input_ids"])
max_length = self.max_length
while l < max_length:
try:
next_sample = next(self.ds_iter)
except StopIteration:
# reset generator if a pass through dataset is completed
self.make_iter()
next_sample = next(self.ds_iter)
l += len(next_sample["input_ids"])
sample = {k:sample[k]+next_sample[k] for k in next_sample.keys()}
self.rem = {k:v[max_length:] for k,v in sample.items()}
sample = {k:v[:max_length] for k,v in sample.items()}
# regroup to shape [bs x seq_len]
samples = {k:np.array([v[i*self.seq_len:(i+1)*self.seq_len] for i in range(self.bs)]) for k,v in sample.items()}
self.queue.put(samples)
self.queue.put(None)
def __iter__(self):
return self | import numpy as np
import threading
import queue
import multiprocessing
from collections import defaultdict
import jax
import jax.numpy as jnp
def make_batch(samples):
batch = {k:jnp.array(v) for k,v in samples.items()}
batch['labels'] = batch['input_ids'].copy()
return batch
class PrefetchDataloaderTread(threading.Thread):
"Prefetch dataloader for IterableDataset"
def __init__(self, dataset, max_steps, batch_size, sequence_length, prefetch_buffer=1, shuffle=True, shuffle_buffer=1000, seed=0):
super().__init__(daemon=True)
self.max_steps = max_steps
self.bs = batch_size
self.seq_len = sequence_length
self.max_length = batch_size * sequence_length
self.prefetch_buffer = prefetch_buffer
self.shuffle = shuffle
self.shuffle_buffer = shuffle_buffer
self.seed = seed
self.dataset = dataset
if shuffle:
shuffled_dataset = dataset.shuffle(shuffle_buffer, seed=self.seed)
self.seed += 1
self.ds_iter = iter(shuffled_dataset)
else:
self.ds_iter = iter(dataset)
self.queue = queue.Queue(prefetch_buffer)
self.rem = defaultdict(list)
self.start()
def make_iter(self):
if self.shuffle:
shuffled_dataset = self.dataset.shuffle(self.shuffle_buffer, seed=self.seed)
self.seed += 1
self.ds_iter = iter(shuffled_dataset)
else:
self.ds_iter = iter(self.dataset)
def __next__(self):
batch = self.queue.get()
return batch
def run(self):
i = 0
while True and i < self.max_steps:
i += 1
# prepair next batch
sample = self.rem.copy()
l = len(sample["input_ids"])
max_length = self.max_length
while l < max_length:
try:
next_sample = next(self.ds_iter)
except StopIteration:
# reset generator if a pass through dataset is completed
self.make_iter()
next_sample = next(self.ds_iter)
l += len(next_sample["input_ids"])
sample = {k:sample[k]+next_sample[k] for k in next_sample.keys()}
self.rem = {k:v[max_length:] for k,v in sample.items()}
sample = {k:v[:max_length] for k,v in sample.items()}
# regroup to shape [bs x seq_len]
samples = {k:np.array([v[i*self.seq_len:(i+1)*self.seq_len] for i in range(self.bs)]) for k,v in sample.items()}
self.queue.put(make_batch(samples))
self.queue.put(None)
def __iter__(self):
return self
class PrefetchDataloader(multiprocessing.Process):
    """Prefetching dataloader for an IterableDataset, backed by a worker process.

    The child process packs tokenized samples into
    [batch_size x sequence_length] numpy arrays and pushes them onto a
    multiprocessing queue; the parent converts them with make_batch() on
    consumption.  Iteration ends with StopIteration after ``max_steps``
    batches.
    """

    def __init__(self, dataset, max_steps, batch_size, sequence_length,
                 prefetch_buffer=1, shuffle=True, shuffle_buffer=1000, seed=0):
        super().__init__(daemon=True)
        self.max_steps = max_steps
        self.bs = batch_size
        self.seq_len = sequence_length
        # Total number of tokens needed per batch.
        self.max_length = batch_size * sequence_length
        self.prefetch_buffer = prefetch_buffer
        self.shuffle = shuffle
        self.shuffle_buffer = shuffle_buffer
        self.seed = seed
        self.dataset = dataset
        self.make_iter()
        self.queue = multiprocessing.Queue(prefetch_buffer)
        # Leftover tokens carried over between batches.
        self.rem = defaultdict(list)
        self.start()

    def make_iter(self):
        """(Re)create the dataset iterator, reshuffling with a fresh seed."""
        if self.shuffle:
            shuffled_dataset = self.dataset.shuffle(self.shuffle_buffer, seed=self.seed)
            self.seed += 1
            self.ds_iter = iter(shuffled_dataset)
        else:
            self.ds_iter = iter(self.dataset)

    def __next__(self):
        samples = self.queue.get()
        # BUG FIX: the worker enqueues None as an end-of-stream sentinel;
        # previously it was passed straight into make_batch(), crashing the
        # consumer instead of ending iteration cleanly.
        if samples is None:
            raise StopIteration
        # Conversion happens in the consumer process (raw numpy arrays cross
        # the process boundary; device arrays would not).
        return make_batch(samples)

    def run(self):
        # BUG FIX: the original loop initialised a step counter but never
        # incremented it, so max_steps was ignored and the worker ran forever.
        for _ in range(self.max_steps):
            # Start from the tokens left over by the previous batch.
            sample = self.rem.copy()
            l = len(sample["input_ids"])
            max_length = self.max_length
            # Concatenate samples until a full batch worth of tokens is ready.
            while l < max_length:
                try:
                    next_sample = next(self.ds_iter)
                except StopIteration:
                    # reset generator if a pass through dataset is completed
                    self.make_iter()
                    next_sample = next(self.ds_iter)
                l += len(next_sample["input_ids"])
                sample = {k: sample[k] + next_sample[k] for k in next_sample.keys()}
            # Keep the overflow for the next batch; truncate this one.
            self.rem = {k: v[max_length:] for k, v in sample.items()}
            sample = {k: v[:max_length] for k, v in sample.items()}
            # regroup to shape [bs x seq_len]
            samples = {k: np.array([v[i * self.seq_len:(i + 1) * self.seq_len]
                                    for i in range(self.bs)])
                       for k, v in sample.items()}
            self.queue.put(samples)
        self.queue.put(None)  # end-of-stream sentinel

    def __iter__(self):
        return self
days/day10.py | seankelly001/adventofcode2020 | 0 | 6617795 | from myutils import files
from itertools import combinations
def main():
print("===== Part 1 =====")
answer = part1()
print("answer: {}".format(answer))
print("===== Part 2 =====")
answer = part2()
print("answer: {}".format(answer))
def part1():
#adapters = files.getNumInputs("../inputs/day10-input.txt")
adapters = files.getNumInputs("../inputs/test.txt")
adapters.sort()
adapters.insert(0, 0)
adapters.append(adapters[-1]+3)
print(adapters)
diff1 = 0
diff3 = 0
for i in range(len(adapters)-1):
first = adapters[i]
second = adapters[i+1]
diff = second - first
if diff == 1:
diff1 += 1
elif diff == 3:
diff3 +=1
else:
raise("Unknown joltage difference: {}".format(diff))
return str(diff1 * diff3)
def part2():
#adapters = files.getNumInputs("../inputs/day10-input.txt")
adapters = files.getNumInputs("../inputs/test.txt")
adapters.sort()
adapters.insert(0, 0)
adapters.append(adapters[-1]+3)
visited_adapters = []
for i in range(len(adapters)):
return ""
if __name__ == "__main__":
main() | from myutils import files
from itertools import combinations
def main():
    """Run both puzzle parts and print their answers."""
    for label, solver in (("Part 1", part1), ("Part 2", part2)):
        print("===== {} =====".format(label))
        print("answer: {}".format(solver()))
def part1():
    """Count 1-jolt and 3-jolt gaps in the sorted adapter chain.

    Reads the adapter joltages, prepends the charging outlet (0) and appends
    the device's built-in adapter (max + 3), then returns the product of the
    1-jolt and 3-jolt difference counts as a string.

    Raises:
        ValueError: if any consecutive difference is neither 1 nor 3.
    """
    #adapters = files.getNumInputs("../inputs/day10-input.txt")
    adapters = files.getNumInputs("../inputs/test.txt")
    adapters.sort()
    adapters.insert(0, 0)
    adapters.append(adapters[-1] + 3)
    print(adapters)
    diff1 = 0
    diff3 = 0
    for first, second in zip(adapters, adapters[1:]):
        diff = second - first
        if diff == 1:
            diff1 += 1
        elif diff == 3:
            diff3 += 1
        else:
            # BUG FIX: `raise("...")` raises a TypeError in Python 3 because a
            # str is not an exception; raise a proper exception type instead.
            raise ValueError("Unknown joltage difference: {}".format(diff))
    return str(diff1 * diff3)
def part2():
    """Unfinished: intended to count distinct adapter arrangements.

    NOTE(review): this is a stub -- the loop body returns "" on the first
    iteration, so the function always yields an empty answer. TODO implement.
    """
    #adapters = files.getNumInputs("../inputs/day10-input.txt")
    adapters = files.getNumInputs("../inputs/test.txt")
    adapters.sort()
    adapters.insert(0, 0)
    adapters.append(adapters[-1]+3)
    visited_adapters = []  # never populated; presumably meant for the search -- TODO confirm
    for i in range(len(adapters)):
        return ""  # returns immediately; the loop is effectively dead code
if __name__ == "__main__":
main() | en | 0.351563 | #adapters = files.getNumInputs("../inputs/day10-input.txt") #adapters = files.getNumInputs("../inputs/day10-input.txt") | 3.297649 | 3 |
portfolio_item/admin.py | sajib1066/django-portfolio-cms | 0 | 6617796 | <reponame>sajib1066/django-portfolio-cms<gh_stars>0
from django.contrib import admin
from .models import (
About,
Service,
Education,
Experience,
Skill,
PortfolioCategory,
Portfolio,
CompletedTask,
ContactDetails
)
# Register each portfolio model with the default admin site.
for _model in (
    About,
    Service,
    Education,
    Experience,
    Skill,
    PortfolioCategory,
    Portfolio,
    CompletedTask,
):
    admin.site.register(_model)
admin.site.register(ContactDetails) | from django.contrib import admin
from .models import (
About,
Service,
Education,
Experience,
Skill,
PortfolioCategory,
Portfolio,
CompletedTask,
ContactDetails
)
admin.site.register(About)
admin.site.register(Service)
admin.site.register(Education)
admin.site.register(Experience)
admin.site.register(Skill)
admin.site.register(PortfolioCategory)
admin.site.register(Portfolio)
admin.site.register(CompletedTask)
admin.site.register(ContactDetails) | none | 1 | 1.384683 | 1 | |
CPAC/func_preproc/utils.py | Lawreros/C-PAC | 0 | 6617797 | <reponame>Lawreros/C-PAC
def add_afni_prefix(tpattern):
    """Prefix a ``.txt`` tpattern path with '@' for AFNI's file syntax.

    Any other value (including falsy ones such as None or '') is returned
    unchanged.
    """
    if tpattern and ".txt" in tpattern:
        return "@{0}".format(tpattern)
    return tpattern
def nullify(value, function=None):
from traits.trait_base import Undefined
if value is None:
return Undefined
if function:
return function(value)
return value | def add_afni_prefix(tpattern):
if tpattern:
if ".txt" in tpattern:
tpattern = "@{0}".format(tpattern)
return tpattern
def nullify(value, function=None):
from traits.trait_base import Undefined
if value is None:
return Undefined
if function:
return function(value)
return value | none | 1 | 2.582893 | 3 | |
read_geodata.py | dongmeic/RTP | 0 | 6617798 | <reponame>dongmeic/RTP
'''
The script connects and reads data from RLID gepspatial database
'''
import pyodbc
import pandas as pd
# Connect to the RLIDGeo SQL Server instance with Windows authentication.
cnxn = pyodbc.connect("Driver={SQL Server};"
                      "Server=rliddb.int.lcog.org,5433;"
                      "Database=RLIDGeo;"
                      "Trusted_Connection=yes;")
# Pull the full bike-facility table and do some quick exploration.
df = pd.read_sql_query('select * from dbo.BikeFacility', cnxn)
df.head()
list(df)
df.ftype.unique()
# Facility counts per (ftype, ftypedes) combination.
df.groupby(['ftype','ftypedes']).size().reset_index().rename(columns={0:'count'})
import geopandas as gpd
from sqlalchemy import create_engine
# SQLAlchemy engine using a URL-encoded ODBC connection string (same server).
engine = create_engine(
    "mssql+pyodbc:///?odbc_connect="
    "Driver%3D%7BODBC+Driver+17+for+SQL+Server%7D%3B"
    "Server%3Drliddb.int.lcog.org%2C5433%3B"
    "Database%3DRLIDGeo%3B"
    "Trusted_Connection%3Dyes%3B"
    "ApplicationIntent%3DReadWrite%3B"
    "WSID%3Dclwrk4087.int.lcog.org%3B")
# Fetch geometries as WKB so GeoPandas can build a GeoDataFrame.
sql = '''
SELECT CAST(bike_segid as varchar) AS id, ftype, ftypedes, Shape.STAsBinary() AS geom
FROM dbo.BikeFacility;
'''
BikeFacility = gpd.GeoDataFrame.from_postgis(sql, engine, geom_col='geom' )
import matplotlib.pyplot as plt
import contextily as ctx
# MPO boundary overlay and map rendering.
MPObd = gpd.read_file("V:/Data/Transportation/MPO_Boundary.shp")
BikeFacility.crs = "EPSG:4152"  # NOTE(review): assumed CRS of the source data -- confirm
fig, ax = plt.subplots(figsize=(14, 12))
BikeFacility.plot(ax=ax, column='ftype', cmap='Set1', legend=True, aspect=1)
MPObd.plot(ax=ax, facecolor="none", edgecolor="black", linestyle='--', aspect=1)
ctx.add_basemap(ax)
plt.title("Bike Facilities in Lane County", fontsize=30, fontname="Palatino Linotype", color="grey")
ax.axis("off")
plt.show() | '''
The script connects and reads data from RLID gepspatial database
'''
import pyodbc
import pandas as pd
cnxn = pyodbc.connect("Driver={SQL Server};"
"Server=rliddb.int.lcog.org,5433;"
"Database=RLIDGeo;"
"Trusted_Connection=yes;")
df = pd.read_sql_query('select * from dbo.BikeFacility', cnxn)
df.head()
list(df)
df.ftype.unique()
df.groupby(['ftype','ftypedes']).size().reset_index().rename(columns={0:'count'})
import geopandas as gpd
from sqlalchemy import create_engine
engine = create_engine(
"mssql+pyodbc:///?odbc_connect="
"Driver%3D%7BODBC+Driver+17+for+SQL+Server%7D%3B"
"Server%3Drliddb.int.lcog.org%2C5433%3B"
"Database%3DRLIDGeo%3B"
"Trusted_Connection%3Dyes%3B"
"ApplicationIntent%3DReadWrite%3B"
"WSID%3Dclwrk4087.int.lcog.org%3B")
sql = '''
SELECT CAST(bike_segid as varchar) AS id, ftype, ftypedes, Shape.STAsBinary() AS geom
FROM dbo.BikeFacility;
'''
BikeFacility = gpd.GeoDataFrame.from_postgis(sql, engine, geom_col='geom' )
import matplotlib.pyplot as plt
import contextily as ctx
MPObd = gpd.read_file("V:/Data/Transportation/MPO_Boundary.shp")
BikeFacility.crs = "EPSG:4152"
fig, ax = plt.subplots(figsize=(14, 12))
BikeFacility.plot(ax=ax, column='ftype', cmap='Set1', legend=True, aspect=1)
MPObd.plot(ax=ax, facecolor="none", edgecolor="black", linestyle='--', aspect=1)
ctx.add_basemap(ax)
plt.title("Bike Facilities in Lane County", fontsize=30, fontname="Palatino Linotype", color="grey")
ax.axis("off")
plt.show() | en | 0.692223 | The script connects and reads data from RLID gepspatial database SELECT CAST(bike_segid as varchar) AS id, ftype, ftypedes, Shape.STAsBinary() AS geom FROM dbo.BikeFacility; | 2.714771 | 3 |
regexy/compile/nfa.py | nitely/regexy | 11 | 6617799 | <filename>regexy/compile/nfa.py
# -*- coding: utf-8 -*-
"""
Tools for creating the NFA states
:private:
"""
import copy
from typing import (
Iterator,
Tuple)
from ..shared.nodes import (
Node,
EOF,
CharNode,
RepetitionRangeNode,
OpNode,
SkipNode,
AssertionNode)
from ..shared import Symbols
__all__ = ['nfa']
def _dup(state: Node, visited: set) -> Node:
"""
Recursively shallow copy state and its connected states
Return the copy of the given state (root)
:param state: the root or state to copy
:param visited: a record of states to avoid cycles
:return: shallow copy of the root state
:private:
"""
assert isinstance(state, Node)
if state in visited:
return state
visited.add(state)
if state is EOF:
return EOF
state_copy = copy.copy(state)
state_copy.out = [
_dup(s, visited)
for s in state_copy.out]
return state_copy
def dup(state: Node) -> Node:
return _dup(state=state, visited=set())
def rep_range_fixed(node, state):
assert node.start > 0
first = dup(state)
curr = first
for _ in range(node.start - 1):
new_state = dup(state)
combine(curr, new_state)
curr = new_state
return first
def rep_range_no_end(node, state):
assert node.end is None
new_state = dup(state)
zero_or_more = OpNode(
char=Symbols.ZERO_OR_MORE,
out=[new_state, EOF])
if node.is_greedy:
zero_or_more.out.reverse()
combine(new_state, zero_or_more)
return zero_or_more
def rep_range_with_end(node, state):
assert node.start < node.end
zero_or_one = OpNode(
char=Symbols.ZERO_OR_ONE,
out=[dup(state), EOF])
if zero_or_one.is_greedy:
zero_or_one.out.reverse()
curr = zero_or_one
for _ in range(node.start, node.end - 1):
zero_or_one_ = OpNode(
char=Symbols.ZERO_OR_ONE,
out=[dup(state), EOF])
if zero_or_one_.is_greedy:
zero_or_one_.out.reverse()
combine(curr, zero_or_one_)
curr = zero_or_one_
return zero_or_one
def _combine(origin_state: Node, target_state: Node, visited: set) -> None:
"""
Set all state ends to the target state
We could keep track of node ends instead\
of iterating all of them on every combination.\
But it is a worthless optimization since\
the resulting NFAs can be cached
:param origin_state: the root of the state\
that will point to the target
:param target_state: the state the origin will point at
:param visited: for caching the visited nodes\
and breaking the cycle
:private:
"""
assert isinstance(origin_state, Node)
assert isinstance(target_state, Node)
if origin_state in visited:
return
visited.add(origin_state)
for i, state in enumerate(origin_state.out):
if state is EOF:
origin_state.out[i] = target_state
else:
_combine(state, target_state, visited)
def combine(origin_state: Node, target_state: Node) -> None:
_combine(origin_state, target_state, visited=set())
def nfa(nodes: Iterator[Node]) -> Node:
"""
Converts a sequence of nodes into a NFA\
ready to be matched against a string
A NFA is a graph where each node is a state
This creates the connections for every state.\
EOF is temporarily placed on latest created state ends\
and replaced by a connection to other state later,\
so leaf states are the only states containing an EOF\
in the resulting NFA
Repetition range operators are expanded (i.e: a{1,} -> aa*)
:param nodes: an iterator of nodes\
to be converted into a NFA
:return: the NFA first state
:private:
"""
states = []
nodes = tuple(nodes) # type: Tuple[Node]
if not nodes:
return SkipNode(out=[EOF])
for node in nodes:
if isinstance(node, (CharNode, AssertionNode)):
node.out = [EOF]
states.append(node)
continue
if node.char == Symbols.JOINER:
state_b = states.pop()
state_a = states.pop()
combine(state_a, state_b)
states.append(state_a)
continue
if node.char == Symbols.OR:
state_b = states.pop()
state_a = states.pop()
node.out = [state_a, state_b]
states.append(node)
continue
if node.char == Symbols.ZERO_OR_MORE:
state = states.pop()
node.out = [state, EOF]
if node.is_greedy:
node.out.reverse()
combine(state, node)
states.append(node)
continue
if node.char == Symbols.ONE_OR_MORE:
state = states.pop()
node.out = [state, EOF]
if node.is_greedy:
node.out.reverse()
combine(state, node)
states.append(state)
continue
if node.char == Symbols.ZERO_OR_ONE:
state = states.pop()
node.out = [state, EOF]
if node.is_greedy:
node.out.reverse()
states.append(node)
continue
if node.char == Symbols.GROUP_START:
state = states.pop()
node.out = [state]
states.append(node)
continue
if node.char == Symbols.GROUP_END:
state = states.pop()
node.out = [EOF]
combine(state, node)
states.append(state)
continue
if node.char == Symbols.REPETITION_RANGE:
assert isinstance(node, RepetitionRangeNode)
state = states.pop()
first = None
if node.start > 0:
first = rep_range_fixed(node, state)
if node.start == node.end:
states.append(first or SkipNode(out=[EOF]))
continue
if node.end is None:
end = rep_range_no_end(node, state)
else:
end = rep_range_with_end(node, state)
if first:
combine(first, end)
states.append(first or end)
continue
assert False, 'Unhandled node: %s' % repr(node)
assert len(states) == 1
return states[0]
| <filename>regexy/compile/nfa.py
# -*- coding: utf-8 -*-
"""
Tools for creating the NFA states
:private:
"""
import copy
from typing import (
Iterator,
Tuple)
from ..shared.nodes import (
Node,
EOF,
CharNode,
RepetitionRangeNode,
OpNode,
SkipNode,
AssertionNode)
from ..shared import Symbols
__all__ = ['nfa']
def _dup(state: Node, visited: set) -> Node:
    """Shallow-copy *state* and everything reachable from it.

    *visited* records already-seen states so cycles terminate; EOF is a
    shared singleton and is returned as-is.

    :private:
    """
    assert isinstance(state, Node)
    if state in visited:
        return state
    visited.add(state)
    if state is EOF:
        return EOF
    duplicate = copy.copy(state)
    duplicate.out = [_dup(successor, visited) for successor in duplicate.out]
    return duplicate
def dup(state: Node) -> Node:
    """Duplicate the NFA rooted at *state* with a fresh visited set."""
    return _dup(state, visited=set())
def rep_range_fixed(node, state):
    """Chain ``node.start`` duplicates of *state* (mandatory part of a{n,...})."""
    assert node.start > 0
    head = dup(state)
    tail = head
    for _ in range(node.start - 1):
        link = dup(state)
        combine(tail, link)
        tail = link
    return head
def rep_range_no_end(node, state):
    """Build the open-ended tail of ``a{n,}`` as a trailing ``*`` loop."""
    assert node.end is None
    body = dup(state)
    star = OpNode(char=Symbols.ZERO_OR_MORE, out=[body, EOF])
    # out-order encodes greediness (preferred branch first).
    if node.is_greedy:
        star.out.reverse()
    combine(body, star)
    return star
def rep_range_with_end(node, state):
    """Expand the bounded tail of ``a{n,m}`` into m-n chained ``?`` copies.

    Each optional copy is a ZERO_OR_ONE op wrapping a duplicate of *state*.
    Greediness follows the parsed repetition *node* (e.g. ``a{2,4}?``),
    consistent with rep_range_no_end.
    """
    assert node.start < node.end
    zero_or_one = OpNode(
        char=Symbols.ZERO_OR_ONE,
        out=[dup(state), EOF])
    # BUG FIX: greediness must come from the parsed repetition node, not from
    # the freshly constructed OpNode (rep_range_no_end already does this).
    if node.is_greedy:
        zero_or_one.out.reverse()
    curr = zero_or_one
    for _ in range(node.start, node.end - 1):
        zero_or_one_ = OpNode(
            char=Symbols.ZERO_OR_ONE,
            out=[dup(state), EOF])
        if node.is_greedy:
            zero_or_one_.out.reverse()
        combine(curr, zero_or_one_)
        curr = zero_or_one_
    return zero_or_one
def _combine(origin_state: Node, target_state: Node, visited: set) -> None:
    """Redirect every EOF leaf reachable from *origin_state* to *target_state*.

    Walks the graph depth-first; *visited* guards against cycles.  Out-edges
    pointing at EOF are rewired in place, everything else is recursed into.
    (Tracking leaf states would avoid the full walk, but resulting NFAs can
    be cached, so the optimization is not worth it.)

    :private:
    """
    assert isinstance(origin_state, Node)
    assert isinstance(target_state, Node)
    if origin_state in visited:
        return
    visited.add(origin_state)
    for index, successor in enumerate(origin_state.out):
        if successor is EOF:
            origin_state.out[index] = target_state
        else:
            _combine(successor, target_state, visited)
def combine(origin_state: Node, target_state: Node) -> None:
    """Public wrapper around :func:`_combine` with a fresh visited set."""
    _combine(origin_state, target_state, set())
def nfa(nodes: Iterator[Node]) -> Node:
    """
    Converts a sequence of nodes into a NFA\
    ready to be matched against a string

    A NFA is a graph where each node is a state

    This creates the connections for every state.\
    EOF is temporarily placed on latest created state ends\
    and replaced by a connection to other state later,\
    so leaf states are the only states containing an EOF\
    in the resulting NFA

    Repetition range operators are expanded (i.e: a{1,} -> aa*)

    :param nodes: an iterator of nodes\
    to be converted into a NFA
    :return: the NFA first state
    :private:
    """
    # Stack machine over a postfix node sequence: operands (chars/assertions)
    # are pushed, operators pop their arguments and push the combined fragment.
    states = []
    nodes = tuple(nodes)  # type: Tuple[Node]
    if not nodes:
        # Empty pattern: a single skip state that matches immediately.
        return SkipNode(out=[EOF])
    for node in nodes:
        if isinstance(node, (CharNode, AssertionNode)):
            # Operand: a one-state fragment whose single exit is EOF.
            node.out = [EOF]
            states.append(node)
            continue
        if node.char == Symbols.JOINER:
            # Concatenation: wire a's dangling ends into b.
            state_b = states.pop()
            state_a = states.pop()
            combine(state_a, state_b)
            states.append(state_a)
            continue
        if node.char == Symbols.OR:
            # Alternation: the op node branches into both fragments.
            state_b = states.pop()
            state_a = states.pop()
            node.out = [state_a, state_b]
            states.append(node)
            continue
        if node.char == Symbols.ZERO_OR_MORE:
            # a*: loop the fragment back into the op node; out-order encodes
            # greediness (preferred branch first).
            state = states.pop()
            node.out = [state, EOF]
            if node.is_greedy:
                node.out.reverse()
            combine(state, node)
            states.append(node)
            continue
        if node.char == Symbols.ONE_OR_MORE:
            # a+: like a*, but the fragment itself is the entry state.
            state = states.pop()
            node.out = [state, EOF]
            if node.is_greedy:
                node.out.reverse()
            combine(state, node)
            states.append(state)
            continue
        if node.char == Symbols.ZERO_OR_ONE:
            # a?: branch into the fragment or skip past it.
            state = states.pop()
            node.out = [state, EOF]
            if node.is_greedy:
                node.out.reverse()
            states.append(node)
            continue
        if node.char == Symbols.GROUP_START:
            # Capture start: prepend the marker node to the fragment.
            state = states.pop()
            node.out = [state]
            states.append(node)
            continue
        if node.char == Symbols.GROUP_END:
            # Capture end: append the marker node after the fragment.
            state = states.pop()
            node.out = [EOF]
            combine(state, node)
            states.append(state)
            continue
        if node.char == Symbols.REPETITION_RANGE:
            # a{n,m}: expand into n chained copies plus an optional tail.
            assert isinstance(node, RepetitionRangeNode)
            state = states.pop()
            first = None
            if node.start > 0:
                first = rep_range_fixed(node, state)
            if node.start == node.end:
                # Exact count a{n}; a{0,0} degenerates to a skip state.
                states.append(first or SkipNode(out=[EOF]))
                continue
            if node.end is None:
                end = rep_range_no_end(node, state)
            else:
                end = rep_range_with_end(node, state)
            if first:
                combine(first, end)
            states.append(first or end)
            continue
        assert False, 'Unhandled node: %s' % repr(node)
    # A well-formed postfix expression reduces to exactly one fragment.
    assert len(states) == 1
    return states[0]
| en | 0.894296 | # -*- coding: utf-8 -*- Tools for creating the NFA states :private: Recursively shallow copy state and its connected states Return the copy of the given state (root) :param state: the root or state to copy :param visited: a record of states to avoid cycles :return: shallow copy of the root state :private: Set all state ends to the target state We could keep track of node ends instead\ of iterating all of them on every combination.\ But it is a worthless optimization since\ the resulting NFAs can be cached :param origin_state: the root of the state\ that will point to the target :param target_state: the state the origin will point at :param visited: for caching the visited nodes\ and breaking the cycle :private: Converts a sequence of nodes into a NFA\ ready to be matched against a string A NFA is a graph where each node is a state This creates the connections for every state.\ EOF is temporarily placed on latest created state ends\ and replaced by a connection to other state later,\ so leaf states are the only states containing an EOF\ in the resulting NFA Repetition range operators are expanded (i.e: a{1,} -> aa*) :param nodes: an iterator of nodes\ to be converted into a NFA :return: the NFA first state :private: # type: Tuple[Node] | 2.800963 | 3 |
setup.py | Wigder/fightin_words | 6 | 6617800 | from setuptools import setup
setup(name="fightin_words",
version="1.0.3",
description="Implementation of Monroe et al., 2008.",
url="https://github.com/Wigder/fightin_words",
author="<NAME>/<NAME>",
author_email="<EMAIL>",
python_requires=">=3",
license="MIT",
packages=["fightin_words"],
install_requires=[
"scikit-learn", "numpy"
],
zip_safe=False)
| from setuptools import setup
setup(name="fightin_words",
version="1.0.3",
description="Implementation of Monroe et al., 2008.",
url="https://github.com/Wigder/fightin_words",
author="<NAME>/<NAME>",
author_email="<EMAIL>",
python_requires=">=3",
license="MIT",
packages=["fightin_words"],
install_requires=[
"scikit-learn", "numpy"
],
zip_safe=False)
| none | 1 | 1.102613 | 1 | |
intro/part02-17_countdown/src/countdown.py | Hannah-Abi/python-pro-21 | 0 | 6617801 | number = 5
print("Countdown!")
while True:
print(number)
number = number - 1
if number < 1:
break
print("Now!")
| number = 5
print("Countdown!")
while True:
print(number)
number = number - 1
if number < 1:
break
print("Now!")
| none | 1 | 4.067938 | 4 | |
ref_src/2012-DEMON/test_networkx.py | GraphProcessor/LocalityBasedGraphAlgo | 4 | 6617802 | <filename>ref_src/2012-DEMON/test_networkx.py
import networkx as nx
g = nx.Graph()
fin = open("/home/cheyulin/gitrepos/SocialNetworkAnalysis/Codes-Yche/karate_edges_input.csv")
for l in fin:
l = l.rstrip().split(" ")
print l[0] + ',' + l[1];
g.add_edge(l[0], l[1])
print '\n'
# Iterate Through Vertices
for node in nx.nodes(g):
print node
# print len(nx.nodes(g))
print 'nodes_num:' + str(g.number_of_nodes())
| <filename>ref_src/2012-DEMON/test_networkx.py
# NOTE: Python 2 syntax (print statements); builds a graph from an edge list.
import networkx as nx
g = nx.Graph()
# Edge list file: one "<src> <dst>" pair per line.
fin = open("/home/cheyulin/gitrepos/SocialNetworkAnalysis/Codes-Yche/karate_edges_input.csv")
for l in fin:
    l = l.rstrip().split(" ")
    print l[0] + ',' + l[1];
    g.add_edge(l[0], l[1])
print '\n'
# Iterate Through Vertices
for node in nx.nodes(g):
    print node
# print len(nx.nodes(g))
print 'nodes_num:' + str(g.number_of_nodes())
| en | 0.20492 | # Iterate Through Vertices # print len(nx.nodes(g)) | 2.846151 | 3 |
scripts/mali_remove_gaps.py | CGATOxford/Optic | 0 | 6617803 | <reponame>CGATOxford/Optic
'''
mali_remove_gaps.py -
======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_remove_gaps.py --help
Type::
python mali_remove_gaps.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import re
import getopt
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.MaliIO as MaliIO
USAGE = """python %s [OPTIONS] < exonerate_output > filtered
Prune a nucelotide multiple alignment according to a master sequence.
1. Go in codon steps through the multiple alignment according
to the master sequence.
2. Remove all columns in other sequences, that
1. fall out of frame
2. are incomplete codons
Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-o, --file-output output
""" % sys.argv[0]
param_long_options = ["verbose=", "help", "file-output=", "version"]
param_short_options = "v:hm:e:p:c"
param_loglevel = 1
param_gap_char = "-"
param_mask_char = "x"
param_filename_output = None
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-o", "--file-output"):
param_filename_output = a
# 1. read multiple alignment in fasta format
mali, identifiers = MaliIO.readFasta(sys.stdin)
if param_loglevel >= 1:
print "# read mali with %i entries." % len(identifiers)
print E.GetHeader()
print E.GetParams()
# 1. remove gaps in multiple alignment
mali = MaliIO.removeGaps(mali)
if param_master:
frame_columns = GetFrameColumns(mali, param_master)
elif param_master_pattern:
columns = []
for id in identifiers:
if re.search(param_master_pattern, id):
columns += GetFrameColumns(mali, id)
if len(columns) == 0:
columns += GetFrameColumns(mali, identifiers[0])
# sort all columns by tuple. The "shortest" codon will be first (1,2,3)
# before (1,2,100)
columns.sort()
# select codons
frame_columns = []
last_codon = columns[0]
for codon in columns[1:]:
# skip identical codons
if codon == last_codon:
continue
# take first (shortest) codon in case of identical first residue
if codon[0] == last_codon[0]:
continue
# if not overlapping, keep
if codon[0] > last_codon[2]:
frame_columns.append(last_codon)
# if overlapping, but out of register: skip
last_codon = codon
frame_columns.append(last_codon)
# translate characters to upper/lower case according to exon info.
if exons:
for id in mali:
if id in exons:
mali[id] = AddExonInformation(
mali[id], exons[id], mask_char=param_mask_char)
if param_loglevel >= 1:
print "# found %i columns" % (len(frame_columns))
mask_chars = (string.upper(param_mask_char), string.lower(param_mask_char))
for id in mali.keys():
sequence = mali[id]
fragments = []
nstops, ncodons, naligned = 0, 0, 0
for a, b, c in frame_columns:
codon = sequence[a] + sequence[b] + sequence[c]
codon_is_aligned = False
codon_is_ok = True
for x in codon:
# a codon will be masked, if it either
# 1. contains a gap character
# 2. is an unaligned character, i.e.,
# exons and masked, or no exons and lowerwase
residue_is_unaligned = (x == param_gap_char) or \
(not exons and x in string.lowercase) or \
(exons and x in mask_chars)
codon_is_aligned = codon_is_aligned or not residue_is_unaligned
codon_is_ok = codon_is_ok and not residue_is_unaligned
if codon_is_aligned:
naligned += 1
if codon_is_ok:
ncodons += 1
if string.upper(codon) in ("TAG", "TAA", "TGA"):
if param_remove_stops:
fragments.append(param_gap_char * 3)
else:
fragments.append(codon)
nstops += 1
else:
fragments.append(codon)
else:
fragments.append(param_gap_char * 3)
mali[id] = string.join(fragments, "")
if param_loglevel >= 1:
print "# sequence: %s\tpositions: %i\taligned:%i\tcodons: %i\t stops: %i" % (id, len(fragments), naligned, ncodons, nstops)
sys.stdout.flush()
for id in mali.keys():
if param_mark_codons:
a = mali[id]
f = lambda x: a[x:x + 3]
s = string.join([f(x) for x in range(0, len(a), 3)], " ")
else:
s = mali[id]
print ">%s\n%s" % (id, s)
if param_filename_translation:
outfile = open(param_filename_translation, "w")
for id in mali.keys():
outfile.write(">%s\n%s\n" %
(id, Genomics.TranslateDNA2Protein(mali[id])))
outfile.close()
print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| '''
mali_remove_gaps.py -
======================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_remove_gaps.py --help
Type::
python mali_remove_gaps.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import re
import getopt
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.MaliIO as MaliIO
USAGE = """python %s [OPTIONS] < exonerate_output > filtered
Prune a nucelotide multiple alignment according to a master sequence.
1. Go in codon steps through the multiple alignment according
to the master sequence.
2. Remove all columns in other sequences, that
1. fall out of frame
2. are incomplete codons
Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-o, --file-output output
""" % sys.argv[0]
param_long_options = ["verbose=", "help", "file-output=", "version"]
param_short_options = "v:hm:e:p:c"
param_loglevel = 1
param_gap_char = "-"
param_mask_char = "x"
param_filename_output = None
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """
    if argv is None:
        argv = sys.argv
    try:
        optlist, args = getopt.getopt(
            sys.argv[1:], param_short_options, param_long_options)
    except getopt.error, msg:
        print USAGE, msg
        sys.exit(2)
    # NOTE(review): assigning param_loglevel / param_filename_output here makes
    # them local to main() (no `global` declaration), so reads below raise
    # UnboundLocalError unless the corresponding option was given -- confirm.
    for o, a in optlist:
        if o in ("-v", "--verbose"):
            param_loglevel = int(a)
        elif o in ("--version", ):
            print "version="
            sys.exit(0)
        elif o in ("-h", "--help"):
            print USAGE
            sys.exit(0)
        elif o in ("-o", "--file-output"):
            param_filename_output = a
    # 1. read multiple alignment in fasta format
    mali, identifiers = MaliIO.readFasta(sys.stdin)
    if param_loglevel >= 1:
        print "# read mali with %i entries." % len(identifiers)
    print E.GetHeader()
    print E.GetParams()
    # 1. remove gaps in multiple alignment
    mali = MaliIO.removeGaps(mali)
    # NOTE(review): param_master, param_master_pattern, exons, GetFrameColumns,
    # AddExonInformation, param_remove_stops, param_mark_codons and
    # param_filename_translation are not defined anywhere in this file --
    # presumably lost option/helper definitions; verify against the original.
    if param_master:
        frame_columns = GetFrameColumns(mali, param_master)
    elif param_master_pattern:
        columns = []
        for id in identifiers:
            if re.search(param_master_pattern, id):
                columns += GetFrameColumns(mali, id)
        if len(columns) == 0:
            columns += GetFrameColumns(mali, identifiers[0])
        # sort all columns by tuple. The "shortest" codon will be first (1,2,3)
        # before (1,2,100)
        columns.sort()
        # select codons
        frame_columns = []
        last_codon = columns[0]
        for codon in columns[1:]:
            # skip identical codons
            if codon == last_codon:
                continue
            # take first (shortest) codon in case of identical first residue
            if codon[0] == last_codon[0]:
                continue
            # if not overlapping, keep
            if codon[0] > last_codon[2]:
                frame_columns.append(last_codon)
            # if overlapping, but out of register: skip
            last_codon = codon
        frame_columns.append(last_codon)
    # translate characters to upper/lower case according to exon info.
    if exons:
        for id in mali:
            if id in exons:
                mali[id] = AddExonInformation(
                    mali[id], exons[id], mask_char=param_mask_char)
    if param_loglevel >= 1:
        print "# found %i columns" % (len(frame_columns))
    mask_chars = (string.upper(param_mask_char), string.lower(param_mask_char))
    for id in mali.keys():
        sequence = mali[id]
        fragments = []
        nstops, ncodons, naligned = 0, 0, 0
        for a, b, c in frame_columns:
            codon = sequence[a] + sequence[b] + sequence[c]
            codon_is_aligned = False
            codon_is_ok = True
            for x in codon:
                # a codon will be masked, if it either
                # 1. contains a gap character
                # 2. is an unaligned character, i.e.,
                # exons and masked, or no exons and lowercase
                residue_is_unaligned = (x == param_gap_char) or \
                    (not exons and x in string.lowercase) or \
                    (exons and x in mask_chars)
                codon_is_aligned = codon_is_aligned or not residue_is_unaligned
                codon_is_ok = codon_is_ok and not residue_is_unaligned
            if codon_is_aligned:
                naligned += 1
            if codon_is_ok:
                ncodons += 1
                if string.upper(codon) in ("TAG", "TAA", "TGA"):
                    if param_remove_stops:
                        fragments.append(param_gap_char * 3)
                    else:
                        fragments.append(codon)
                    nstops += 1
                else:
                    fragments.append(codon)
            else:
                fragments.append(param_gap_char * 3)
        mali[id] = string.join(fragments, "")
        if param_loglevel >= 1:
            print "# sequence: %s\tpositions: %i\taligned:%i\tcodons: %i\t stops: %i" % (id, len(fragments), naligned, ncodons, nstops)
            sys.stdout.flush()
    for id in mali.keys():
        if param_mark_codons:
            # Emit the sequence with a space between consecutive codons.
            a = mali[id]
            f = lambda x: a[x:x + 3]
            s = string.join([f(x) for x in range(0, len(a), 3)], " ")
        else:
            s = mali[id]
        print ">%s\n%s" % (id, s)
    if param_filename_translation:
        # Optionally write the protein translation of each pruned sequence.
        outfile = open(param_filename_translation, "w")
        for id in mali.keys():
            outfile.write(">%s\n%s\n" %
                          (id, Genomics.TranslateDNA2Protein(mali[id])))
        outfile.close()
    print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv)) | en | 0.661786 | mali_remove_gaps.py - ====================================================== :Author: <NAME> :Release: $Id$ :Date: |today| :Tags: Python Purpose ------- .. todo:: describe purpose of the script. Usage ----- Example:: python mali_remove_gaps.py --help Type:: python mali_remove_gaps.py --help for command line help. Command line options -------------------- python %s [OPTIONS] < exonerate_output > filtered Prune a nucelotide multiple alignment according to a master sequence. 1. Go in codon steps through the multiple alignment according to the master sequence. 2. Remove all columns in other sequences, that 1. fall out of frame 2. are incomplete codons Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $ Options: -h, --help print this message. -v, --verbose= loglevel. -o, --file-output output script main. parses command line options in sys.argv, unless *argv* is given. # 1. read multiple alignment in fasta format # 1. remove gaps in multiple alignment # sort all columns by tuple. The "shortest" codon will be first (1,2,3) # before (1,2,100) # select codons # skip identical codons # take first (shortest) codon in case of identical first residue # if not overlapping, keep # if overlapping, but out of register: skip # translate characters to upper/lower case according to exon info. # a codon will be masked, if it either # 1. contains a gap character # 2. is an unaligned character, i.e., # exons and masked, or no exons and lowerwase | 2.73841 | 3 |
docs/examples/open_sdg_simple.py | Defra-Data-Science-Centre-of-Excellence/sdg-build | 7 | 6617804 | <filename>docs/examples/open_sdg_simple.py
"""
This is an example of converting CSV data and YAML metadata into the JSON output
suitable for the Open SDG reporting platform. In contrast to the open_sdg.py
example, this approach uses helper functions along with a YAML configuration
file.
"""
import os
from sdg.open_sdg import open_sdg_build
from sdg.open_sdg import open_sdg_check
# Assumes that this 'open_sdg_config' file exists in the same folder as this one.
# For an example of the possible options, see docs/examples/open_sdg_config.yml.
folder = os.path.dirname(os.path.realpath(__file__))
config = os.path.join(folder, 'open_sdg_config.yml')
# Perhaps we need to alter the data in some way.
def alter_data(df):
    """Example data hook: add a constant 'my_column' column.

    `df` is presumably a pandas DataFrame — confirm against the
    sdg-build documentation for the alter_data callback signature.
    """
    df['my_column'] = 'foo'
# Perhaps we need to alter the metadata in some way.
def alter_meta(meta):
    """Example metadata hook: add a constant 'my_field' entry.

    `meta` is presumably a dict of indicator metadata — confirm against
    the sdg-build documentation for the alter_meta callback signature.
    """
    meta['my_field'] = 'bar'
# Validate the indicators.
validation_successful = open_sdg_check(config=config, alter_data=alter_data,
                                       alter_meta=alter_meta)
# If everything was valid, perform the build.
if validation_successful:
    open_sdg_build(config=config, alter_data=alter_data, alter_meta=alter_meta)
else:
    # Abort loudly so automated pipelines fail when indicators are invalid.
    raise Exception('There were validation errors. See output above.')
| <filename>docs/examples/open_sdg_simple.py
"""
This is an example of converting CSV data and YAML metadata into the JSON output
suitable for the Open SDG reporting platform. In contrast to the open_sdg.py
example, this approach uses helper functions along with a YAML configuration
file.
"""
import os
from sdg.open_sdg import open_sdg_build
from sdg.open_sdg import open_sdg_check
# Assumes that this 'open_sdg_config' file exists in the same folder as this one.
# For an example of the possible options, see docs/examples/open_sdg_config.yml.
folder = os.path.dirname(os.path.realpath(__file__))
config = os.path.join(folder, 'open_sdg_config.yml')
# Perhaps we need to alter the data in some way.
def alter_data(df):
df['my_column'] = 'foo'
# Perhaps we need to alter the metadata in some way.
def alter_meta(meta):
meta['my_field'] = 'bar'
# Validate the indicators.
validation_successful = open_sdg_check(config=config, alter_data=alter_data,
alter_meta=alter_meta)
# If everything was valid, perform the build.
if validation_successful:
open_sdg_build(config=config, alter_data=alter_data, alter_meta=alter_meta)
else:
raise Exception('There were validation errors. See output above.')
| en | 0.891923 | This is an example of converting CSV data and YAML metadata into the JSON output suitable for the Open SDG reporting platform. In contrast to the open_sdg.py example, this approach uses helper functions along with a YAML configuration file. # Assumes that this 'open_sdg_config' file exists in the same folder as this one. # For an example of the possible options, see docs/examples/open_sdg_config.yml. # Perhaps we need to alter the data in some way. # Perhaps we need to alter the metadata in some way. # Validate the indicators. # If everything was valid, perform the build. | 2.877532 | 3 |
src/hive-examples/python-examples/pyhs2/error.py | peanut-chenzhong/huaweicloud-mrs-example | 200 | 6617805 | class Pyhs2Exception(Exception):
    def __init__(self, errorCode, errorMessage):
        # Error code and human-readable message reported by the server.
        self.errorCode = errorCode
        self.errorMessage = errorMessage
def __str__(self):
return repr(self.errorMessage) | class Pyhs2Exception(Exception):
def __init__(self, errorCode, errorMessage):
self.errorCode = errorCode
self.errorMessage = errorMessage
def __str__(self):
return repr(self.errorMessage) | none | 1 | 2.770726 | 3 | |
Older versions/Viscotester_v2.1.py | ThiagoDiasV/Viscotester | 6 | 6617806 | '''
Viscotester: a Python script to process data from a viscosimeter
Visco Tester 6L Haake.
The documentation is in English but the program is used in a Brazilian
laboratory, so the language of the prints is Portuguese-BR.
This program is made specifically for Visco Tester 6L Haake and Windows OS.
A viscosimeter is an instrument used to measure the viscosity of liquids and
fluids. The equipment uses tools named spindles. The spindle is immersed
in the substance that will be evaluated and is rotated at different
speeds.
The outputs of the equipment are the rotation per minute (RPM) parameter,
the viscosity (cP) and the torque (%) value. The torque value is calculated
based on the speed and the geometry of the spindle.
'''
# Imports
import re
from collections import OrderedDict
from os import startfile, path
from statistics import mean, stdev
from time import sleep
import colorama
from colorama import Fore, Style
import serial
import xlsxwriter
import datetime
from math import log10
colorama.init(autoreset=True, convert=True)
def initial_menu():
    '''
    Shows the usage-instructions banner for the Visco Tester 6L.
    '''
    # The same colored divider line is printed four times below.
    divider = Fore.GREEN + '-' * 90
    print(divider)
    print(Fore.BLUE + '#' * 37 + Fore.CYAN + ' VISCOTESTER 6L '
          + Style.RESET_ALL + Fore.BLUE + '#' * 37)
    print(Fore.BLUE + '#' * 35 + Fore.CYAN + ' INSTRUÇÕES DE USO '
          + Style.RESET_ALL + Fore.BLUE + '#' * 36)
    print(divider)
    print('1 - Ligue o aparelho e realize o ' + Fore.BLUE + 'AUTO TEST',
          'pressionando a tecla ' + Fore.GREEN + 'START')
    print('2 - Observe se não há nenhum fuso acoplado ao aparelho antes de '
          'pressionar ' + Fore.GREEN + 'START')
    print('3 - Aguarde o ' + Fore.BLUE + 'AUTO TEST ' + Style.RESET_ALL +
          'ser finalizado e em seguida pressione ' + Fore.GREEN + 'START')
    print('4 - Adicione o fuso correto e selecione o fuso correto no aparelho '
          'pressionando ' + Fore.YELLOW + 'ENTER')
    print('5 - Selecione a RPM desejada e pressione ' + Fore.YELLOW + 'ENTER')
    print('6 - Observe se o fuso correto está acoplado ao aparelho e '
          'pressione ' + Fore.GREEN + 'START')
    print(divider)
    print(Fore.BLUE + '#' * 90)
    print(Fore.BLUE + '#' * 90)
    print(divider)
def final_menu():
    '''
    Informs the user that the maximum torque was reached, that no more
    readings are possible, and asks them to press STOP on the equipment.
    '''
    print('Torque máximo atingido')
    print('Leituras não são mais possíveis de serem feitas')
    print('Pressione ' + Fore.RED + 'STOP' + Style.RESET_ALL +
          ' no aparelho e ' + Fore.GREEN + 'aguarde')
def regex_name_validation(name):
    '''
    Validates a sample/worksheet name, re-prompting while any character
    forbidden in filenames or sheet names (\\ / | < > * : ? " [ ]) is
    present. Returns the accepted name unchanged.
    '''
    forbidden = re.compile(r'[\\/|<>*:?\"[\]]')
    while forbidden.search(name):
        print(Fore.RED + 'Você digitou um caractere não permitido '
              'para nome de arquivo ou de planilha.')
        print(Fore.RED + 'Saiba que você não pode usar nenhum dos '
              'caracteres abaixo: ')
        print(Fore.RED + r' [ ] \ / | < > * : " ?')
        name = str(input('Digite novamente um nome para a amostra '
                         'sem caracteres proibidos: '))
    return name
def file_name_function():
    '''
    Prompts the user for the base name of the .xlsx file that will be
    generated, rejecting characters that are illegal in filenames.
    '''
    raw_name = input('Digite um nome para o arquivo (.xlsx) '
                     'que será gerado: ').strip()
    return regex_name_validation(raw_name)
def serial_object_creator(time_set):
    '''
    Reads one line from the Visco Tester on COM1 and returns it split
    into fields.

    A new serial connection is opened on every call because the read
    timeout must track the current rotation speed: `time_set` (seconds)
    is computed by timer_for_closing_port() from the previous reading.
    Of the returned fields, index [3] is the RPM value, index [5] is the
    torque value and index [7] is the viscosity (cP) value.

    Bug fix: the port is now closed explicitly after each read. The
    original relied on garbage collection to release COM1, which can
    leave the port locked and fail the next open on Windows.
    '''
    ser = serial.Serial('COM1', 9600, timeout=time_set)
    try:
        return ser.readline().split()
    finally:
        ser.close()
def timer_for_closing_port(serial_object):
    '''
    Computes the serial-port timeout (seconds) for the next read from
    the RPM field of the latest reading.

    The multiplier grows with the rotation speed because at high
    rotations the probability of read errors increases, and this value
    is what eventually breaks the main read loop:
      RPM <= 6        -> 2.5 * (60 / RPM)
      6 < RPM < 100   -> 3.5 * (60 / RPM)
      RPM >= 100      -> 25  * (60 / RPM)

    float() is applied because the equipment sends bytes literals;
    index [3] holds the RPM value.
    '''
    rpm = float(serial_object[3])
    if rpm <= 6:
        factor = 2.5
    elif rpm < 100:
        factor = 3.5
    else:
        factor = 25
    return factor * (60 / rpm)
def torque_validator(serial_object):
    '''
    Returns False when the viscosity field reads b'off' (the equipment
    reports the torque limit was exceeded), True otherwise.
    '''
    return serial_object[7] != b'off'
def readings_printer(serial_object):
    '''
    Prints one reading (RPM, cP and torque) on the screen.
    Index [3] holds the RPM value, [5] the torque and [7] the cP value.
    '''
    rpm = float(serial_object[3])
    torque = float(serial_object[5])
    centipoise = int(serial_object[7])
    print(f' RPM: {rpm:.>20} /// cP: {centipoise:.>20} '
          f'/// Torque: {torque:.>20}%')
def values_storager(serial_object, storage=None):
    '''
    Stores one reading into a dict keyed by the RPM value.

    Each key maps to two lists: the first accumulates cP values and the
    second accumulates torque values for that rotation speed.

    serial_object -- one reading as returned by serial_object_creator();
                     index [3] is RPM, [5] torque, [7] viscosity (cP).
    storage -- the dict to update. Defaults to the module-level
               `registers` dict used by the main loop; accepting it as a
               parameter removes the hard global dependency and keeps
               existing callers working unchanged.
    Returns the updated dict.
    '''
    if storage is None:
        storage = registers
    rpm_value = float(serial_object[3])
    cp_value = int(serial_object[7])
    torque_value = float(serial_object[5])
    if rpm_value not in storage:
        # First reading at this speed: start both accumulator lists.
        storage[rpm_value] = [[cp_value], [torque_value]]
    else:
        storage[rpm_value][0].append(cp_value)
        storage[rpm_value][1].append(torque_value)
    return storage
def data_processor(**registers):
    '''
    Removes cP outliers from the readings, in place.

    For every RPM entry with more than one cP reading and a non-zero
    spread, only the values strictly inside (mean - stdev, mean + stdev)
    are kept. Entries with a single reading or zero spread are left
    untouched. Returns the (mutated) dict of readings.
    '''
    for readings in registers.values():
        cp_values = readings[0]
        if len(cp_values) > 1:
            avg = mean(cp_values)
            spread = stdev(cp_values)
            if spread != 0:
                readings[0] = [cp for cp in cp_values
                               if avg - spread < cp < avg + spread]
    return registers
def logarithm_values_maker(**registers):
    '''
    Returns [[log10(rpm), ...], [log10(mean cP), ...]] for the processed
    readings, skipping entries whose mean cP is zero (log10 undefined).

    Keys arrive as strings (they travel through **kwargs) and are cast
    back to float before taking the logarithm.

    Bug fix: the original rebuilt the same result list once per key in a
    redundant loop, and raised UnboundLocalError on an empty dict; an
    empty input now yields [[], []].
    '''
    numeric = {float(k): v for k, v in registers.items()}
    rpm_logs = [log10(rpm) for rpm, data in numeric.items()
                if mean(data[0]) != 0]
    cp_logs = [log10(mean(data[0])) for data in numeric.values()
               if mean(data[0]) != 0]
    return [rpm_logs, cp_logs]
def date_storage():
    '''
    Returns today's date as a (day, month, year) tuple, used both in a
    worksheet cell and in the name of the generated .xlsx file.
    '''
    today = datetime.date.today()
    return (today.day, today.month, today.year)
def workbook_maker(file_name):
    '''
    Creates and returns the .xlsx workbook that will hold every sample.

    The file goes to the 'Viscosidades' folder on the Desktop; when that
    folder has been deleted by the user, the Desktop itself is used.
    '''
    day, month, year = date_storage()
    target_dir = 'C:/Users/UFC/Desktop/Viscosidades/'
    if not path.isdir(target_dir):
        target_dir = 'C:/Users/UFC/Desktop/'
    return xlsxwriter.Workbook(
        f'{target_dir}{file_name}_{day:02d}{month:02d}{year:04d}.xlsx')
def worksheet_name_function():
    '''
    Asks the user for the sample name that labels the next worksheet,
    rejecting characters that are illegal in sheet names.
    '''
    raw_name = input('Digite o nome da amostra: ').strip()
    return regex_name_validation(raw_name)
def worksheet_maker(workbook, worksheet_name, **registers):
    '''
    Creates one worksheet inside the workbook and fills it with the raw
    readings, the outlier-free statistics, the log10 data and two charts.

    Layout:
    Column 'A' stores the sample name and the date.
    Columns 'B', 'C' and 'D' store all read data (RPM, cP and torque).
    Columns 'F', 'G', 'H' and 'I' store the processed data without
    outliers: RPM, average cP, standard deviation and relative standard
    deviation.
    Columns 'K' and 'L' receive log10 values of processed RPM/cP values.
    Cells 'M2', 'N2' and 'O2' receive intercept, slope and R squared of
    the log10 values.
    Two charts are inserted: one for the processed data and one for the
    log10 data.

    Bug fix: chart_1 referenced the malformed range '$F2$'; Excel
    expects '$F$2' for an absolute reference.
    '''
    # Spaces are stripped once; the bare name is reused in the chart
    # range references below, which cannot contain blanks.
    sheet_ref = worksheet_name.replace(" ", "")
    worksheet = workbook.add_worksheet(f'{sheet_ref}')
    bold = workbook.add_format({'bold': True})
    italic = workbook.add_format({'italic': True})
    float_format = workbook.add_format({'num_format': '0.0000'})
    mean_format = workbook.add_format({'num_format': '0.00'})
    percentage_format = workbook.add_format({'num_format': '0.00%'})
    worksheet.set_column(0, 15, 16)
    worksheet.set_column(4, 4, 25)
    worksheet.set_column(9, 9, 20)
    worksheet.write('A1', f'{worksheet_name}', bold)
    worksheet.write('A2', 'Data', italic)
    date_today = date_storage()
    worksheet.write('A3',
                    f'{date_today[0]:02d}/{date_today[1]:02d}/'
                    f'{date_today[2]:04d}')
    # Header row, written in a loop so the layout lives in one place.
    headers = (
        ('B1', 'RPM'), ('C1', 'cP'), ('D1', 'Torque(%)'),
        ('E1', 'Processamento dos dados >>'), ('F1', 'RPM'),
        ('G1', 'Médias: cP'), ('H1', 'Desvio padrão: cP'),
        ('I1', 'DP (%): cP'), ('J1', 'Escala logarítmica >>'),
        ('K1', 'RPM Log10'), ('L1', 'cP Log10'),
        ('M1', 'Intercepto'), ('N1', 'Inclinação'), ('O1', 'R²'),
    )
    for cell, caption in headers:
        worksheet.write(cell, caption, bold)
    # Raw readings: RPM, cP and torque go to columns 1, 2 and 3.
    row = 1
    col = 1
    for key, value in registers.items():
        for cp in value[0]:
            worksheet.write(row, col, float(key))
            worksheet.write(row, col + 1, cp)
            row += 1
        row -= len(value[0])
        for torque in value[1]:
            worksheet.write(row, col + 2, torque)
            row += 1
    processed_registers = data_processor(**registers)
    # Processed values: RPM, mean(cP), stdev and relative stdev go to
    # columns 5, 6, 7 and 8. Speeds whose mean cP is zero are skipped so
    # the rows line up with the log10 columns.
    row = col = 1
    for key, value in processed_registers.items():
        if mean(value[0]) != 0:
            worksheet.write(row, col + 4, float(key))
            if len(value[0]) > 1:
                worksheet.write(row, col + 5, mean(value[0]), mean_format)
                worksheet.write(row, col + 6, stdev(value[0]), float_format)
                worksheet.write(row, col + 7,
                                (stdev(value[0])/(mean(value[0]))),
                                percentage_format)
            else:
                # A single reading has no spread: record zero deviation
                # instead of calling stdev() on one point (which raises).
                worksheet.write(row, col + 5, value[0][0], mean_format)
                worksheet.write(row, col + 6, 0)
                worksheet.write(row, col + 7, 0)
            row += 1
    log_list = logarithm_values_maker(**processed_registers)
    worksheet.write_column('K2', log_list[0], float_format)
    worksheet.write_column('L2', log_list[1], float_format)
    worksheet.write_array_formula(
        'M2:M2', '{=INTERCEPT(L2:L20, K2:K20)}',
        float_format
    )
    worksheet.write_array_formula(
        'N2:N2', '{=SLOPE(L2:L20, K2:K20)}',
        float_format
    )
    worksheet.write_array_formula(
        'O2:O2', '{=RSQ(K2:K20, L2:L20)}',
        float_format
    )
    # NOTE(review): the chart ranges below span len(processed_registers)
    # rows even when zero-mean speeds were skipped above — confirm the
    # intent if zero readings ever occur in practice.
    chart_1 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart_1.add_series({
        # '$F$2' was misspelled '$F2$' in the original (malformed
        # absolute reference).
        'categories': f'={sheet_ref}'
                      f'!$F$2:$F${len(processed_registers.keys()) + 1}',
        'values': f'={sheet_ref}'
                  f'!$G$2:$G${len(processed_registers.values()) + 1}',
        'line': {'color': 'green'}
    })
    chart_1.set_title({'name': f'{worksheet_name}'})
    chart_1.set_x_axis({
        'name': 'RPM',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_1.set_y_axis({
        'name': 'cP',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_1.set_size({
        'width': 500,
        'height': 400
    })
    worksheet.insert_chart(row + 2, 5, chart_1)
    chart_2 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart_2.add_series({
        'categories': f'={sheet_ref}'
                      f'!$K$2:$K${len(processed_registers.keys()) + 1}',
        'values': f'={sheet_ref}'
                  f'!$L$2:$L${len(processed_registers.values()) + 1}',
        'line': {'color': 'blue'},
        'trendline': {
            'type': 'linear',
            'display_equation': True,
            'display_r_squared': True,
            'line': {
                'color': 'red',
                'width': 1,
                'dash_type': 'long_dash',
            },
        },
    })
    chart_2.set_title({'name': f'Curva escala log: {worksheet_name}'})
    chart_2.set_x_axis({
        'name': 'RPM',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_2.set_y_axis({
        'name': 'cP',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_2.set_size({
        'width': 500,
        'height': 400
    })
    worksheet.insert_chart(row + 2, 10, chart_2)
def workbook_close_function(workbook):
    '''
    Finalizes the workbook, flushing its contents to disk.
    '''
    workbook.close()
def workbook_launcher(workbook):
    '''
    Opens the generated workbook so the user can inspect the results.

    NOTE(review): the path is rebuilt from the module-level `file_name`
    variable; the `workbook` argument itself is not used here.
    '''
    day, month, year = date_storage()
    target_dir = 'C:/Users/UFC/Desktop/Viscosidades/'
    if not path.isdir(target_dir):
        target_dir = 'C:/Users/UFC/Desktop/'
    startfile(f'{target_dir}{file_name}_{day:02d}{month:02d}{year:04d}.xlsx')
# Init: everything below runs when the script starts. One outer-loop
# iteration corresponds to one sample / one worksheet.
initial_menu()
file_name = file_name_function()
workbook = workbook_maker(file_name)
repeat_option = ''
# NOTE(review): '[NS]' matches any answer CONTAINING an N or S (e.g.
# 'NAO', 'SIM'), not only the exact letters — confirm this is intended.
regex_repeat = re.compile(r'[NS]')
while repeat_option != 'N':
    repeat_option = ''
    worksheet_name = worksheet_name_function()
    sleep(2.5)
    print('Aguarde que em instantes o programa se inicializará.')
    sleep(2.5)
    print('Ao finalizar suas leituras, pressione ' + Fore.RED + 'STOP '
          + Style.RESET_ALL + 'no aparelho.')
    sleep(2.5)
    print('Ao pressionar ' + Fore.RED +
          'STOP' + Style.RESET_ALL +
          ', o programa levará alguns segundos para preparar sua planilha. '
          'Aguarde.')
    registers = dict()  # Readings for the current sample, keyed by RPM.
    time = 300  # First serial timeout; replaced after the first reading.
    sleep(5)  # Delay the beginning of the script. This helps to avoid errors.
    print(Fore.GREEN + '-' * 90)
    print(Fore.BLUE + '#' * 40 + Fore.CYAN + ' LEITURAS '
          + Fore.BLUE + '#' * 40)
    print(Fore.GREEN + '-' * 90)
    # Inner loop: keep reading the serial port until the user presses
    # STOP on the equipment (which yields a short line and raises
    # IndexError inside the helpers) or hits Ctrl+C.
    while True:
        try:
            # NOTE(review): 'object' shadows the built-in of the same
            # name — consider renaming it to 'reading'.
            object = serial_object_creator(time)
            time = timer_for_closing_port(object)
            if torque_validator(object):
                if not object:
                    print('Torque máximo atingido ou erro no aparelho')
                else:
                    readings_printer(object)
                    registers = values_storager(object)
            else:
                final_menu()
        except KeyboardInterrupt:
            print('Programa interrompido por atalho de teclado')
            break
        except IndexError:  # Raised after STOP is pressed; ends the loop.
            print('Foi pressionado ' + Fore.RED + 'STOP'
                  + Style.RESET_ALL + ' no aparelho')
            # Sort the readings by ascending RPM before reporting.
            registers = dict(OrderedDict(sorted(registers.items())))
            break
    # Keys are stringified because they travel through **kwargs.
    worksheet_maker(
        workbook, worksheet_name,
        **{str(k): v for k, v in registers.items()}
    )
    print('Você quer ler outra amostra?')
    print('Responda com "S" para se sim ou "N" para se não.')
    print('Se você quiser ler outra amostra, coloque a nova amostra,')
    print('retire e limpe o fuso e, após isso,')
    print('responda abaixo após pressionar '
          + Fore.GREEN + 'START' + Style.RESET_ALL + ' no aparelho:')
    while not regex_repeat.search(repeat_option):
        repeat_option = str(input('[S/N]: ')).strip().upper()
        if repeat_option == 'S':
            print('Pressione ' + Fore.GREEN + 'START')
            sleep(5)
workbook_close_function(workbook)
workbook_launcher(workbook)
print(Fore.GREEN + 'OBRIGADO POR USAR O VISCOTESTER 6L SCRIPT')
Viscotester: a Python script to process data from a viscosimeter
Visco Tester 6L Haake.
The documentation is in English but the program is used in a Brazilian
laboratory, so the language of the prints is Portuguese-BR.
This program is made specifically for Visco Tester 6L Haake and Windows OS.
A viscosimeter is a equipment used to measure the viscosity of liquids and
fluids. The equipment use tools named spindles. The spindle is immersed
in the substance that will be evaluated and is rotated at different
rotations.
The output of equipment are the rotation per minute (RPM) parameter,
the viscosity (cP) and the torque (%) value. The torque value is calculated
based on the speed and the geometry of the spindle.
'''
# Imports
import re
from collections import OrderedDict
from os import startfile, path
from statistics import mean, stdev
from time import sleep
import colorama
from colorama import Fore, Style
import serial
import xlsxwriter
import datetime
from math import log10
colorama.init(autoreset=True, convert=True)
def initial_menu():
'''
Prints an initial menu at the screen.
'''
print(Fore.GREEN + '-' * 90)
print(Fore.BLUE + '#' * 37 + Fore.CYAN + ' VISCOTESTER 6L '
+ Style.RESET_ALL + Fore.BLUE + '#' * 37)
print(Fore.BLUE + '#' * 35 + Fore.CYAN + ' INSTRUÇÕES DE USO '
+ Style.RESET_ALL + Fore.BLUE + '#' * 36)
print(Fore.GREEN + '-' * 90)
print('1 - Ligue o aparelho e realize o ' + Fore.BLUE + 'AUTO TEST',
'pressionando a tecla ' + Fore.GREEN + 'START')
print('2 - Observe se não há nenhum fuso acoplado ao aparelho antes de '
'pressionar ' + Fore.GREEN + 'START')
print('3 - Aguarde o ' + Fore.BLUE + 'AUTO TEST ' + Style.RESET_ALL +
'ser finalizado e em seguida pressione ' + Fore.GREEN + 'START')
print('4 - Adicione o fuso correto e selecione o fuso correto no aparelho '
'pressionando ' + Fore.YELLOW + 'ENTER')
print('5 - Selecione a RPM desejada e pressione ' + Fore.YELLOW + 'ENTER')
print('6 - Observe se o fuso correto está acoplado ao aparelho e '
'pressione ' + Fore.GREEN + 'START')
print(Fore.GREEN + '-' * 90)
print(Fore.BLUE + '#' * 90)
print(Fore.BLUE + '#' * 90)
print(Fore.GREEN + '-' * 90)
def final_menu():
'''
Prints some informations if the maximum torque is obtained from the
Viscotester and require the user to press STOP on the equipment.
'''
print('Torque máximo atingido')
print('Leituras não são mais possíveis de serem feitas')
print('Pressione ' + Fore.RED + 'STOP' + Style.RESET_ALL +
' no aparelho e ' + Fore.GREEN + 'aguarde')
def regex_name_validation(name):
'''
Does a validation on sample name and worksheet name using regex
to avoid errors on the file that will be created.
The input is the name that the user typed to the program.
The function repeats the requirement of the name if the user used
forbidden characters (like \\/|<>*:?").
Returns the name that will be used.
'''
regexp = re.compile(r'[\\/|<>*:?\"[\]]')
while regexp.search(name):
print(Fore.RED + 'Você digitou um caractere não permitido '
'para nome de arquivo ou de planilha.')
print(Fore.RED + 'Saiba que você não pode usar nenhum dos '
'caracteres abaixo: ')
print(Fore.RED + r' [ ] \ / | < > * : " ?')
name = str(input('Digite novamente um nome para a amostra '
'sem caracteres proibidos: '))
return name
def file_name_function():
'''
Require the name of the sample to put on the xlsx filename.
The regex_name_validation() function is used here to avoid errors.
'''
file_name = str(input('Digite um nome para o arquivo (.xlsx) '
'que será gerado: ')).strip()
file_name = regex_name_validation(file_name)
return file_name
def serial_object_creator(time_set):
'''
At each rotation of the equipment this function creates a serial object.
This is important because at each rotation the timeout to close serial
port should change. This occurs because the time to break the while loop
is dependent of the rotation of equipment.
The time to closing port responsibility is assigned to 'time_set'
variable.
The data of serial port will be assigned to 'ser' variable.
The class serial.Serial receive 'COM1' as port parameter because this
program is used on Windows OS. Baudrate parameter is 9600 and timeout
parameter is equal to 'time_set' variable. The variable 'time_set' is
defined in timer_for_closing_port() function below.
Of 'serial_object', index [3] is the RPM value, index [5] is the torque
value and the index [7] is the viscosity (cP) value.
'''
ser = serial.Serial('COM1', 9600, timeout=time_set)
serial_object = ser.readline().split()
return serial_object
def timer_for_closing_port(serial_object):
'''
Defines a new time for closing serial port. This times depends on the
rotation per minute parameter of equipment.
The possible values for rotation per minute parameter of the equipment
are: 0.3, 0.5, 0.6, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 10, 12, 20, 30, 50, 60,
100 and 200 RPMs.
When the rotation per minute (RPM) parameter of equipment is lower than
6 RPMs, the 'time_for_closing' value is defined by the 'if' statement
below.
If the value of RPM is above 6 and below 100, 'time_for_closing' value
is defined by the 'elif' statement. Finally, if the RPM value is 100
or 200 RPMs, 'time_for_closing' value is defined by 'else' statement.
These differences on calculation of 'time_for_closing' variable occurs
because this variable is responsible to finish the loop that controls
the program, and at high rotations the probability of errors increase.
The 'float(object[3])' value below is the RPM parameter. 'float' function
is necessary because the equipment send to the computer bytes literals.
'''
rpm_value = float(serial_object[3])
if rpm_value <= 6:
time_for_closing = 2.5*(60/rpm_value)
elif rpm_value < 100:
time_for_closing = 3.5*(60/rpm_value)
else:
time_for_closing = 25*(60/rpm_value)
return time_for_closing
def torque_validator(serial_object):
'''
Returns a boolean value that depends on the torque of equipment.
'''
cp_value = serial_object[7]
if cp_value == b'off':
return False
else:
return True
def readings_printer(serial_object):
'''
Prints the results of the equipment readings at the screen.
As said before, the indexes 3, 5 and 7 represents the RPM
values, the torque values and the cP values respectively.
'''
rpm_value, cp_value, torque_value = (
float(serial_object[3]),
int(serial_object[7]),
float(serial_object[5])
)
print(f' RPM: {rpm_value:.>20} /// cP: {cp_value:.>20} '
f'/// Torque: {torque_value:.>20}%')
def values_storager(serial_object):
'''
Storages the readings inside a dict named 'registers'.
The keys are the RPM values. The values are two lists, the first
list receives the cP values and the second list receives the torque
values. Each key have two lists representing cP and torque values.
The 'object' parameter is the serial_object of serial_object_creator()
function.
The return is the dict registers with new values.
'''
rpm_value, cp_value, torque_value = (
float(serial_object[3]),
int(serial_object[7]),
float(serial_object[5])
)
if rpm_value not in registers.keys():
registers[rpm_value] = [[cp_value], [torque_value]]
elif rpm_value in registers.keys():
registers[rpm_value][0].append(cp_value)
registers[rpm_value][1].append(torque_value)
return registers
def data_processor(**registers):
'''
Processes the data of registers dict to delete outliers.
The cutoff parameter are (average - standard deviation) and
(average + standard deviation).
A for loop perform iteration on values of registers dict and exclude
outliers.
'''
for value in registers.values():
if len(value[0]) > 1:
mean_value = mean(value[0])
std_value = stdev(value[0])
if std_value != 0:
cp_list = [x for x in value[0] if (x > mean_value - std_value)]
cp_list = [x for x in cp_list if (x < mean_value + std_value)]
value[0] = cp_list
return registers
def logarithm_values_maker(**registers):
'''
Calculates the base-10 logarithm of the processed values.
The dict comprehension below is only to transform RPM values
in float types again, because the **kwargs only accept string
type as keys, and is necessary that RPM values are float type,
not string.
A new list (cp_list) is created to receive the cP values.
A iteration is made on keys of registers dict using for loop
to make a list with two lists inside of it. The first list
will store the base-10 logarithm values of RPM values. The second
list will store the base-10 logarithm values of cP values.
This function returns this logarithm_list.
'''
registers = {float(k): v for k, v in registers.items()}
cp_list = list()
for value in registers.values():
cp_list.append(mean(value[0]))
for key in registers.keys():
logarithm_list = [[log10(k) for k in registers.keys()
if mean(registers[k][0]) != 0],
[log10(v) for v in cp_list if v != 0]]
return logarithm_list
def date_storage():
'''
A function to create a tuple with the today's date.
This date will be in one cell of the workbook that
will be created and in the name of the xlsx file.
'''
date = datetime.date.today()
date_today = (date.day, date.month, date.year)
return date_today
def workbook_maker(file_name):
'''
This function creates a workbook in format .xlsx. and returns it.
The else statement below is because if some user delete the folder
'Viscosidades', the workbooks will be saved on Desktop.
'''
date_today = date_storage()
if path.isdir('C:/Users/UFC/Desktop/Viscosidades/'):
workbook = xlsxwriter.Workbook(
'C:/Users/UFC/Desktop/Viscosidades/'
f'{file_name}_{date_today[0]:02d}'
f'{date_today[1]:02d}{date_today[2]:04d}'
'.xlsx')
else:
workbook = xlsxwriter.Workbook(
'C:/Users/UFC/Desktop/'
f'{file_name}_{date_today[0]:02d}'
f'{date_today[1]:02d}{date_today[2]:04d}'
'.xlsx')
return workbook
def worksheet_name_function():
'''
This function records the name of each worksheet using the name of the
sample evaluated.
'''
sample_name = str(input('Digite o nome da amostra: ')).strip()
sample_name = regex_name_validation(sample_name)
return sample_name
def worksheet_maker(workbook, worksheet_name, **registers):
    '''
    Create a worksheet named after the sample and fill it with the raw
    readings, the processed (outlier-free) data and the log10 data.

    Layout of each worksheet:
      Column 'A'            : sample name and today's date.
      Columns 'B'..'D'      : all read data (RPM, cP and torque %).
      Columns 'F'..'I'      : processed data without outliers (RPM,
                              average cP, standard deviation and relative
                              standard deviation).
      Columns 'K' and 'L'   : log10 of the processed RPM and cP values.
      Cells 'M2', 'N2', 'O2': intercept, slope and R squared of the log10
                              values.
    Two scatter charts are inserted: one for the processed data and one
    for the log10 data.

    Fix: the categories range of the first chart used the malformed
    absolute reference '$F2$'; the correct A1 absolute form is '$F$2'
    (matching the '$K$2' used by the second chart).
    '''
    worksheet = workbook.add_worksheet(f'{worksheet_name.replace(" ", "")}')
    # Cell formats reused throughout the sheet.
    bold = workbook.add_format({'bold': True})
    italic = workbook.add_format({'italic': True})
    float_format = workbook.add_format({'num_format': '0.0000'})
    mean_format = workbook.add_format({'num_format': '0.00'})
    percentage_format = workbook.add_format({'num_format': '0.00%'})
    worksheet.set_column(0, 15, 16)
    worksheet.set_column(4, 4, 25)
    worksheet.set_column(9, 9, 20)
    worksheet.write('A1', f'{worksheet_name}', bold)
    worksheet.write('A2', 'Data', italic)
    date_today = date_storage()
    worksheet.write('A3',
                    f'{date_today[0]:02d}/{date_today[1]:02d}/'
                    f'{date_today[2]:04d}')
    # Column headers.
    worksheet.write('B1', 'RPM', bold)
    worksheet.write('C1', 'cP', bold)
    worksheet.write('D1', 'Torque(%)', bold)
    worksheet.write('E1', 'Processamento dos dados >>', bold)
    worksheet.write('F1', 'RPM', bold)
    worksheet.write('G1', 'Médias: cP', bold)
    worksheet.write('H1', 'Desvio padrão: cP', bold)
    worksheet.write('I1', 'DP (%): cP', bold)
    worksheet.write('J1', 'Escala logarítmica >>', bold)
    worksheet.write('K1', 'RPM Log10', bold)
    worksheet.write('L1', 'cP Log10', bold)
    worksheet.write('M1', 'Intercepto', bold)
    worksheet.write('N1', 'Inclinação', bold)
    worksheet.write('O1', 'R²', bold)
    # Raw readings: RPM, cP and torque go into cols 1, 2 and 3.
    # Each RPM key holds two parallel lists: [cP values, torque values].
    row = 1
    col = 1
    for key, value in registers.items():
        for cp in value[0]:
            worksheet.write(row, col, float(key))
            worksheet.write(row, col + 1, cp)
            row += 1
        # Rewind to write the torque list alongside the cP list.
        row -= len(value[0])
        for torque in value[1]:
            worksheet.write(row, col + 2, torque)
            row += 1
    processed_registers = data_processor(**registers)
    # Processed values: RPM, mean(cP), stdev and stdev% go into
    # cols 5, 6, 7 and 8.
    row = col = 1
    for key, value in processed_registers.items():
        if mean(value[0]) != 0:
            worksheet.write(row, col + 4, float(key))
            if len(value[0]) > 1:
                worksheet.write(row, col + 5, mean(value[0]), mean_format)
                worksheet.write(row, col + 6, stdev(value[0]), float_format)
                worksheet.write(row, col + 7,
                                (stdev(value[0])/(mean(value[0]))),
                                percentage_format)
            else:
                # stdev() needs at least two points; report 0 deviation.
                worksheet.write(row, col + 5, value[0][0], mean_format)
                worksheet.write(row, col + 6, 0)
                worksheet.write(row, col + 7, 0)
            row += 1
    log_list = logarithm_values_maker(**processed_registers)
    # log10 values go into cols K and L.
    worksheet.write_column('K2', log_list[0], float_format)
    worksheet.write_column('L2', log_list[1], float_format)
    # NOTE(review): the K2:K20 / L2:L20 ranges assume at most 19 data
    # rows — confirm this matches the equipment's 19 possible RPM values.
    worksheet.write_array_formula(
        'M2:M2', '{=INTERCEPT(L2:L20, K2:K20)}',
        float_format
    )
    worksheet.write_array_formula(
        'N2:N2', '{=SLOPE(L2:L20, K2:K20)}',
        float_format
    )
    worksheet.write_array_formula(
        'O2:O2', '{=RSQ(K2:K20, L2:L20)}',
        float_format
    )
    # Chart 1: processed data (RPM x mean cP).
    chart_1 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart_1.add_series({
        'categories': f'={worksheet_name.replace(" ", "")}'
                      f'!$F$2:$F${len(processed_registers.keys()) + 1}',
        'values': f'={worksheet_name.replace(" ", "")}'
                  f'!$G$2:$G${len(processed_registers.values()) + 1}',
        'line': {'color': 'green'}
    })
    chart_1.set_title({'name': f'{worksheet_name}'})
    chart_1.set_x_axis({
        'name': 'RPM',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_1.set_y_axis({
        'name': 'cP',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_1.set_size({
        'width': 500,
        'height': 400
    })
    worksheet.insert_chart(row + 2, 5, chart_1)
    # Chart 2: log10 data with a linear trendline (equation and R²).
    chart_2 = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
    chart_2.add_series({
        'categories': f'={worksheet_name.replace(" ", "")}'
                      f'!$K$2:$K${len(processed_registers.keys()) + 1}',
        'values': f'={worksheet_name.replace(" ", "")}'
                  f'!$L$2:$L${len(processed_registers.values()) + 1}',
        'line': {'color': 'blue'},
        'trendline': {
            'type': 'linear',
            'display_equation': True,
            'display_r_squared': True,
            'line': {
                'color': 'red',
                'width': 1,
                'dash_type': 'long_dash',
            },
        },
    })
    chart_2.set_title({'name': f'Curva escala log: {worksheet_name}'})
    chart_2.set_x_axis({
        'name': 'RPM',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_2.set_y_axis({
        'name': 'cP',
        'name_font': {'size': 14, 'bold': True},
    })
    chart_2.set_size({
        'width': 500,
        'height': 400
    })
    worksheet.insert_chart(row + 2, 10, chart_2)
def workbook_close_function(workbook):
    '''
    Close the created workbook.

    NOTE: with xlsxwriter the .xlsx file is only written to disk when the
    workbook is closed, so this must run before the file is opened.
    '''
    workbook.close()
def workbook_launcher(workbook):
    '''
    Open the generated workbook so the user can inspect the results.

    The previous implementation rebuilt the file path from the module-level
    global ``file_name`` plus today's date, duplicating the logic in
    ``workbook_maker`` (and ignoring its own ``workbook`` parameter).
    Using the path recorded on the workbook object itself keeps the two in
    sync by construction and removes the global dependency.
    '''
    # xlsxwriter's Workbook stores the path it was constructed with in
    # the ``filename`` attribute.
    startfile(workbook.filename)
# Init: top-level driver. Shows the menus, reads the viscosimeter over the
# serial port in a loop, and writes one worksheet per sample into a single
# workbook. ``file_name`` and ``workbook`` are module-level globals also
# used by the helper functions above.
initial_menu()
file_name = file_name_function()
workbook = workbook_maker(file_name)
repeat_option = ''
# Accepts a single 'S' (yes) or 'N' (no) answer for the repeat prompt.
regex_repeat = re.compile(r'[NS]')
while repeat_option != 'N':
    repeat_option = ''
    worksheet_name = worksheet_name_function()
    sleep(2.5)
    print('Aguarde que em instantes o programa se inicializará.')
    sleep(2.5)
    print('Ao finalizar suas leituras, pressione ' + Fore.RED + 'STOP '
          + Style.RESET_ALL + 'no aparelho.')
    sleep(2.5)
    print('Ao pressionar ' + Fore.RED +
          'STOP' + Style.RESET_ALL +
          ', o programa levará alguns segundos para preparar sua planilha. '
          'Aguarde.')
    registers = dict()  # The registered values will be stored in this dict.
    # ``time`` is the serial-port timeout in seconds (not the time module);
    # it is recomputed after each reading by timer_for_closing_port().
    time = 300  # First timeout value. Will change after the first rotation.
    sleep(5)  # Delay the beginning of the script. This helps to avoid errors.
    print(Fore.GREEN + '-' * 90)
    print(Fore.BLUE + '#' * 40 + Fore.CYAN + ' LEITURAS '
          + Fore.BLUE + '#' * 40)
    print(Fore.GREEN + '-' * 90)
    # Reading loop: runs until the user presses STOP on the equipment
    # (raises IndexError below) or interrupts from the keyboard.
    while True:
        try:
            # NOTE(review): ``object`` shadows the builtin of the same
            # name; it holds one parsed serial reading here.
            object = serial_object_creator(time)
            time = timer_for_closing_port(object)
            if torque_validator(object):
                if not object:
                    print('Torque máximo atingido ou erro no aparelho')
                else:
                    readings_printer(object)
                    registers = values_storager(object)
            else:
                final_menu()
        except KeyboardInterrupt:
            print('Programa interrompido por atalho de teclado')
            break
        except IndexError:  # This exception finishes the loop.
            # Pressing STOP yields an incomplete serial frame, so indexing
            # it raises IndexError; sort the readings by RPM before saving.
            print('Foi pressionado ' + Fore.RED + 'STOP'
                  + Style.RESET_ALL + ' no aparelho')
            registers = dict(OrderedDict(sorted(registers.items())))
            break
    # Keys are stringified because **kwargs only accepts str keys;
    # worksheet_maker's helpers convert them back to float.
    worksheet_maker(
        workbook, worksheet_name,
        **{str(k): v for k, v in registers.items()}
    )
    print('Você quer ler outra amostra?')
    print('Responda com "S" para se sim ou "N" para se não.')
    print('Se você quiser ler outra amostra, coloque a nova amostra,')
    print('retire e limpe o fuso e, após isso,')
    print('responda abaixo após pressionar '
          + Fore.GREEN + 'START' + Style.RESET_ALL + ' no aparelho:')
    # Keep asking until the answer is exactly 'S' or 'N'.
    while not regex_repeat.search(repeat_option):
        repeat_option = str(input('[S/N]: ')).strip().upper()
    if repeat_option == 'S':
        print('Pressione ' + Fore.GREEN + 'START')
        sleep(5)
workbook_close_function(workbook)
workbook_launcher(workbook)
print(Fore.GREEN + 'OBRIGADO POR USAR O VISCOTESTER 6L SCRIPT')
| en | 0.810419 | Viscotester: a Python script to process data from a viscosimeter Visco Tester 6L Haake. The documentation is in English but the program is used in a Brazilian laboratory, so the language of the prints is Portuguese-BR. This program is made specifically for Visco Tester 6L Haake and Windows OS. A viscosimeter is a equipment used to measure the viscosity of liquids and fluids. The equipment use tools named spindles. The spindle is immersed in the substance that will be evaluated and is rotated at different rotations. The output of equipment are the rotation per minute (RPM) parameter, the viscosity (cP) and the torque (%) value. The torque value is calculated based on the speed and the geometry of the spindle. # Imports Prints an initial menu at the screen. Prints some informations if the maximum torque is obtained from the Viscotester and require the user to press STOP on the equipment. Does a validation on sample name and worksheet name using regex to avoid errors on the file that will be created. The input is the name that the user typed to the program. The function repeats the requirement of the name if the user used forbidden characters (like \\/|<>*:?"). Returns the name that will be used. Require the name of the sample to put on the xlsx filename. The regex_name_validation() function is used here to avoid errors. At each rotation of the equipment this function creates a serial object. This is important because at each rotation the timeout to close serial port should change. This occurs because the time to break the while loop is dependent of the rotation of equipment. The time to closing port responsibility is assigned to 'time_set' variable. The data of serial port will be assigned to 'ser' variable. The class serial.Serial receive 'COM1' as port parameter because this program is used on Windows OS. Baudrate parameter is 9600 and timeout parameter is equal to 'time_set' variable. 
The variable 'time_set' is defined in timer_for_closing_port() function below. Of 'serial_object', index [3] is the RPM value, index [5] is the torque value and the index [7] is the viscosity (cP) value. Defines a new time for closing serial port. This times depends on the rotation per minute parameter of equipment. The possible values for rotation per minute parameter of the equipment are: 0.3, 0.5, 0.6, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 10, 12, 20, 30, 50, 60, 100 and 200 RPMs. When the rotation per minute (RPM) parameter of equipment is lower than 6 RPMs, the 'time_for_closing' value is defined by the 'if' statement below. If the value of RPM is above 6 and below 100, 'time_for_closing' value is defined by the 'elif' statement. Finally, if the RPM value is 100 or 200 RPMs, 'time_for_closing' value is defined by 'else' statement. These differences on calculation of 'time_for_closing' variable occurs because this variable is responsible to finish the loop that controls the program, and at high rotations the probability of errors increase. The 'float(object[3])' value below is the RPM parameter. 'float' function is necessary because the equipment send to the computer bytes literals. Returns a boolean value that depends on the torque of equipment. Prints the results of the equipment readings at the screen. As said before, the indexes 3, 5 and 7 represents the RPM values, the torque values and the cP values respectively. Storages the readings inside a dict named 'registers'. The keys are the RPM values. The values are two lists, the first list receives the cP values and the second list receives the torque values. Each key have two lists representing cP and torque values. The 'object' parameter is the serial_object of serial_object_creator() function. The return is the dict registers with new values. Processes the data of registers dict to delete outliers. The cutoff parameter are (average - standard deviation) and (average + standard deviation). 
A for loop perform iteration on values of registers dict and exclude outliers. Calculates the base-10 logarithm of the processed values. The dict comprehension below is only to transform RPM values in float types again, because the **kwargs only accept string type as keys, and is necessary that RPM values are float type, not string. A new list (cp_list) is created to receive the cP values. A iteration is made on keys of registers dict using for loop to make a list with two lists inside of it. The first list will store the base-10 logarithm values of RPM values. The second list will store the base-10 logarithm values of cP values. This function returns this logarithm_list. A function to create a tuple with the today's date. This date will be in one cell of the workbook that will be created and in the name of the xlsx file. This function creates a workbook in format .xlsx. and returns it. The else statement below is because if some user delete the folder 'Viscosidades', the workbooks will be saved on Desktop. This function records the name of each worksheet using the name of the sample evaluated. This function creates new worksheets inside the created workbook and put the values in columns. In each worksheet: Column 'A' will store the sample name and the date. Columns 'B', 'C', and 'D' will store all read data (RPM, cP and Torque values). Columns 'F', 'G', 'H', and 'I' will store the processed data, without outliers, respectively: RPM, average cP, standard deviation and relative standard deviation. Columns 'K' and 'L' will receive log10 values of processed RPM and cP values. Finally, in columns 'M', 'N' and 'O', the cells 'M2', 'N2' and 'O2' will receive intercept, slope and R squared values of log10 values. Each worksheet will have two charts, one for processed data and other for log10 data. # The for loop below puts the read values inside .xlsx cells. # RPM, cP and torque values will be stored on cols 1, 2 and 3. 
# The for loop below puts the processed values inside .xlsx cells. # RPM, mean(cP), stdev and stdev% will be stored on cols 5, 6, 7 and 8. # write_column() function below puts the log10 values inside .xlsx cells. A simple function to close the created workbook. A simple function to launch the workbook for user to see his results. # Init. # The registered values will be stored in this dict. # First timeout value. Will change after the first rotation. # Delay the beginning of the script. This helps to avoid errors. # This exception finishes the loop. | 2.645195 | 3 |
src/account/azext_account/generated/_params.py | tilnl/azure-cli-extensions | 0 | 6617807 | <filename>src/account/azext_account/generated/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
get_enum_type
)
def load_arguments(self, _):
    """Register CLI arguments for the `account subscription` command group.

    Called by the Azure CLI command loader (`self`); the second positional
    parameter (the CLI context) is unused here. NOTE(review): the header
    matches azure-cli codegen output, so this file is presumably
    auto-generated — confirm before hand-editing.
    """
    # `account subscription create`: new subscription under a Microsoft
    # Customer Agreement billing hierarchy.
    with self.argument_context('account subscription create') as c:
        c.argument('billing_account_name', help='The name of the Microsoft Customer Agreement billing account for which you want to create the subscription.')
        c.argument('billing_profile_name', help='The name of the billing profile in the billing account for which you want to create the subscription.')
        c.argument('invoice_section_name', help='The name of the invoice section in the billing account for which you want to create the subscription.')
        c.argument('display_name', help='The friendly name of the subscription.')
        c.argument('sku_id', help='The SKU ID of the Azure plan. Azure plan determines the pricing and service-level agreement of the subscription. Use 001 for Microsoft Azure Plan and 002 for Microsoft Azure Plan for DevTest.')
        c.argument('cost_center', help='If set, the cost center will show up on the Azure usage and charges file.')
        c.argument('owner', help='Active Directory Principal who’ll get owner access on the new subscription.')
        c.argument('management_group_id', help='The identifier of the management group to which this subscription will be associated.')
    # `create-in-enrollment-account`: new subscription billed to an EA
    # enrollment account.
    with self.argument_context('account subscription create-in-enrollment-account') as c:
        c.argument('enrollment_account_name', help='The name of the enrollment account to which the subscription will be billed.')
        c.argument('display_name', help='The display name of the subscription.')
        c.argument('management_group_id', help='The Management Group Id.')
        c.argument('owners', nargs='+', help='The list of principals that should be granted Owner access on the subscription. Principals should be of type User, Service Principal or Security Group.')
        c.argument('offer_type', arg_type=get_enum_type(['MS-AZR-0017P', 'MS-AZR-0148P']), help='The offer type of the subscription. For example, MS-AZR-0017P (EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement devTest) are available. Only valid when creating a subscription in a enrollment account scope.')
    # `create-csp`: new subscription for a CSP (Cloud Solution Provider)
    # customer.
    with self.argument_context('account subscription create-csp') as c:
        c.argument('billing_account_name', help='The name of the Microsoft Customer Agreement billing account for which you want to create the subscription.')
        c.argument('customer_name', help='The name of the customer.')
        c.argument('display_name', help='The friendly name of the subscription.')
        c.argument('sku_id', help='The SKU ID of the Azure plan. Azure plan determines the pricing and service-level agreement of the subscription. Use 001 for Microsoft Azure Plan and 002 for Microsoft Azure Plan for DevTest.')
        c.argument('reseller_id', help='Reseller ID, basically MPN Id.')
    # Lifecycle commands on an existing subscription.
    with self.argument_context('account subscription rename') as c:
        c.argument('subscription_id', help='Subscription Id.')
        c.argument('subscription_name', help='New subscription name')
    with self.argument_context('account subscription cancel') as c:
        c.argument('subscription_id', help='Subscription Id.')
    with self.argument_context('account subscription enable') as c:
        c.argument('subscription_id', help='Subscription Id.')
| <filename>src/account/azext_account/generated/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
get_enum_type
)
def load_arguments(self, _):
    """Register CLI arguments for the `account subscription` command group.

    Called by the Azure CLI command loader (`self`); the second positional
    parameter (the CLI context) is unused here. NOTE(review): the header
    matches azure-cli codegen output, so this file is presumably
    auto-generated — confirm before hand-editing.
    """
    # `account subscription create`: new subscription under a Microsoft
    # Customer Agreement billing hierarchy.
    with self.argument_context('account subscription create') as c:
        c.argument('billing_account_name', help='The name of the Microsoft Customer Agreement billing account for which you want to create the subscription.')
        c.argument('billing_profile_name', help='The name of the billing profile in the billing account for which you want to create the subscription.')
        c.argument('invoice_section_name', help='The name of the invoice section in the billing account for which you want to create the subscription.')
        c.argument('display_name', help='The friendly name of the subscription.')
        c.argument('sku_id', help='The SKU ID of the Azure plan. Azure plan determines the pricing and service-level agreement of the subscription. Use 001 for Microsoft Azure Plan and 002 for Microsoft Azure Plan for DevTest.')
        c.argument('cost_center', help='If set, the cost center will show up on the Azure usage and charges file.')
        c.argument('owner', help='Active Directory Principal who’ll get owner access on the new subscription.')
        c.argument('management_group_id', help='The identifier of the management group to which this subscription will be associated.')
    # `create-in-enrollment-account`: new subscription billed to an EA
    # enrollment account.
    with self.argument_context('account subscription create-in-enrollment-account') as c:
        c.argument('enrollment_account_name', help='The name of the enrollment account to which the subscription will be billed.')
        c.argument('display_name', help='The display name of the subscription.')
        c.argument('management_group_id', help='The Management Group Id.')
        c.argument('owners', nargs='+', help='The list of principals that should be granted Owner access on the subscription. Principals should be of type User, Service Principal or Security Group.')
        c.argument('offer_type', arg_type=get_enum_type(['MS-AZR-0017P', 'MS-AZR-0148P']), help='The offer type of the subscription. For example, MS-AZR-0017P (EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement devTest) are available. Only valid when creating a subscription in a enrollment account scope.')
    # `create-csp`: new subscription for a CSP (Cloud Solution Provider)
    # customer.
    with self.argument_context('account subscription create-csp') as c:
        c.argument('billing_account_name', help='The name of the Microsoft Customer Agreement billing account for which you want to create the subscription.')
        c.argument('customer_name', help='The name of the customer.')
        c.argument('display_name', help='The friendly name of the subscription.')
        c.argument('sku_id', help='The SKU ID of the Azure plan. Azure plan determines the pricing and service-level agreement of the subscription. Use 001 for Microsoft Azure Plan and 002 for Microsoft Azure Plan for DevTest.')
        c.argument('reseller_id', help='Reseller ID, basically MPN Id.')
    # Lifecycle commands on an existing subscription.
    with self.argument_context('account subscription rename') as c:
        c.argument('subscription_id', help='Subscription Id.')
        c.argument('subscription_name', help='New subscription name')
    with self.argument_context('account subscription cancel') as c:
        c.argument('subscription_id', help='Subscription Id.')
    with self.argument_context('account subscription enable') as c:
        c.argument('subscription_id', help='Subscription Id.')
| en | 0.48263 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long # pylint: disable=too-many-lines # pylint: disable=too-many-statements | 2.140429 | 2 |
main.py | xaviermarquez-alba/ulauncher-virtualbox | 7 | 6617808 | from ulauncher_virtualbox.VirtualboxExtension import VirtualboxExtension
if __name__ == '__main__':
VirtualboxExtension().run()
| from ulauncher_virtualbox.VirtualboxExtension import VirtualboxExtension
if __name__ == '__main__':
VirtualboxExtension().run()
| none | 1 | 1.083837 | 1 | |
tensorflow_transform/beam/analysis_graph_builder.py | brianmartin/transform | 2 | 6617809 | <gh_stars>1-10
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create the implementation graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import graph_tools
from tensorflow_transform import nodes
from tensorflow_transform.beam import beam_nodes
def _tensor_name(tensor):
  """Return the tensor's name with a trailing ":0" suffix stripped."""
  # str() normalizes tensor.name, which is unicode in Python 3 and bytes
  # in Python 2.
  full_name = str(tensor.name)
  if full_name.endswith(':0'):
    return full_name[:-2]
  return full_name
class _ReadyVisitor(nodes.Visitor):
  """Visitor that marks whether each node can already be executed.

  A `TensorSource` is ready when every tensor it reads is ready according
  to the graph analyzer; any other operation is ready once all of its
  inputs are ready.
  """

  def __init__(self, graph_analyzer):
    self._graph_analyzer = graph_analyzer

  def visit(self, operation_def, input_values):
    if isinstance(operation_def, analyzer_nodes.TensorSource):
      ready = all(map(self._graph_analyzer.ready_to_run,
                      operation_def.tensors))
    else:
      ready = all(input_values)
    # Broadcast the single readiness flag to every output of the operation.
    return (ready,) * operation_def.num_outputs

  def validate_value(self, value):
    assert isinstance(value, bool)
class _TranslateVisitor(nodes.Visitor):
  """Visitor that rewrites the user's operation graph as a Beam pipeline.

  The input graph comes from the user's preprocessing_fn; the translated
  graph describes the equivalent Beam computation.
  """

  def __init__(self):
    self.phase = None
    self.extracted_values_dict = None
    self.intermediate_output_signature = None

  def visit(self, operation_def, input_values):
    if not isinstance(operation_def, analyzer_nodes.TensorSource):
      return nodes.OperationNode(operation_def, input_values).outputs
    # Register each source tensor in the output signature so that the
    # SavedModel produces it, collecting its key at the same time.
    tensor_keys = []
    for tensor in operation_def.tensors:
      key = _tensor_name(tensor)
      self.intermediate_output_signature[key] = tensor
      tensor_keys.append(key)
    extract = nodes.apply_operation(
        beam_nodes.ExtractFromDict, self.extracted_values_dict,
        keys=tuple(tensor_keys), label=operation_def.label)
    return (extract,)

  def validate_value(self, value):
    assert isinstance(value, nodes.ValueNode)
def build(graph, input_signature, output_signature):
  """Builds the Beam-side operation graph describing how to run the pipeline.

  The default graph is assumed to contain some `Analyzer`s which must be
  executed by doing a full pass over the dataset, and passing the inputs for
  that analyzer into some implementation, then taking the results and
  replacing the `Analyzer`s outputs with constants in the graph containing
  these results.

  Execution proceeds in phases. In each phase the `Analyzer`s and table
  initializers whose dependencies have already been computed are run. An
  `Analyzer` or op is ready to run when all its dependencies in the graph
  have been computed. Thus if the graph is constructed by

  def preprocessing_fn(input)
    x = inputs['x']
    scaled_0 = x - tft.min(x)
    scaled_0_1 = scaled_0 / tft.max(scaled_0)

  then the first phase will contain the analyzer corresponding to the call
  to `min`, because `x` is an input and so is ready to compute in the first
  phase, while the second phase will contain the analyzer corresponding to
  the call to `max` since `scaled_1` depends on the result of the call to
  `tft.min` which is computed in the first phase.

  More generally, we define a level for each op and each `Analyzer` by
  walking the graph, assigning to each operation the max level of its
  inputs, to each `Tensor` the level of its operation, unless it's the
  output of an `Analyzer` in which case we assign the level of its
  `Analyzer` plus one.

  Args:
    graph: A `tf.Graph`.
    input_signature: A dict whose keys are strings and values are `Tensor`s
      or `SparseTensor`s.
    output_signature: A dict whose keys are strings and values are
      `Tensor`s or `SparseTensor`s.

  Returns:
    The value node for the final `CreateSavedModel` operation; its
    dependencies encode every analysis phase needed to produce it.

  Raises:
    ValueError: if the graph cannot be analyzed.
  """
  # Tensor sinks are the analyzer outputs that must be replaced by computed
  # constants; clear the collection so they aren't processed twice.
  tensor_sinks = graph.get_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
  graph.clear_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
  phase = 0
  tensor_bindings = []
  sink_tensors_ready = {tensor_sink.tensor: False
                        for tensor_sink in tensor_sinks}
  translate_visitor = _TranslateVisitor()
  translate_traverser = nodes.Traverser(translate_visitor)
  # Loop until every sink tensor has a binding; each iteration is one phase.
  while not all(sink_tensors_ready.values()):
    # Determine which table init ops and which pending tensor replacements
    # are ready to run in this phase, based on whether their dependencies
    # are ready.
    graph_analyzer = graph_tools.InitializableGraphAnalyzer(
        graph, input_signature.values(), sink_tensors_ready)
    ready_traverser = nodes.Traverser(_ReadyVisitor(graph_analyzer))

    # Now create and apply a SavedModel with all tensors in tensor_bindings
    # bound, which outputs all the tensors in the required tensor tuples.
    intermediate_output_signature = collections.OrderedDict()
    saved_model_future = nodes.apply_operation(
        beam_nodes.CreateSavedModel,
        *tensor_bindings,
        table_initializers=tuple(graph_analyzer.ready_table_initializers),
        output_signature=intermediate_output_signature,
        label='CreateSavedModelForAnalyzerInputs[{}]'.format(phase))
    extracted_values_dict = nodes.apply_operation(
        beam_nodes.ApplySavedModel,
        saved_model_future,
        phase=phase,
        label='ApplySavedModel[{}]'.format(phase))

    translate_visitor.phase = phase
    translate_visitor.intermediate_output_signature = (
        intermediate_output_signature)
    translate_visitor.extracted_values_dict = extracted_values_dict
    for tensor, value_node, is_asset_filepath in tensor_sinks:
      # Don't compute a binding/sink/replacement that's already been computed
      if sink_tensors_ready[tensor]:
        continue
      # Skip sinks whose dependencies aren't all ready in this phase.
      if not ready_traverser.visit_value_node(value_node):
        continue
      translated_value_node = translate_traverser.visit_value_node(value_node)
      name = _tensor_name(tensor)
      tensor_bindings.append(nodes.apply_operation(
          beam_nodes.CreateTensorBinding, translated_value_node,
          tensor=str(tensor.name), is_asset_filepath=is_asset_filepath,
          label='CreateTensorBinding[{}]'.format(name)))
      sink_tensors_ready[tensor] = True

    phase += 1

  # We need to make sure that the representation of this output_signature is
  # deterministic.
  output_signature = collections.OrderedDict(
      sorted(output_signature.items(), key=lambda t: t[0]))
  return nodes.apply_operation(
      beam_nodes.CreateSavedModel,
      *tensor_bindings,
      table_initializers=tuple(
          graph.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)),
      output_signature=output_signature,
      label='CreateSavedModel')
| # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create the implementation graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import graph_tools
from tensorflow_transform import nodes
from tensorflow_transform.beam import beam_nodes
def _tensor_name(tensor):
  """Return the tensor's name with a trailing ":0" suffix stripped."""
  # str() normalizes tensor.name, which is unicode in Python 3 and bytes
  # in Python 2.
  full_name = str(tensor.name)
  if full_name.endswith(':0'):
    return full_name[:-2]
  return full_name
class _ReadyVisitor(nodes.Visitor):
  """Visitor that marks whether each node can already be executed.

  A `TensorSource` is ready when every tensor it reads is ready according
  to the graph analyzer; any other operation is ready once all of its
  inputs are ready.
  """

  def __init__(self, graph_analyzer):
    self._graph_analyzer = graph_analyzer

  def visit(self, operation_def, input_values):
    if isinstance(operation_def, analyzer_nodes.TensorSource):
      ready = all(map(self._graph_analyzer.ready_to_run,
                      operation_def.tensors))
    else:
      ready = all(input_values)
    # Broadcast the single readiness flag to every output of the operation.
    return (ready,) * operation_def.num_outputs

  def validate_value(self, value):
    assert isinstance(value, bool)
class _TranslateVisitor(nodes.Visitor):
  """Visitor that rewrites the user's operation graph as a Beam pipeline.

  The input graph comes from the user's preprocessing_fn; the translated
  graph describes the equivalent Beam computation.
  """

  def __init__(self):
    self.phase = None
    self.extracted_values_dict = None
    self.intermediate_output_signature = None

  def visit(self, operation_def, input_values):
    if not isinstance(operation_def, analyzer_nodes.TensorSource):
      return nodes.OperationNode(operation_def, input_values).outputs
    # Register each source tensor in the output signature so that the
    # SavedModel produces it, collecting its key at the same time.
    tensor_keys = []
    for tensor in operation_def.tensors:
      key = _tensor_name(tensor)
      self.intermediate_output_signature[key] = tensor
      tensor_keys.append(key)
    extract = nodes.apply_operation(
        beam_nodes.ExtractFromDict, self.extracted_values_dict,
        keys=tuple(tensor_keys), label=operation_def.label)
    return (extract,)

  def validate_value(self, value):
    assert isinstance(value, nodes.ValueNode)
def build(graph, input_signature, output_signature):
"""Returns a list of `Phase`s describing how to execute the pipeline.
The default graph is assumed to contain some `Analyzer`s which must be
executed by doing a full pass over the dataset, and passing the inputs for
that analyzer into some implementation, then taking the results and replacing
the `Analyzer`s outputs with constants in the graph containing these results.
The execution plan is described by a list of `Phase`s. Each phase contains
a list of `Analyzer`s, which are the `Analyzer`s which are ready to run in
that phase, together with a list of ops, which are the table initializers that
are ready to run in that phase.
An `Analyzer` or op is ready to run when all its dependencies in the graph
have been computed. Thus if the graph is constructed by
def preprocessing_fn(input)
x = inputs['x']
scaled_0 = x - tft.min(x)
scaled_0_1 = scaled_0 / tft.max(scaled_0)
Then the first phase will contain the analyzer corresponding to the call to
`min`, because `x` is an input and so is ready to compute in the first phase,
while the second phase will contain the analyzer corresponding to the call to
`max` since `scaled_1` depends on the result of the call to `tft.min` which
is computed in the first phase.
More generally, we define a level for each op and each `Analyzer` by walking
the graph, assigning to each operation the max level of its inputs, to each
`Tensor` the level of its operation, unless it's the output of an `Analyzer`
in which case we assign the level of its `Analyzer` plus one.
Args:
graph: A `tf.Graph`.
input_signature: A dict whose keys are strings and values are `Tensor`s or
`SparseTensor`s.
output_signature: A dict whose keys are strings and values are `Tensor`s or
`SparseTensor`s.
Returns:
A list of `Phase`s.
Raises:
ValueError: if the graph cannot be analyzed.
"""
tensor_sinks = graph.get_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
graph.clear_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
phase = 0
tensor_bindings = []
sink_tensors_ready = {tensor_sink.tensor: False
for tensor_sink in tensor_sinks}
translate_visitor = _TranslateVisitor()
translate_traverser = nodes.Traverser(translate_visitor)
while not all(sink_tensors_ready.values()):
# Determine which table init ops are ready to run in this phase
# Determine which keys of pending_tensor_replacements are ready to run
# in this phase, based in whether their dependencies are ready.
graph_analyzer = graph_tools.InitializableGraphAnalyzer(
graph, input_signature.values(), sink_tensors_ready)
ready_traverser = nodes.Traverser(_ReadyVisitor(graph_analyzer))
# Now create and apply a SavedModel with all tensors in tensor_bindings
# bound, which outputs all the tensors in the required tensor tuples.
intermediate_output_signature = collections.OrderedDict()
saved_model_future = nodes.apply_operation(
beam_nodes.CreateSavedModel,
*tensor_bindings,
table_initializers=tuple(graph_analyzer.ready_table_initializers),
output_signature=intermediate_output_signature,
label='CreateSavedModelForAnalyzerInputs[{}]'.format(phase))
extracted_values_dict = nodes.apply_operation(
beam_nodes.ApplySavedModel,
saved_model_future,
phase=phase,
label='ApplySavedModel[{}]'.format(phase))
translate_visitor.phase = phase
translate_visitor.intermediate_output_signature = (
intermediate_output_signature)
translate_visitor.extracted_values_dict = extracted_values_dict
for tensor, value_node, is_asset_filepath in tensor_sinks:
# Don't compute a binding/sink/replacement that's already been computed
if sink_tensors_ready[tensor]:
continue
if not ready_traverser.visit_value_node(value_node):
continue
translated_value_node = translate_traverser.visit_value_node(value_node)
name = _tensor_name(tensor)
tensor_bindings.append(nodes.apply_operation(
beam_nodes.CreateTensorBinding, translated_value_node,
tensor=str(tensor.name), is_asset_filepath=is_asset_filepath,
label='CreateTensorBinding[{}]'.format(name)))
sink_tensors_ready[tensor] = True
phase += 1
# We need to make sure that the representation of this output_signature is
# deterministic.
output_signature = collections.OrderedDict(
sorted(output_signature.items(), key=lambda t: t[0]))
return nodes.apply_operation(
beam_nodes.CreateSavedModel,
*tensor_bindings,
table_initializers=tuple(
graph.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)),
output_signature=output_signature,
label='CreateSavedModel') | en | 0.899064 | # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Functions to create the implementation graph. Get a name of a tensor without trailing ":0" when relevant. # tensor.name is unicode in Python 3 and bytes in Python 2 so convert to # bytes here. Visitor to determine if a node is ready to run. Visitor that translates the operation graph. The original graph is defined by the user in the preprocessing_fn. The translated graph represents a Beam pipeline. # Add tensor to signature so it gets produced by the SavedModel. Returns a list of `Phase`s describing how to execute the pipeline. The default graph is assumed to contain some `Analyzer`s which must be executed by doing a full pass over the dataset, and passing the inputs for that analyzer into some implementation, then taking the results and replacing the `Analyzer`s outputs with constants in the graph containing these results. The execution plan is described by a list of `Phase`s. Each phase contains a list of `Analyzer`s, which are the `Analyzer`s which are ready to run in that phase, together with a list of ops, which are the table initializers that are ready to run in that phase. An `Analyzer` or op is ready to run when all its dependencies in the graph have been computed. 
Thus if the graph is constructed by def preprocessing_fn(input) x = inputs['x'] scaled_0 = x - tft.min(x) scaled_0_1 = scaled_0 / tft.max(scaled_0) Then the first phase will contain the analyzer corresponding to the call to `min`, because `x` is an input and so is ready to compute in the first phase, while the second phase will contain the analyzer corresponding to the call to `max` since `scaled_1` depends on the result of the call to `tft.min` which is computed in the first phase. More generally, we define a level for each op and each `Analyzer` by walking the graph, assigning to each operation the max level of its inputs, to each `Tensor` the level of its operation, unless it's the output of an `Analyzer` in which case we assign the level of its `Analyzer` plus one. Args: graph: A `tf.Graph`. input_signature: A dict whose keys are strings and values are `Tensor`s or `SparseTensor`s. output_signature: A dict whose keys are strings and values are `Tensor`s or `SparseTensor`s. Returns: A list of `Phase`s. Raises: ValueError: if the graph cannot be analyzed. # Determine which table init ops are ready to run in this phase # Determine which keys of pending_tensor_replacements are ready to run # in this phase, based in whether their dependencies are ready. # Now create and apply a SavedModel with all tensors in tensor_bindings # bound, which outputs all the tensors in the required tensor tuples. # Don't compute a binding/sink/replacement that's already been computed # We need to make sure that the representation of this output_signature is # deterministic. | 2.694805 | 3 |
TestCase.py | bluefin1986/tinyspark | 3 | 6617810 |
# coding: utf-8
# In[ ]:
import KlineService
import RSICompute
# KlineService.downloadAllStocks("2019-10-16")
# KlineService.downloadAllKlineDataOfSingleDay("2019-09-30")
# KlineService.readStockKline("sh.600000", "day", "2018-01-01", "2019-09-29")
# KlineService.downloadAllKlineDataOfPeriod("60m", "2019-10-21")
# KlineService.downloadAllKlineDataOfPeriod("day", "2019-10-21")
# KlineService.readAllStockKline("day", "2019-10-11", "2019-10-11")
# RSICompute.downloadAllKlineDataOfSingleDay("2019-09-24")
# RSICompute.downloadAllKlineDataOfPeriod("day", "2017-01-01")
# RSICompute.downloadAllStocks("2019-09-23")
# dfStocks = KlineService.allStocks()
#计算RSI
# RSICompute.computeAllRSIDataOfPeriod("day", "2017-01-01")
# RSICompute.computeAllRSIDataOfPeriod("60m", "2018-01-01")
# RSICompute.computeAllRSIData("day", "2019-09-27")
# df600673 = readRSI("day", "sh.600673", "2019-09-24","2019-09-30")
# df002030 = readRSI("60m", "sz.002030", "2019-09-30","2019-09-30")
# valueArr = df600673["rsi_6"]
# valueArr = np.array(valueArr)
# set_trace()
# a = np.reshape(valueArr, (-1, len(df600673["rsi_6"])))
# integrateValues(a)
computeDate = "2019-10-21"
df = RSICompute.computeAllRSIDataIntegrate("day", computeDate, False)
df = df[df["rsi_inte_6"] <= RSICompute.RSI_INTE_OVERSELL_THRESHOLD_DAY].sort_values(by=['rsi_inte_6'])
df.to_csv("/Users/matt/Downloads/dayRSI_integrate_" + computeDate + ".csv")
|
# coding: utf-8
# In[ ]:
import KlineService
import RSICompute
# KlineService.downloadAllStocks("2019-10-16")
# KlineService.downloadAllKlineDataOfSingleDay("2019-09-30")
# KlineService.readStockKline("sh.600000", "day", "2018-01-01", "2019-09-29")
# KlineService.downloadAllKlineDataOfPeriod("60m", "2019-10-21")
# KlineService.downloadAllKlineDataOfPeriod("day", "2019-10-21")
# KlineService.readAllStockKline("day", "2019-10-11", "2019-10-11")
# RSICompute.downloadAllKlineDataOfSingleDay("2019-09-24")
# RSICompute.downloadAllKlineDataOfPeriod("day", "2017-01-01")
# RSICompute.downloadAllStocks("2019-09-23")
# dfStocks = KlineService.allStocks()
#计算RSI
# RSICompute.computeAllRSIDataOfPeriod("day", "2017-01-01")
# RSICompute.computeAllRSIDataOfPeriod("60m", "2018-01-01")
# RSICompute.computeAllRSIData("day", "2019-09-27")
# df600673 = readRSI("day", "sh.600673", "2019-09-24","2019-09-30")
# df002030 = readRSI("60m", "sz.002030", "2019-09-30","2019-09-30")
# valueArr = df600673["rsi_6"]
# valueArr = np.array(valueArr)
# set_trace()
# a = np.reshape(valueArr, (-1, len(df600673["rsi_6"])))
# integrateValues(a)
computeDate = "2019-10-21"
df = RSICompute.computeAllRSIDataIntegrate("day", computeDate, False)
df = df[df["rsi_inte_6"] <= RSICompute.RSI_INTE_OVERSELL_THRESHOLD_DAY].sort_values(by=['rsi_inte_6'])
df.to_csv("/Users/matt/Downloads/dayRSI_integrate_" + computeDate + ".csv")
| en | 0.243683 | # coding: utf-8 # In[ ]: # KlineService.downloadAllStocks("2019-10-16") # KlineService.downloadAllKlineDataOfSingleDay("2019-09-30") # KlineService.readStockKline("sh.600000", "day", "2018-01-01", "2019-09-29") # KlineService.downloadAllKlineDataOfPeriod("60m", "2019-10-21") # KlineService.downloadAllKlineDataOfPeriod("day", "2019-10-21") # KlineService.readAllStockKline("day", "2019-10-11", "2019-10-11") # RSICompute.downloadAllKlineDataOfSingleDay("2019-09-24") # RSICompute.downloadAllKlineDataOfPeriod("day", "2017-01-01") # RSICompute.downloadAllStocks("2019-09-23") # dfStocks = KlineService.allStocks() #计算RSI # RSICompute.computeAllRSIDataOfPeriod("day", "2017-01-01") # RSICompute.computeAllRSIDataOfPeriod("60m", "2018-01-01") # RSICompute.computeAllRSIData("day", "2019-09-27") # df600673 = readRSI("day", "sh.600673", "2019-09-24","2019-09-30") # df002030 = readRSI("60m", "sz.002030", "2019-09-30","2019-09-30") # valueArr = df600673["rsi_6"] # valueArr = np.array(valueArr) # set_trace() # a = np.reshape(valueArr, (-1, len(df600673["rsi_6"]))) # integrateValues(a) | 1.975162 | 2 |
api/v1/rest/forms.py | anthill-gaming/promo | 0 | 6617811 | from anthill.framework.forms.orm import (
ModelForm, ModelUpdateForm, ModelCreateForm, ModelSearchForm)
from promo.models import PromoCode
class EditPromoCodeForm(ModelForm):
class Meta:
model = PromoCode
exclude = ['key']
| from anthill.framework.forms.orm import (
ModelForm, ModelUpdateForm, ModelCreateForm, ModelSearchForm)
from promo.models import PromoCode
class EditPromoCodeForm(ModelForm):
class Meta:
model = PromoCode
exclude = ['key']
| none | 1 | 1.531071 | 2 | |
kubernetes_typed/client/models/v1beta1_ingress_class_list.py | sobolevn/kubernetes-typed | 22 | 6617812 | <reponame>sobolevn/kubernetes-typed
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1IngressClassListDict generated type."""
from typing import TypedDict, List
from kubernetes_typed.client import V1ListMetaDict, V1beta1IngressClassDict
V1beta1IngressClassListDict = TypedDict(
"V1beta1IngressClassListDict",
{
"apiVersion": str,
"items": List[V1beta1IngressClassDict],
"kind": str,
"metadata": V1ListMetaDict,
},
total=False,
)
| # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1IngressClassListDict generated type."""
from typing import TypedDict, List
from kubernetes_typed.client import V1ListMetaDict, V1beta1IngressClassDict
V1beta1IngressClassListDict = TypedDict(
"V1beta1IngressClassListDict",
{
"apiVersion": str,
"items": List[V1beta1IngressClassDict],
"kind": str,
"metadata": V1ListMetaDict,
},
total=False,
) | en | 0.429069 | # Code generated by `typeddictgen`. DO NOT EDIT. V1beta1IngressClassListDict generated type. | 1.57391 | 2 |
tests/test_client.py | bachya/pypi-template | 0 | 6617813 | """Define tests for the client."""
| """Define tests for the client."""
| en | 0.752933 | Define tests for the client. | 1.143976 | 1 |
omni_base_driver/script/omni_serial_com.py | EwingKang/Neuron-OmniBot | 12 | 6617814 | <reponame>EwingKang/Neuron-OmniBot<filename>omni_base_driver/script/omni_serial_com.py
from __future__ import print_function
import rospy
import time
import sys
import math
import serial
import threading
import struct
import binascii
class OmniSerialCom:
def __init__(self, port, baudrate, imu_freq, odom_freq, timeout):
self.port = port
self.baudrate = baudrate
self.imu_freq = imu_freq
self.odom_freq = odom_freq
self.timeout = timeout
self._serialOK = False
self._is_synced = False
self._imu_new_data = False
self._odom_new_data = False
self._cmd_new_data = False
self._first_odom = True
self._first_cmd = True
self.error_flag = False
self.t_stop = threading.Event()
try:
rospy.loginfo("Opening serial port: "+ self.port)
self.serial = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
except:
rospy.logerr("Failed to open serial port")
raise
return
self.imu = {"accel":[0, 0, 0], "gyro":[0, 0, 0]}
self.imu_bfr = {"accel":[0, 0, 0], "gyro":[0, 0, 0]}
self.odom = [0, 0, 0]
self.odom_bfr = [0, 0, 0]
self.cmd = [0, 0, 0]
self.cmd_bfr = [0, 0, 0]
self.odom_seq = 0
self.cmd_seq = 0
self.last_odom_seq = 0
self.last_cmd_seq = 0
'''*******************************************************************
Independent thread that constantly checking Serial port
*******************************************************************'''
def serial_thread(self):
# Serial initialization
rospy.loginfo("===== Serial thread =====")
try:
rospy.loginfo("First 3 data readings:")
self.serial.reset_input_buffer()
init_msg = self.serial.readline()
for x in range(0, 3):
init_msg = self.serial.readline() ### Note: try restart motor board if error ###
print( init_msg.encode('ascii')[0:(len(init_msg)-1)] )
except Exception:
rospy.logerr("Port timeout after %d seconds at: %s", self.timeout, self.port)
self.serial.close()
raise
return
# sent pause and start signal
self._serialOK = True
rospy.loginfo("Sending starting signal \"SSSS\"")
self.serial.write( "SSSS".encode('ascii') )
# time.sleep(0.01) # for Arduino to reboot
# continuous packet recieve
while (not self.t_stop.is_set()):
try:
reading = self.serial.read(2)
except Exception:
self.error_flag = True
break
#========= imu data packet =========#
if reading[0] == '\xFF' and reading[1] == '\xFA':
#ser_in = self.serial.read(12)
try:
ser_in = self.serial.read(12)
except Exception:
self.error_flag = True
break
self.imu_decode(ser_in, 12)
self._is_synced = True
#debug
#toHex = lambda x: "".join("{:02X}".format(ord(c)) for c in reading)
#print(toHex(b'\x03\xac23\n'))
#========= encoder data packet =========#
elif reading[0] == '\xFF' and reading[1] == '\xFB':
#ser_in = self.serial.read(7)
try:
ser_in = self.serial.read(7)
except Exception:
self.error_flag = True
break
self.odom_decode(ser_in, 7)
self._is_synced = True
#========= command data packet =========#
elif reading[0] == '\xFF' and reading[1] == '\xFC':
#ser_in = self.serial.read(13)
try:
ser_in = self.serial.read(13)
except Exception:
self.error_flag = True
break
self.cmd_decode(ser_in, 13)
self._is_synced = True
#========= lost sync =========#
else:
if self._is_synced:
if self._first_odom or self._first_cmd:
rospy.loginfo("Initial syncing...")
self._is_synced = False
continue
rospy.logerr('Out of sync:')
toHex = lambda x: "".join("{:02X}".format(ord(c)) for c in reading)
print(toHex(b'\x03\xac23\n'))
bfr = self.serial.read(1)
toHex = lambda x: " ".join("{:02X}".format(ord(c)) for c in bfr)
print(toHex(b' '), end='')
self._is_synced = False
# if loop breaks with an error flag
if self.error_flag:
rospy.logerr('serial read error')
self.serial.write( 'RRR'.encode('ascii') )
self.serial.close()
self._serialOK = False
self._is_synced = False
self._odom_new_data = False
self._cmd_new_data = False
print("thread ends")
raise
return
# if threads ends here
print("Sending stoping signal to motor controller")
self.serial.write( 'R'.encode('ascii') )
self.serial.close()
self._serialOK = False
self._is_synced = False
self._odom_new_data = False
self._imu_new_data = False
self._cmd_new_data = False
print("thread ends")
'''*******************************************************************
Decode imu data
*******************************************************************'''
def imu_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.imu_bfr["accel"][0] = struct.unpack('>h', data[0:2])[0] # signed short, 2B
self.imu_bfr["accel"][1] = struct.unpack('>h', data[2:4])[0]
self.imu_bfr["accel"][2] = struct.unpack('>h', data[4:6])[0]
self.imu_bfr["gyro"][0] = struct.unpack('>h', data[6:8])[0]
self.imu_bfr["gyro"][1] = struct.unpack('>h', data[8:10])[0]
self.imu_bfr["gyro"][2] = struct.unpack('>h', data[10:12])[0]
#debug
#print("imu", self.seq, " t_micro:", self.t_micro)
self.imu = self.imu_bfr
self._imu_new_data = True
'''*******************************************************************
Decode odometry data
*******************************************************************'''
def odom_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.odom_bfr[0] = struct.unpack('>h', data[0:2])[0] # signed short 2B
self.odom_bfr[1] = struct.unpack('>h', data[2:4])[0]
self.odom_bfr[2] = struct.unpack('>h', data[4:6])[0]
self.odom_seq = struct.unpack('B', data[6:7])[0] # unsigned byte
#debug
#print("odom", self.odom_seq, self.odom[0:3])
if (self.odom_seq != ((self.last_odom_seq + 1)%256) ):
if not self._first_odom:
rospy.logwarn("odom seq mismatch, prev: %d, now:%d", self.last_odom_seq, self.odom_seq)
if self._first_odom:
self._first_odom = False
self.last_odom_seq = self.odom_seq
self.odom = self.odom_bfr
self._odom_new_data = True
'''*******************************************************************
Decode 5hz data
*******************************************************************'''
def cmd_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.cmd_bfr[0] = struct.unpack('>f', data[0:4])[0] # int 4B
self.cmd_bfr[1] = struct.unpack('>f', data[4:8])[0]
self.cmd_bfr[2] = struct.unpack('>f', data[8:12])[0]
self.cmd_seq = struct.unpack('B', data[12:13])[0] # unsigned byte
#debug
#print("cmdA", self.cmd[0], "cmdB", self.cmd[1], "cmdC", self.cmd[2])
if (self.cmd_seq != ((self.last_cmd_seq + 1)%256) ):
if not self._first_cmd:
rospy.logwarn("cmd seq mismatch, prev: %d, now:%d", self.last_cmd_seq, self.cmd_seq)
if self._first_cmd:
self._first_cmd = False
self.last_cmd_seq = self.cmd_seq
self.cmd = self.cmd_bfr
self._cmd_new_data = True
'''*******************************************************************
Module communication from outside
*******************************************************************'''
def serialOK(self):
return self._serialOK
def imu_new_data(self):
return self._imu_new_data
def odom_new_data(self):
return self._odom_new_data
def cmd_new_data(self):
return self._cmd_new_data
def get_imu_data(self):
if self._imu_new_data:
# data assign
self._imu_new_data = False
return self.imu
else:
return None
def get_odom_data(self):
if self._odom_new_data:
# data assign
self._odom_new_data = False
return {"seq":self.odom_seq, "pos_dt":self.odom}
else:
return None
def get_cmd_data(self):
if self._cmd_new_data:
self._cmd_new_data = False
return {"seq":self.cmd_seq, "cmd":self.cmd}
else:
return None
'''*******************************************************************
send vel_cmd to vehicle
*******************************************************************'''
def send_vel_cmd(self, veh_cmd):
if self._serialOK:
serial_cmd = bytearray(b'\xFF\xFE')
serial_cmd.append(0x01) # base vector mode
clamp = lambda n, minn, maxn: max(min(maxn, n), minn)
serial_cmd += struct.pack('>h',clamp( veh_cmd[0], -37268, 32767 ) ) #2-bytes
serial_cmd += struct.pack('>h',clamp( veh_cmd[1], -37268, 32767 ) )
serial_cmd += struct.pack('>h',clamp( veh_cmd[2], -37268, 32767 ) )
#debug
#print(binascii.hexlify(serial_cmd))
self.serial.write( serial_cmd )
def stopThread(self):
self.t_stop.set()
if self._serialOK:
while self._serialOK:
time.sleep(0.1) # wait for thread to stop
self.serial.close()
| from __future__ import print_function
import rospy
import time
import sys
import math
import serial
import threading
import struct
import binascii
class OmniSerialCom:
def __init__(self, port, baudrate, imu_freq, odom_freq, timeout):
self.port = port
self.baudrate = baudrate
self.imu_freq = imu_freq
self.odom_freq = odom_freq
self.timeout = timeout
self._serialOK = False
self._is_synced = False
self._imu_new_data = False
self._odom_new_data = False
self._cmd_new_data = False
self._first_odom = True
self._first_cmd = True
self.error_flag = False
self.t_stop = threading.Event()
try:
rospy.loginfo("Opening serial port: "+ self.port)
self.serial = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
except:
rospy.logerr("Failed to open serial port")
raise
return
self.imu = {"accel":[0, 0, 0], "gyro":[0, 0, 0]}
self.imu_bfr = {"accel":[0, 0, 0], "gyro":[0, 0, 0]}
self.odom = [0, 0, 0]
self.odom_bfr = [0, 0, 0]
self.cmd = [0, 0, 0]
self.cmd_bfr = [0, 0, 0]
self.odom_seq = 0
self.cmd_seq = 0
self.last_odom_seq = 0
self.last_cmd_seq = 0
'''*******************************************************************
Independent thread that constantly checking Serial port
*******************************************************************'''
def serial_thread(self):
# Serial initialization
rospy.loginfo("===== Serial thread =====")
try:
rospy.loginfo("First 3 data readings:")
self.serial.reset_input_buffer()
init_msg = self.serial.readline()
for x in range(0, 3):
init_msg = self.serial.readline() ### Note: try restart motor board if error ###
print( init_msg.encode('ascii')[0:(len(init_msg)-1)] )
except Exception:
rospy.logerr("Port timeout after %d seconds at: %s", self.timeout, self.port)
self.serial.close()
raise
return
# sent pause and start signal
self._serialOK = True
rospy.loginfo("Sending starting signal \"SSSS\"")
self.serial.write( "SSSS".encode('ascii') )
# time.sleep(0.01) # for Arduino to reboot
# continuous packet recieve
while (not self.t_stop.is_set()):
try:
reading = self.serial.read(2)
except Exception:
self.error_flag = True
break
#========= imu data packet =========#
if reading[0] == '\xFF' and reading[1] == '\xFA':
#ser_in = self.serial.read(12)
try:
ser_in = self.serial.read(12)
except Exception:
self.error_flag = True
break
self.imu_decode(ser_in, 12)
self._is_synced = True
#debug
#toHex = lambda x: "".join("{:02X}".format(ord(c)) for c in reading)
#print(toHex(b'\x03\xac23\n'))
#========= encoder data packet =========#
elif reading[0] == '\xFF' and reading[1] == '\xFB':
#ser_in = self.serial.read(7)
try:
ser_in = self.serial.read(7)
except Exception:
self.error_flag = True
break
self.odom_decode(ser_in, 7)
self._is_synced = True
#========= command data packet =========#
elif reading[0] == '\xFF' and reading[1] == '\xFC':
#ser_in = self.serial.read(13)
try:
ser_in = self.serial.read(13)
except Exception:
self.error_flag = True
break
self.cmd_decode(ser_in, 13)
self._is_synced = True
#========= lost sync =========#
else:
if self._is_synced:
if self._first_odom or self._first_cmd:
rospy.loginfo("Initial syncing...")
self._is_synced = False
continue
rospy.logerr('Out of sync:')
toHex = lambda x: "".join("{:02X}".format(ord(c)) for c in reading)
print(toHex(b'\x03\xac23\n'))
bfr = self.serial.read(1)
toHex = lambda x: " ".join("{:02X}".format(ord(c)) for c in bfr)
print(toHex(b' '), end='')
self._is_synced = False
# if loop breaks with an error flag
if self.error_flag:
rospy.logerr('serial read error')
self.serial.write( 'RRR'.encode('ascii') )
self.serial.close()
self._serialOK = False
self._is_synced = False
self._odom_new_data = False
self._cmd_new_data = False
print("thread ends")
raise
return
# if threads ends here
print("Sending stoping signal to motor controller")
self.serial.write( 'R'.encode('ascii') )
self.serial.close()
self._serialOK = False
self._is_synced = False
self._odom_new_data = False
self._imu_new_data = False
self._cmd_new_data = False
print("thread ends")
'''*******************************************************************
Decode imu data
*******************************************************************'''
def imu_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.imu_bfr["accel"][0] = struct.unpack('>h', data[0:2])[0] # signed short, 2B
self.imu_bfr["accel"][1] = struct.unpack('>h', data[2:4])[0]
self.imu_bfr["accel"][2] = struct.unpack('>h', data[4:6])[0]
self.imu_bfr["gyro"][0] = struct.unpack('>h', data[6:8])[0]
self.imu_bfr["gyro"][1] = struct.unpack('>h', data[8:10])[0]
self.imu_bfr["gyro"][2] = struct.unpack('>h', data[10:12])[0]
#debug
#print("imu", self.seq, " t_micro:", self.t_micro)
self.imu = self.imu_bfr
self._imu_new_data = True
'''*******************************************************************
Decode odometry data
*******************************************************************'''
def odom_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.odom_bfr[0] = struct.unpack('>h', data[0:2])[0] # signed short 2B
self.odom_bfr[1] = struct.unpack('>h', data[2:4])[0]
self.odom_bfr[2] = struct.unpack('>h', data[4:6])[0]
self.odom_seq = struct.unpack('B', data[6:7])[0] # unsigned byte
#debug
#print("odom", self.odom_seq, self.odom[0:3])
if (self.odom_seq != ((self.last_odom_seq + 1)%256) ):
if not self._first_odom:
rospy.logwarn("odom seq mismatch, prev: %d, now:%d", self.last_odom_seq, self.odom_seq)
if self._first_odom:
self._first_odom = False
self.last_odom_seq = self.odom_seq
self.odom = self.odom_bfr
self._odom_new_data = True
'''*******************************************************************
Decode 5hz data
*******************************************************************'''
def cmd_decode(self, data, size):
#https://docs.python.org/3/library/struct.html
self.cmd_bfr[0] = struct.unpack('>f', data[0:4])[0] # int 4B
self.cmd_bfr[1] = struct.unpack('>f', data[4:8])[0]
self.cmd_bfr[2] = struct.unpack('>f', data[8:12])[0]
self.cmd_seq = struct.unpack('B', data[12:13])[0] # unsigned byte
#debug
#print("cmdA", self.cmd[0], "cmdB", self.cmd[1], "cmdC", self.cmd[2])
if (self.cmd_seq != ((self.last_cmd_seq + 1)%256) ):
if not self._first_cmd:
rospy.logwarn("cmd seq mismatch, prev: %d, now:%d", self.last_cmd_seq, self.cmd_seq)
if self._first_cmd:
self._first_cmd = False
self.last_cmd_seq = self.cmd_seq
self.cmd = self.cmd_bfr
self._cmd_new_data = True
'''*******************************************************************
Module communication from outside
*******************************************************************'''
def serialOK(self):
return self._serialOK
def imu_new_data(self):
return self._imu_new_data
def odom_new_data(self):
return self._odom_new_data
def cmd_new_data(self):
return self._cmd_new_data
def get_imu_data(self):
if self._imu_new_data:
# data assign
self._imu_new_data = False
return self.imu
else:
return None
def get_odom_data(self):
if self._odom_new_data:
# data assign
self._odom_new_data = False
return {"seq":self.odom_seq, "pos_dt":self.odom}
else:
return None
def get_cmd_data(self):
if self._cmd_new_data:
self._cmd_new_data = False
return {"seq":self.cmd_seq, "cmd":self.cmd}
else:
return None
'''*******************************************************************
send vel_cmd to vehicle
*******************************************************************'''
def send_vel_cmd(self, veh_cmd):
if self._serialOK:
serial_cmd = bytearray(b'\xFF\xFE')
serial_cmd.append(0x01) # base vector mode
clamp = lambda n, minn, maxn: max(min(maxn, n), minn)
serial_cmd += struct.pack('>h',clamp( veh_cmd[0], -37268, 32767 ) ) #2-bytes
serial_cmd += struct.pack('>h',clamp( veh_cmd[1], -37268, 32767 ) )
serial_cmd += struct.pack('>h',clamp( veh_cmd[2], -37268, 32767 ) )
#debug
#print(binascii.hexlify(serial_cmd))
self.serial.write( serial_cmd )
def stopThread(self):
self.t_stop.set()
if self._serialOK:
while self._serialOK:
time.sleep(0.1) # wait for thread to stop
self.serial.close() | en | 0.397368 | *******************************************************************
Independent thread that constantly checking Serial port
******************************************************************* # Serial initialization ### Note: try restart motor board if error ### # sent pause and start signal # time.sleep(0.01) # for Arduino to reboot # continuous packet recieve #========= imu data packet =========# #ser_in = self.serial.read(12) #debug #toHex = lambda x: "".join("{:02X}".format(ord(c)) for c in reading) #print(toHex(b'\x03\xac23\n')) #========= encoder data packet =========# #ser_in = self.serial.read(7) #========= command data packet =========# #ser_in = self.serial.read(13) #========= lost sync =========# # if loop breaks with an error flag # if threads ends here *******************************************************************
Decode imu data
******************************************************************* #https://docs.python.org/3/library/struct.html # signed short, 2B #debug #print("imu", self.seq, " t_micro:", self.t_micro) *******************************************************************
Decode odometry data
******************************************************************* #https://docs.python.org/3/library/struct.html # signed short 2B # unsigned byte #debug #print("odom", self.odom_seq, self.odom[0:3]) *******************************************************************
Decode 5hz data
******************************************************************* #https://docs.python.org/3/library/struct.html # int 4B # unsigned byte #debug #print("cmdA", self.cmd[0], "cmdB", self.cmd[1], "cmdC", self.cmd[2]) *******************************************************************
Module communication from outside
******************************************************************* # data assign # data assign *******************************************************************
send vel_cmd to vehicle
******************************************************************* # base vector mode #2-bytes #debug #print(binascii.hexlify(serial_cmd)) # wait for thread to stop | 2.392338 | 2 |
website/drawquest/tests/test_api.py | bopopescu/drawquest-web | 19 | 6617815 | from datetime import timedelta as td
import hashlib
from django.conf import settings
from django.conf.urls import url, patterns, include
from drawquest.api_cache import cached_api
from drawquest.apps.palettes.models import Color, ColorPack
from drawquest.tests.tests_helpers import (CanvasTestCase, create_content, create_user, create_group,
create_comment, create_staff,
create_quest, create_current_quest, create_quest_comment,
fake_api_request)
from drawquest.api_decorators import api_decorator
from drawquest.apps.brushes.models import Brush
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.drawquest_auth.models import User
from canvas.exceptions import ServiceError, ValidationError
from drawquest import knobs
from canvas import util
from apps.share_tracking.models import ShareTrackingUrl
from canvas.models import Visibility, FacebookUser
from services import Services, override_service
class TestProfile(CanvasTestCase):
    """API tests for user profile editing, realtime sync, sharing and state sync."""

    def after_setUp(self):
        # Fresh user shared by all tests in this case.
        self.user = create_user()

    def test_bio(self):
        """Changing the bio via the API is reflected by the profile endpoint."""
        bio = 'my new bio'
        self.api_post('/api/user/change_profile', {'bio': bio}, user=self.user)
        self.assertEqual(self.api_post('/api/user/profile', {'username': self.user.username})['bio'], bio)

    def test_realtime_sync(self):
        """The realtime sync response lists the user's activity stream channel."""
        resp = self.api_post('/api/realtime/sync', user=self.user)
        self.assertAPISuccess(resp)
        self.assertTrue(self.user.redis.activity_stream_channel.channel in resp['channels'])

    def test_create_share_url_for_channel_via_api(self):
        """Creating a share URL yields a link ending in the base36 tracking id."""
        cmt = create_quest_comment()
        result = self.api_post('/api/share/create_for_channel',
                               {'comment_id': cmt.id, 'channel': 'testing'}, user=self.user)
        self.assertAPISuccess(result)
        url = result['share_url']
        # The newest ShareTrackingUrl row backs the link; its id is base36-encoded.
        rmatch = '/s/{}'.format(util.base36encode(ShareTrackingUrl.objects.order_by('-id')[0].id))
        self.assertEqual(url[url.rfind(rmatch):], rmatch)

    def test_create_share_url_for_channel_has_message_for_twitter(self):
        """Shares to the twitter channel also carry a prefilled message."""
        cmt = create_quest_comment()
        result = self.api_post('/api/share/create_for_channel',
                               {'comment_id': cmt.id, 'channel': 'twitter'}, user=self.user)
        self.assertTrue('message' in result)

    def test_heavy_state_sync(self):
        """Heavy state sync reports the requesting user's own profile."""
        state = self.api_post('/api/heavy_state_sync', user=self.user)
        self.assertAPISuccess(state)
        self.assertEqual(state['user_profile']['user']['username'], self.user.username)

    def test_heavy_state_sync_with_fb_user(self):
        """A linked Facebook account surfaces as facebook_url in the profile."""
        FacebookUser.objects.create(user=self.user, fb_uid='123')
        state = self.api_post('/api/heavy_state_sync', user=self.user)
        self.assertAPISuccess(state)
        self.assertEqual(state['user_profile']['facebook_url'], 'https://facebook.com/123')

    def test_heavy_state_sync_with_tab_badges(self):
        """Passing tab_last_seen_timestamps makes the sync include tab_badges."""
        state = self.api_post('/api/heavy_state_sync', {'tab_last_seen_timestamps': {'activity': 500}}, user=self.user)
        self.assertAPISuccess(state)
        self.assertTrue('tab_badges' in state)

    def test_twitter_privacy_default(self):
        """Twitter privacy is unset (None) for a brand-new user."""
        state = self.api_post('/api/heavy_state_sync', user=create_user())
        self.assertEqual(state['twitter_privacy'], None)
class TestEmailHashes(CanvasTestCase):
    """Tests for looking up existing users by SHA-1 email hash."""

    def test_successful(self):
        """A user's new email is findable via its hex SHA-1 digest."""
        # NOTE(review): '<EMAIL>' looks like an anonymization placeholder from
        # the published dataset, not the original literal — confirm against the
        # upstream repo; any acceptable address should work here.
        email = '<EMAIL>'
        user = create_user()
        self.api_post('/api/user/change_profile', {'new_email': email}, user=user)
        # The lookup endpoint matches on hex SHA-1 digests of email addresses.
        hashed = hashlib.sha1(email).hexdigest()
        resp = self.api_post('/api/existing_users_by_email', {'email_hashes': [hashed]})
        self.assertAPISuccess(resp)
        self.assertEqual(resp['users'][0]['username'], user.username)
class TestFlags(CanvasTestCase):
    """Tests for comment flagging and flag-driven auto-moderation."""

    def test_auto_moderation_from_flags(self):
        """A comment is auto-disabled exactly when it hits the flag threshold."""
        cmt = create_quest_comment()
        threshold = knobs.AUTO_DISABLE_FLAGGED_COMMENTS_THRESHOLD[None]
        for flag_count in range(1, threshold + 1):
            resp = self.api_post('/api/comment/flag', {'comment_id': cmt.id})
            cmt = QuestComment.all_objects.get(pk=cmt.pk)
            disabled = cmt.visibility == Visibility.DISABLED
            # Disabled only once the final (threshold-th) flag arrives.
            if flag_count == threshold:
                self.assertTrue(disabled)
            else:
                self.assertFalse(disabled)
        self.assertTrue(cmt.id in [qc.id for qc in QuestComment.unjudged_flagged()])

    def test_self_flag(self):
        """An author may flag their own comment without error."""
        cmt = create_quest_comment()
        resp = self.api_post('/api/comment/flag', {'comment_id': cmt.id}, user=cmt.author)
class TestCache(CanvasTestCase):
    """Tests for the cached_api decorator used by DrawQuest API endpoints."""

    def after_setUp(self):
        # Bind the api decorator to a throwaway urlconf.
        urls = patterns('')
        self.api = api_decorator(urls)

    def _api(self, api_func, data=None):
        """Invoke *api_func* through a fake request and decode its JSON body."""
        # None sentinel instead of a mutable default argument ({}).
        if data is None:
            data = {}
        return util.loads(api_func(fake_api_request('', data=data)).content)

    def test_cache_hit(self):
        """A cached endpoint's body runs once; subsequent calls hit the cache."""
        i = [0]
        @self.api('test_cache')
        @cached_api(td(days=2), key='test_cache')
        def test_cache(request):
            i[0] += 1
            return {'i': i[0]}
        # Warm the cache twice (debug print removed), then confirm the
        # counter never advanced past the first real execution.
        for _ in range(2):
            self._api(test_cache)
        self.assertEqual(self._api(test_cache)['i'], 1)

    def test_uncached(self):
        """An undecorated endpoint's body runs on every call."""
        i = [0]
        @self.api('test_uncached')
        def test_cache(request):
            i[0] += 1
            return {'i': i[0]}
        # NOTE(review): range(1, 2) yields a single iteration; widen the range
        # to actually assert repeated increments.
        for j in range(1, 2):
            self.assertEqual(self._api(test_cache)['i'], j)
class TestConfig(CanvasTestCase):
    """Sanity checks for DrawQuest-specific Django settings."""

    def test_fs(self):
        # The image filesystem config should reference a drawquest path.
        self.assertIn('drawquest', settings.IMAGE_FS[1])
class TestKV(CanvasTestCase):
    """Tests for the per-user key/value store API."""

    def test_set(self):
        """Values set via /api/kv/set come back in heavy_state_sync's user_kv."""
        user = create_user()
        items = {'saw_update_modal_for_version': 'bar'}
        self.assertAPISuccess(self.api_post('/api/kv/set', {'items': items}, user=user))
        state = self.api_post('/api/heavy_state_sync', user=user)
        self.assertAPISuccess(state)
        self.assertEqual(state['user_kv']['saw_update_modal_for_version'], 'bar')
class TestShop(CanvasTestCase):
    """Tests for the shop item listing API."""

    def test_for_nothing_bought(self):
        """Default-owned colors are listed even before any purchase."""
        color = Color.objects.create(
            red=255,
            green=255,
            blue=255,
            owned_by_default=True,
            label='White',
            ordinal=1,
        )
        resp = self.api_post('/api/shop/all_items')
        self.assertAPISuccess(resp)
        self.assertEqual(resp['shop_colors'][0]['label'], 'White')

    def test_brushes(self):
        """Brushes marked for sale appear in the shop_brushes listing."""
        brush = Brush.objects.create(
            ordinal=0,
            canonical_name='paintbrush',
            label='Paintbrush',
            cost=50,
            is_for_sale=True,
            is_new=True,
            red=0,
            green=0,
            blue=0,
        )
        resp = self.api_post('/api/shop/all_items')
        self.assertAPISuccess(resp)
        self.assertEqual(resp['shop_brushes'][0]['label'], 'Paintbrush')
| from datetime import timedelta as td
import hashlib
from django.conf import settings
from django.conf.urls import url, patterns, include
from drawquest.api_cache import cached_api
from drawquest.apps.palettes.models import Color, ColorPack
from drawquest.tests.tests_helpers import (CanvasTestCase, create_content, create_user, create_group,
create_comment, create_staff,
create_quest, create_current_quest, create_quest_comment,
fake_api_request)
from drawquest.api_decorators import api_decorator
from drawquest.apps.brushes.models import Brush
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.drawquest_auth.models import User
from canvas.exceptions import ServiceError, ValidationError
from drawquest import knobs
from canvas import util
from apps.share_tracking.models import ShareTrackingUrl
from canvas.models import Visibility, FacebookUser
from services import Services, override_service
class TestProfile(CanvasTestCase):
def after_setUp(self):
self.user = create_user()
def test_bio(self):
bio = 'my new bio'
self.api_post('/api/user/change_profile', {'bio': bio}, user=self.user)
self.assertEqual(self.api_post('/api/user/profile', {'username': self.user.username})['bio'], bio)
def test_realtime_sync(self):
resp = self.api_post('/api/realtime/sync', user=self.user)
self.assertAPISuccess(resp)
self.assertTrue(self.user.redis.activity_stream_channel.channel in resp['channels'])
def test_create_share_url_for_channel_via_api(self):
cmt = create_quest_comment()
result = self.api_post('/api/share/create_for_channel',
{'comment_id': cmt.id, 'channel': 'testing'}, user=self.user)
self.assertAPISuccess(result)
url = result['share_url']
rmatch = '/s/{}'.format(util.base36encode(ShareTrackingUrl.objects.order_by('-id')[0].id))
self.assertEqual(url[url.rfind(rmatch):], rmatch)
def test_create_share_url_for_channel_has_message_for_twitter(self):
cmt = create_quest_comment()
result = self.api_post('/api/share/create_for_channel',
{'comment_id': cmt.id, 'channel': 'twitter'}, user=self.user)
self.assertTrue('message' in result)
def test_heavy_state_sync(self):
state = self.api_post('/api/heavy_state_sync', user=self.user)
self.assertAPISuccess(state)
self.assertEqual(state['user_profile']['user']['username'], self.user.username)
def test_heavy_state_sync_with_fb_user(self):
FacebookUser.objects.create(user=self.user, fb_uid='123')
state = self.api_post('/api/heavy_state_sync', user=self.user)
self.assertAPISuccess(state)
self.assertEqual(state['user_profile']['facebook_url'], 'https://facebook.com/123')
def test_heavy_state_sync_with_tab_badges(self):
state = self.api_post('/api/heavy_state_sync', {'tab_last_seen_timestamps': {'activity': 500}}, user=self.user)
self.assertAPISuccess(state)
self.assertTrue('tab_badges' in state)
def test_twitter_privacy_default(self):
state = self.api_post('/api/heavy_state_sync', user=create_user())
self.assertEqual(state['twitter_privacy'], None)
class TestEmailHashes(CanvasTestCase):
def test_successful(self):
email = '<EMAIL>'
user = create_user()
self.api_post('/api/user/change_profile', {'new_email': email}, user=user)
hashed = hashlib.sha1(email).hexdigest()
resp = self.api_post('/api/existing_users_by_email', {'email_hashes': [hashed]})
self.assertAPISuccess(resp)
self.assertEqual(resp['users'][0]['username'], user.username)
class TestFlags(CanvasTestCase):
def test_auto_moderation_from_flags(self):
cmt = create_quest_comment()
for i in range(1, knobs.AUTO_DISABLE_FLAGGED_COMMENTS_THRESHOLD[None] + 1):
resp = self.api_post('/api/comment/flag', {'comment_id': cmt.id})
cmt = QuestComment.all_objects.get(pk=cmt.pk)
getattr(self, 'assert' + str(i == knobs.AUTO_DISABLE_FLAGGED_COMMENTS_THRESHOLD[None]))(
cmt.visibility == Visibility.DISABLED)
self.assertTrue(cmt.id in [qc.id for qc in QuestComment.unjudged_flagged()])
def test_self_flag(self):
cmt = create_quest_comment()
resp = self.api_post('/api/comment/flag', {'comment_id': cmt.id}, user=cmt.author)
class TestCache(CanvasTestCase):
def after_setUp(self):
urls = patterns('')
self.api = api_decorator(urls)
def _api(self, api_func, data={}):
return util.loads(api_func(fake_api_request('', data=data)).content)
def test_cache_hit(self):
i = [0]
@self.api('test_cache')
@cached_api(td(days=2), key='test_cache')
def test_cache(request):
i[0] += 1
return {'i': i[0]}
for _ in range(2):
print self._api(test_cache)
self.assertEqual(self._api(test_cache)['i'], 1)
def test_uncached(self):
i = [0]
@self.api('test_uncached')
def test_cache(request):
i[0] += 1
return {'i': i[0]}
for j in range(1, 2):
self.assertEqual(self._api(test_cache)['i'], j)
class TestConfig(CanvasTestCase):
def test_fs(self):
self.assertTrue('drawquest' in settings.IMAGE_FS[1])
class TestKV(CanvasTestCase):
def test_set(self):
user = create_user()
items = {'saw_update_modal_for_version': 'bar'}
self.assertAPISuccess(self.api_post('/api/kv/set', {'items': items}, user=user))
state = self.api_post('/api/heavy_state_sync', user=user)
self.assertAPISuccess(state)
self.assertEqual(state['user_kv']['saw_update_modal_for_version'], 'bar')
class TestShop(CanvasTestCase):
def test_for_nothing_bought(self):
color = Color.objects.create(
red=255,
green=255,
blue=255,
owned_by_default=True,
label='White',
ordinal=1,
)
resp = self.api_post('/api/shop/all_items')
self.assertAPISuccess(resp)
self.assertEqual(resp['shop_colors'][0]['label'], 'White')
def test_brushes(self):
brush = Brush.objects.create(
ordinal=0,
canonical_name='paintbrush',
label='Paintbrush',
cost=50,
is_for_sale=True,
is_new=True,
red=0,
green=0,
blue=0,
)
resp = self.api_post('/api/shop/all_items')
self.assertAPISuccess(resp)
self.assertEqual(resp['shop_brushes'][0]['label'], 'Paintbrush')
| none | 1 | 1.906781 | 2 | |
core/middleware.py | vintasoftware/awesomeboilerplates | 11 | 6617816 | from django.views.debug import technical_500_response
import sys
class UserBasedExceptionMiddleware(object):
    """Old-style Django middleware showing the technical 500 page to superusers.

    On any unhandled exception, superusers receive Django's full debug
    traceback page; everyone else falls through to the normal error handler.
    """

    def process_exception(self, request, exception):
        # Non-superusers: return None so Django's regular handling proceeds.
        if not request.user.is_superuser:
            return None
        return technical_500_response(request, *sys.exc_info())
| from django.views.debug import technical_500_response
import sys
class UserBasedExceptionMiddleware(object):
def process_exception(self, request, exception):
if request.user.is_superuser:
return technical_500_response(request, *sys.exc_info())
| none | 1 | 1.878204 | 2 | |
tests/test_cache.py | orest-d/liquer | 3 | 6617817 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for LiQuer State object.
"""
import pytest
from liquer.cache import *
from liquer.state import State
import tempfile
class TestCache:
    """Unit tests for the LiQuer cache implementations.

    Every cache backend must satisfy the same contract, so the shared
    store/retrieve/metadata/clean scenarios are factored into the
    ``_exercise_full_cache`` / ``_exercise_basic_cache`` helpers; each
    backend-specific test only constructs its cache instance.
    """

    @staticmethod
    def _make_state():
        """Build the canonical test state: data 123 cached under query "abc"."""
        state = State().with_data(123)
        state.query = "abc"
        return state

    def _exercise_full_cache(self, cache):
        """Run the full contract check (keys, metadata, clean) against *cache*."""
        state = self._make_state()
        cache.remove("abc")  # removing a missing key must not raise
        assert not cache.contains("abc")
        assert list(cache.keys()) == []
        cache.store(state)
        assert cache.contains("abc")
        assert list(cache.keys()) == ["abc"]
        assert cache.get("abc").get() == 123
        assert cache.get_metadata("abc")["query"] == "abc"
        assert cache.store_metadata(dict(query="abc", mymetafield="Hello"))
        assert cache.get_metadata("abc")["mymetafield"] == "Hello"
        assert not cache.contains("xyz")
        assert cache.get("xyz") == None
        # Metadata may be stored even before any state exists for the query.
        assert not cache.contains("xxx")
        assert cache.store_metadata(dict(query="xxx", mymetafield="Hello"))
        assert cache.contains("xxx")
        assert sorted(cache.keys()) == ["abc", "xxx"]
        cache.clean()
        assert not cache.contains("abc")
        assert list(cache.keys()) == []
        assert cache.get("abc") == None

    def _exercise_basic_cache(self, cache):
        """Run the minimal store/get/clean contract check against *cache*."""
        state = self._make_state()
        cache.remove("abc")  # removing a missing key must not raise
        assert not cache.contains("abc")
        cache.store(state)
        assert cache.contains("abc")
        assert cache.get("abc").get() == 123
        assert not cache.contains("xyz")
        assert cache.get("xyz") == None
        cache.clean()
        assert not cache.contains("abc")
        assert cache.get("abc") == None

    def test_nocache(self):
        """NoCache accepts all operations but never actually stores anything."""
        state = self._make_state()
        cache = NoCache()
        cache.remove("abc")  # removing a missing key must not raise
        assert not cache.contains("abc")
        cache.store(state)
        assert not cache.contains("abc")
        assert cache.get("abc") == None
        assert not cache.contains("xyz")
        assert cache.get("xyz") == None
        cache.clean()
        assert not cache.contains("abc")
        assert cache.get("abc") == None

    def test_memory(self):
        self._exercise_full_cache(MemoryCache())

    def test_sqlite(self):
        self._exercise_full_cache(SQLCache.from_sqlite())

    def test_sqlite_store_metadata_disabled(self):
        """With store_metadata_enabled off, store_metadata must report failure."""
        state = self._make_state()
        cache = SQLCache.from_sqlite()
        cache.remove("abc")  # removing a missing key must not raise
        cache.store_metadata_enabled = False
        assert not cache.contains("abc")
        assert list(cache.keys()) == []
        cache.store(state)
        assert cache.contains("abc")
        assert list(cache.keys()) == ["abc"]
        assert cache.get("abc").get() == 123
        assert cache.get_metadata("abc")["query"] == "abc"
        assert not cache.store_metadata(dict(query="abc", mymetafield="Hello"))
        assert cache.get_metadata("abc").get("mymetafield") is None

    def test_sqlite_string(self):
        self._exercise_basic_cache(SQLStringCache.from_sqlite())

    def test_filecache(self):
        with tempfile.TemporaryDirectory() as cachepath:
            self._exercise_full_cache(FileCache(cachepath))

    def test_storecache(self):
        """StoreCache satisfies the full contract for root, sub-path and flat layouts."""
        from liquer.store import MemoryStore

        for cache in [
            StoreCache(MemoryStore(), path=""),
            StoreCache(MemoryStore(), path="xx"),
            StoreCache(MemoryStore(), path="xx", flat=True),
        ]:
            self._exercise_full_cache(cache)

    def test_xor_file_cache(self):
        with tempfile.TemporaryDirectory() as cachepath:
            self._exercise_basic_cache(XORFileCache(cachepath, b"**"))

    def test_fernet_file_cache(self):
        from cryptography.fernet import Fernet

        fernet_key = Fernet.generate_key()
        with tempfile.TemporaryDirectory() as cachepath:
            self._exercise_basic_cache(FernetFileCache(cachepath, fernet_key))

    def test_fernet_file_cache_bad_key(self):
        """Entries written with one Fernet key are invisible under another key."""
        from cryptography.fernet import Fernet

        fernet_key = Fernet.generate_key()
        new_fernet_key = Fernet.generate_key()
        state = self._make_state()
        with tempfile.TemporaryDirectory() as cachepath:
            cache = FernetFileCache(cachepath, fernet_key)
            cache.remove("abc")  # removing a missing key must not raise
            assert not cache.contains("abc")
            cache.store(state)
            assert cache.contains("abc")
            assert cache.get("abc").get() == 123
            # A cache on the same directory with a different key must fail to
            # decrypt the old entry, then be able to overwrite it.
            cache_with_new_key = FernetFileCache(cachepath, new_fernet_key)
            assert not cache_with_new_key.contains("abc")
            assert cache_with_new_key.get("abc") is None
            cache_with_new_key.store(state)
            assert cache_with_new_key.contains("abc")
            assert cache_with_new_key.get("abc").get() == 123

    def test_cached_part(self):
        """cached_part consumes the longest cached prefix of a query."""
        cache = MemoryCache()
        state, remainder = cached_part("abc", cache)
        assert remainder == "abc"
        assert state.get() == None
        state = self._make_state()
        cache.store(state)
        state, remainder = cached_part("abc", cache)
        assert remainder == ""
        assert state.get() == 123
        state, remainder = cached_part("/abc/def", cache)
        assert remainder == "def"
        assert state.get() == 123

    def test_cached_part_nocache(self):
        """With NoCache, cached_part never consumes any prefix."""
        cache = NoCache()
        state, remainder = cached_part("abc", cache)
        assert remainder == "abc"
        assert state.get() == None
        state = self._make_state()
        cache.store(state)
        state, remainder = cached_part("/abc/", cache)
        assert remainder == "abc"
        assert state.get() == None
        state, remainder = cached_part("/abc/def/", cache)
        assert remainder == "abc/def"
        assert state.get() == None

    def test_cache_rules(self):
        """Attribute-presence rules route each result to the matching cache."""
        from liquer import evaluate, first_command

        cache1 = MemoryCache()
        cache2 = MemoryCache()
        cache3 = MemoryCache()
        # cache1 takes results whose command has attribute 'abc',
        # cache2 takes those without 'xyz', cache3 is the fallback.
        set_cache(cache1.if_contains("abc") + cache2.if_not_contains("xyz") + cache3)

        @first_command(abc=True)
        def command1():
            return "C1"

        @first_command(xyz=True)
        def command2():
            return "C2"

        @first_command
        def command3():
            return "C3"

        evaluate("command1")
        evaluate("command2")
        evaluate("command3")
        assert "command1" in cache1.storage
        assert "command1" not in cache2.storage
        assert "command1" not in cache3.storage
        assert cache1.storage["command1"].get() == "C1"
        assert "command2" not in cache1.storage
        assert "command2" not in cache2.storage
        assert "command2" in cache3.storage
        assert cache3.storage["command2"].get() == "C2"
        assert "command3" not in cache1.storage
        assert "command3" in cache2.storage
        assert "command3" not in cache3.storage
        assert cache2.storage["command3"].get() == "C3"
        set_cache(None)

    def test_cache_attribute_equality_rules(self):
        """Attribute-equality rules route each result to the matching cache."""
        from liquer import evaluate, first_command

        cache1 = MemoryCache()
        cache2 = MemoryCache()
        cache3 = MemoryCache()
        set_cache(
            cache1.if_attribute_equal("abc", 123)
            + cache2.if_attribute_not_equal("xyz", 456)
            + cache3
        )

        @first_command(abc=123)
        def command1a():
            return "C1"

        @first_command(xyz=456)
        def command2a():
            return "C2"

        @first_command
        def command3a():
            return "C3"

        evaluate("command1a")
        evaluate("command2a")
        evaluate("command3a")
        assert "command1a" in cache1.storage
        assert "command1a" not in cache2.storage
        assert "command1a" not in cache3.storage
        assert cache1.storage["command1a"].get() == "C1"
        assert "command2a" not in cache1.storage
        assert "command2a" not in cache2.storage
        assert "command2a" in cache3.storage
        assert cache3.storage["command2a"].get() == "C2"
        assert "command3a" not in cache1.storage
        assert "command3a" in cache2.storage
        assert "command3a" not in cache3.storage
        assert cache2.storage["command3a"].get() == "C3"
        set_cache(None)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for LiQuer State object.
"""
import pytest
from liquer.cache import *
from liquer.state import State
import tempfile
class TestCache:
def test_nocache(self):
state = State().with_data(123)
state.query = "abc"
cache = NoCache()
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
cache.store(state)
assert not cache.contains("abc")
assert cache.get("abc") == None
assert not cache.contains("xyz")
assert cache.get("xyz") == None
cache.clean()
assert not cache.contains("abc")
assert cache.get("abc") == None
def test_memory(self):
state = State().with_data(123)
state.query = "abc"
cache = MemoryCache()
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
assert list(cache.keys()) == []
cache.store(state)
assert cache.contains("abc")
assert list(cache.keys()) == ["abc"]
assert cache.get("abc").get() == 123
assert cache.get_metadata("abc")["query"] == "abc"
assert cache.store_metadata(dict(query="abc", mymetafield="Hello"))
assert cache.get_metadata("abc")["mymetafield"] == "Hello"
assert not cache.contains("xyz")
assert cache.get("xyz") == None
assert not cache.contains("xxx")
assert cache.store_metadata(dict(query="xxx", mymetafield="Hello"))
assert cache.contains("xxx")
assert sorted(cache.keys()) == ["abc", "xxx"]
cache.clean()
assert not cache.contains("abc")
assert list(cache.keys()) == []
assert cache.get("abc") == None
def test_sqlite(self):
state = State().with_data(123)
state.query = "abc"
cache = SQLCache.from_sqlite()
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
assert list(cache.keys()) == []
cache.store(state)
assert cache.contains("abc")
assert list(cache.keys()) == ["abc"]
assert cache.get("abc").get() == 123
assert cache.get_metadata("abc")["query"] == "abc"
assert cache.store_metadata(dict(query="abc", mymetafield="Hello"))
assert cache.get_metadata("abc")["mymetafield"] == "Hello"
assert not cache.contains("xyz")
assert cache.get("xyz") == None
assert not cache.contains("xxx")
assert cache.store_metadata(dict(query="xxx", mymetafield="Hello"))
assert cache.contains("xxx")
assert sorted(cache.keys()) == ["abc", "xxx"]
cache.clean()
assert not cache.contains("abc")
assert list(cache.keys()) == []
assert cache.get("abc") == None
def test_sqlite_store_metadata_disabled(self):
state = State().with_data(123)
state.query = "abc"
cache = SQLCache.from_sqlite()
cache.remove("abc") # Try to remove key from empty cache
cache.store_metadata_enabled = False
assert not cache.contains("abc")
assert list(cache.keys()) == []
cache.store(state)
assert cache.contains("abc")
assert list(cache.keys()) == ["abc"]
assert cache.get("abc").get() == 123
assert cache.get_metadata("abc")["query"] == "abc"
assert not cache.store_metadata(dict(query="abc", mymetafield="Hello"))
assert cache.get_metadata("abc").get("mymetafield") is None
def test_sqlite_string(self):
state = State().with_data(123)
state.query = "abc"
cache = SQLStringCache.from_sqlite()
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
cache.store(state)
assert cache.contains("abc")
assert cache.get("abc").get() == 123
assert not cache.contains("xyz")
assert cache.get("xyz") == None
cache.clean()
assert not cache.contains("abc")
assert cache.get("abc") == None
def test_filecache(self):
state = State().with_data(123)
state.query = "abc"
with tempfile.TemporaryDirectory() as cachepath:
cache = FileCache(cachepath)
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
assert list(cache.keys()) == []
cache.store(state)
assert cache.contains("abc")
assert list(cache.keys()) == ["abc"]
assert cache.get("abc").get() == 123
assert cache.get_metadata("abc")["query"] == "abc"
assert cache.store_metadata(dict(query="abc", mymetafield="Hello"))
assert cache.get_metadata("abc")["mymetafield"] == "Hello"
assert not cache.contains("xyz")
assert cache.get("xyz") == None
assert not cache.contains("xxx")
assert cache.store_metadata(dict(query="xxx", mymetafield="Hello"))
assert cache.contains("xxx")
assert sorted(cache.keys()) == ["abc", "xxx"]
cache.clean()
assert not cache.contains("abc")
assert list(cache.keys()) == []
assert cache.get("abc") == None
def test_storecache(self):
from liquer.store import MemoryStore
state = State().with_data(123)
state.query = "abc"
for cache in [
StoreCache(MemoryStore(), path=""),
StoreCache(MemoryStore(), path="xx"),
StoreCache(MemoryStore(), path="xx", flat=True),
]:
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
assert list(cache.keys()) == []
cache.store(state)
assert cache.contains("abc")
assert list(cache.keys()) == ["abc"]
assert cache.get("abc").get() == 123
assert cache.get_metadata("abc")["query"] == "abc"
assert cache.store_metadata(dict(query="abc", mymetafield="Hello"))
assert cache.get_metadata("abc")["mymetafield"] == "Hello"
assert not cache.contains("xyz")
assert cache.get("xyz") == None
assert not cache.contains("xxx")
assert cache.store_metadata(dict(query="xxx", mymetafield="Hello"))
assert cache.contains("xxx")
assert sorted(cache.keys()) == ["abc", "xxx"]
cache.clean()
assert not cache.contains("abc")
assert list(cache.keys()) == []
assert cache.get("abc") == None
def test_xor_file_cache(self):
state = State().with_data(123)
state.query = "abc"
with tempfile.TemporaryDirectory() as cachepath:
cache = XORFileCache(cachepath, b"**")
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
cache.store(state)
assert cache.contains("abc")
assert cache.get("abc").get() == 123
assert not cache.contains("xyz")
assert cache.get("xyz") == None
cache.clean()
assert not cache.contains("abc")
assert cache.get("abc") == None
def test_fernet_file_cache(self):
from cryptography.fernet import Fernet
fernet_key = Fernet.generate_key()
state = State().with_data(123)
state.query = "abc"
with tempfile.TemporaryDirectory() as cachepath:
cache = FernetFileCache(cachepath, fernet_key)
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
cache.store(state)
assert cache.contains("abc")
assert cache.get("abc").get() == 123
assert not cache.contains("xyz")
assert cache.get("xyz") == None
cache.clean()
assert not cache.contains("abc")
assert cache.get("abc") == None
def test_fernet_file_cache_bad_key(self):
from cryptography.fernet import Fernet
fernet_key = Fernet.generate_key()
new_fernet_key = Fernet.generate_key()
state = State().with_data(123)
state.query = "abc"
with tempfile.TemporaryDirectory() as cachepath:
cache = FernetFileCache(cachepath, fernet_key)
cache.remove("abc") # Try to remove key from empty cache
assert not cache.contains("abc")
cache.store(state)
assert cache.contains("abc")
assert cache.get("abc").get() == 123
cache_with_new_key = FernetFileCache(cachepath, new_fernet_key)
assert not cache_with_new_key.contains("abc")
assert cache_with_new_key.get("abc") is None
cache_with_new_key.store(state)
assert cache_with_new_key.contains("abc")
assert cache_with_new_key.get("abc").get() == 123
def test_cached_part(self):
cache = MemoryCache()
state, remainder = cached_part("abc", cache)
assert remainder == "abc"
assert state.get() == None
state = State().with_data(123)
state.query = "abc"
cache.store(state)
state, remainder = cached_part("abc", cache)
assert remainder == ""
assert state.get() == 123
state, remainder = cached_part("/abc/def", cache)
assert remainder == "def"
assert state.get() == 123
def test_cached_part_nocache(self):
cache = NoCache()
state, remainder = cached_part("abc", cache)
assert remainder == "abc"
assert state.get() == None
state = State().with_data(123)
state.query = "abc"
cache.store(state)
state, remainder = cached_part("/abc/", cache)
assert remainder == "abc"
assert state.get() == None
state, remainder = cached_part("/abc/def/", cache)
assert remainder == "abc/def"
assert state.get() == None
def test_cache_rules(self):
from liquer import evaluate, first_command
cache1 = MemoryCache()
cache2 = MemoryCache()
cache3 = MemoryCache()
set_cache(cache1.if_contains("abc") + cache2.if_not_contains("xyz") + cache3)
@first_command(abc=True)
def command1():
return "C1"
@first_command(xyz=True)
def command2():
return "C2"
@first_command
def command3():
return "C3"
evaluate("command1")
evaluate("command2")
evaluate("command3")
assert "command1" in cache1.storage
assert "command1" not in cache2.storage
assert "command1" not in cache3.storage
assert cache1.storage["command1"].get() == "C1"
assert "command2" not in cache1.storage
assert "command2" not in cache2.storage
assert "command2" in cache3.storage
assert cache3.storage["command2"].get() == "C2"
assert "command3" not in cache1.storage
assert "command3" in cache2.storage
assert "command3" not in cache3.storage
assert cache2.storage["command3"].get() == "C3"
set_cache(None)
def test_cache_attribute_equality_rules(self):
from liquer import evaluate, first_command
cache1 = MemoryCache()
cache2 = MemoryCache()
cache3 = MemoryCache()
set_cache(
cache1.if_attribute_equal("abc", 123)
+ cache2.if_attribute_not_equal("xyz", 456)
+ cache3
)
@first_command(abc=123)
def command1a():
return "C1"
@first_command(xyz=456)
def command2a():
return "C2"
@first_command
def command3a():
return "C3"
evaluate("command1a")
evaluate("command2a")
evaluate("command3a")
assert "command1a" in cache1.storage
assert "command1a" not in cache2.storage
assert "command1a" not in cache3.storage
assert cache1.storage["command1a"].get() == "C1"
assert "command2a" not in cache1.storage
assert "command2a" not in cache2.storage
assert "command2a" in cache3.storage
assert cache3.storage["command2a"].get() == "C2"
assert "command3a" not in cache1.storage
assert "command3a" in cache2.storage
assert "command3a" not in cache3.storage
assert cache2.storage["command3a"].get() == "C3"
set_cache(None)
| en | 0.676776 | #!/usr/bin/python # -*- coding: utf-8 -*- Unit tests for LiQuer State object. # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache # Try to remove key from empty cache | 2.597292 | 3 |
thermd/fluid/separators.py | sgolle/python-thermd | 1 | 6617818 | # -*- coding: utf-8 -*-
"""Dokumentation.
Beschreibung
"""
from __future__ import annotations
import numpy as np
from thermd.core import (
# BaseSignalClass,
BaseStateClass,
# MediumBase,
# MediumHumidAir,
)
from thermd.fluid.core import BaseFluidOneInletTwoOutlets
from thermd.media.coolprop import CoolPropFluid, CoolPropPureFluids, MediumCoolProp
from thermd.helper import get_logger
# Initialize global logger
logger = get_logger(__name__)
# Sensor classes
class SeparatorWater(BaseFluidOneInletTwoOutlets):
    """SeparatorWater class.

    The SeparatorWater class separates liquid and solid water from a humid air flow.

    Ports (names inherited from BaseFluidOneInletTwoOutlets):
        a:  humid-air inlet
        b1: air outlet (receives the inlet state)
        b2: water outlet (pure water at the inlet's p and T)
        d:  signal port carrying the inlet pressure
    """

    def __init__(
        self: SeparatorWater,
        name: str,
        state0: BaseStateClass,
        eta: np.float64 = np.float64(1.0),
    ):
        """Initialize class.

        Init function of the class.

        Args:
            name: Name of the model instance.
            state0: Initial inlet state (humid air).
            eta: Separation efficiency in [0, 1]; stored but currently
                unused in ``equation`` -- TODO confirm intended use.
        """
        super().__init__(name=name, state0=state0)

        # Separator parameters
        self._eta = eta

        # Set water outlet state: pure water (CoolProp medium) at the
        # inlet's pressure and temperature.
        state_water = MediumCoolProp.from_pT(
            p=state0.p,
            T=state0.T,
            fluid=CoolPropFluid.new_pure_fluid(fluid=CoolPropPureFluids.WATER),
        )
        self._ports[self._port_b2_name].state = state_water

    def check_self(self: SeparatorWater) -> bool:
        # No internal consistency checks implemented yet.
        return True

    def equation(self: SeparatorWater):
        """Propagate the inlet state to the air outlet and update the signal."""
        # Stop criterions: remember inlet enthalpy and mass flow so the
        # solver can detect convergence between iterations.
        self._last_hmass = self._ports[self._port_a_name].state.hmass
        self._last_m_flow = self._ports[self._port_a_name].state.m_flow

        # New state
        ws = self._ports[self._port_a_name].state.ws
        # NOTE(review): both branches below are identical, so no water is
        # actually removed regardless of whether w >= ws (saturation
        # humidity ratio). Presumably the else-branch should split the
        # condensate off to port b2 -- confirm against the intended model.
        if self._ports[self._port_a_name].state.w >= ws:
            self._ports[self._port_b1_name].state = self._ports[self._port_a_name].state
        else:
            self._ports[self._port_b1_name].state = self._ports[self._port_a_name].state

        # New Signal: expose the inlet pressure on the signal port.
        self._ports[self._port_d_name].signal.value = self._ports[
            self._port_a_name
        ].state.p
if __name__ == "__main__":
logger.info("This is the file for the separator model classes.")
| # -*- coding: utf-8 -*-
"""Dokumentation.
Beschreibung
"""
from __future__ import annotations
import numpy as np
from thermd.core import (
# BaseSignalClass,
BaseStateClass,
# MediumBase,
# MediumHumidAir,
)
from thermd.fluid.core import BaseFluidOneInletTwoOutlets
from thermd.media.coolprop import CoolPropFluid, CoolPropPureFluids, MediumCoolProp
from thermd.helper import get_logger
# Initialize global logger
logger = get_logger(__name__)
# Sensor classes
class SeparatorWater(BaseFluidOneInletTwoOutlets):
"""SeparatorWater class.
The SeparatorWater class separates liquid and solid water from a humid air flow.
"""
def __init__(
self: SeparatorWater,
name: str,
state0: BaseStateClass,
eta: np.float64 = np.float64(1.0),
):
"""Initialize class.
Init function of the class.
"""
super().__init__(name=name, state0=state0)
# Separator parameters
self._eta = eta
# Set water outlet state
state_water = MediumCoolProp.from_pT(
p=state0.p,
T=state0.T,
fluid=CoolPropFluid.new_pure_fluid(fluid=CoolPropPureFluids.WATER),
)
self._ports[self._port_b2_name].state = state_water
def check_self(self: SeparatorWater) -> bool:
return True
def equation(self: SeparatorWater):
# Stop criterions
self._last_hmass = self._ports[self._port_a_name].state.hmass
self._last_m_flow = self._ports[self._port_a_name].state.m_flow
# New state
ws = self._ports[self._port_a_name].state.ws
if self._ports[self._port_a_name].state.w >= ws:
self._ports[self._port_b1_name].state = self._ports[self._port_a_name].state
else:
self._ports[self._port_b1_name].state = self._ports[self._port_a_name].state
# New Signal
self._ports[self._port_d_name].signal.value = self._ports[
self._port_a_name
].state.p
if __name__ == "__main__":
logger.info("This is the file for the separator model classes.")
| en | 0.509038 | # -*- coding: utf-8 -*- Dokumentation. Beschreibung # BaseSignalClass, # MediumBase, # MediumHumidAir, # Initialize global logger # Sensor classes SeparatorWater class. The SeparatorWater class separates liquid and solid water from a humid air flow. Initialize class. Init function of the class. # Separator parameters # Set water outlet state # Stop criterions # New state # New Signal | 2.305949 | 2 |
src/safecasttiles/settings.py | monkut/safecasttiles | 0 | 6617819 | """
Django settings for safecasttiles project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; load it from
# the environment for any non-development deployment.
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.staticfiles',
    'django.contrib.gis',  # GeoDjango; required for the PostGIS backend below
    'safecasttiles.measurements',
)

# No middleware installed (no sessions, auth or CSRF handling).
MIDDLEWARE_CLASSES = (
)

ROOT_URLCONF = 'safecasttiles.urls'

WSGI_APPLICATION = 'safecasttiles.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): database credentials are hard-coded; move them to
# environment variables before deploying outside local development.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': "safecasttiles",
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = False

USE_L10N = False

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

# NOTE(review): STATIC_URL of '/' serves static files from the site root --
# confirm this is intentional (Django's conventional value is '/static/').
STATIC_URL = '/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| """
Django settings for safecasttiles project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.staticfiles',
'django.contrib.gis',
'safecasttiles.measurements',
)
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'safecasttiles.urls'
WSGI_APPLICATION = 'safecasttiles.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': "safecasttiles",
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| en | 0.621802 | Django settings for safecasttiles project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ | 1.698492 | 2 |
tests/test_client.py | allenporter/rtsp-to-webrtc-client | 2 | 6617820 | <reponame>allenporter/rtsp-to-webrtc-client
from __future__ import annotations
import asyncio
import base64
from collections.abc import Awaitable, Callable
from typing import Any, cast
import aiohttp
import pytest
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient, TestServer
from rtsp_to_webrtc.client import get_adaptive_client, get_diagnostics
from rtsp_to_webrtc.exceptions import ClientError
# Example SDP bodies exchanged in a WebRTC offer/answer handshake.
OFFER_SDP = "v=0\r\no=carol 28908764872 28908764872 IN IP4 172.16.58.3\r\n..."
ANSWER_SDP = "v=0\r\no=bob 2890844730 2890844730 IN IP4 h.example.com\r\n..."
# The server returns the answer SDP base64-encoded.
ANSWER_PAYLOAD = base64.b64encode(ANSWER_SDP.encode("utf-8")).decode("utf-8")
RTSP_URL = "rtsp://example"
# Stream description in the shape served by the fake server's stream list.
STREAM_1 = {
    "name": "test video",
    "channels": {
        "0": {
            "name": "ch1",
            "url": "rtsp://example",
        },
        "1": {
            "name": "ch2",
            "url": "rtsp://example",
        },
    },
}
# Generic success envelope used by several server endpoints.
SUCCESS_RESPONSE = {
    "status": 1,
    "payload": "success",
}
@pytest.fixture
def event_loop() -> Any:
    """Provide a dedicated event loop per test and close it afterwards.

    Creating a fresh loop instead of calling ``asyncio.get_event_loop()``
    avoids the deprecation warning on Python 3.10+ and guarantees the
    loop's resources are released when the test finishes (the original
    never closed the loop).
    """
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
@pytest.fixture
def cli_cb(
    loop: Any,
    app: web.Application,
    aiohttp_client: Callable[[web.Application], Awaitable[TestClient]],
) -> Callable[[], Awaitable[TestClient]]:
    """Return a zero-argument factory that builds a test client for *app*."""

    async def make_client() -> TestClient:
        # Defer client construction until the test actually needs it.
        return await aiohttp_client(app)

    return make_client
async def test_adaptive_web_client(
    cli_cb: Callable[[], Awaitable[TestClient]],
    app: web.Application,
    request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
    """The adaptive client falls back to the Web API when the WebRTC probe fails.

    The fake server answers the Web heartbeat but 404s the WebRTC one, so
    discovery selects the Web client; the offer is then served through the
    Web API's list/add/webrtc endpoints.
    """
    # Routes used by the Web flavour of the server API.
    app.router.add_get("/streams", request_handler)
    app.router.add_get("/static", request_handler)
    app.router.add_post("/stream/{stream_id}/add", request_handler)
    app.router.add_post(
        "/stream/{stream_id}/channel/{channel_id}/webrtc", request_handler
    )
    cli = await cli_cb()
    assert isinstance(cli.server, TestServer)

    # Canned responses, consumed in request order by the handler.
    # Web heartbeat: succeeds with one known stream.
    cli.server.app["response"].append(
        aiohttp.web.json_response(
            {
                "status": 1,
                "payload": {
                    "demo1": STREAM_1,
                },
            }
        )
    )
    # WebRTC heartbeat: fails, so discovery rejects the WebRTC client.
    cli.server.app["response"].append(
        aiohttp.web.Response(status=404),
    )
    # List call: no streams registered yet, which forces an add.
    cli.server.app["response"].append(
        aiohttp.web.json_response(
            {
                "status": 1,
                "payload": {},
            }
        )
    )
    # Add stream
    cli.server.app["response"].append(aiohttp.web.json_response(SUCCESS_RESPONSE))
    # Web Offer: returns the base64-encoded answer SDP.
    cli.server.app["response"].append(aiohttp.web.Response(body=ANSWER_PAYLOAD))

    client = await get_adaptive_client(cast(ClientSession, cli))
    answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
    assert answer_sdp == ANSWER_SDP
    # Diagnostics record the discovery outcome and every API call made.
    assert get_diagnostics() == {
        "discovery": {"attempt": 1, "web.success": 1, "webrtc.failure": 1},
        "web": {
            "add_stream.request": 1,
            "add_stream.success": 1,
            "heartbeat.request": 1,
            "heartbeat.success": 1,
            "list_streams.request": 1,
            "list_streams.success": 1,
            "webrtc.request": 1,
            "webrtc.success": 1,
        },
        "webrtc": {"heartbeat.request": 1, "heartbeat.response_error": 1},
    }
async def test_adaptive_both_succeed_web_client(
    cli_cb: Callable[[], Awaitable[TestClient]],
    app: web.Application,
    request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
    """Test adaptive client picks Web when both probes succeed.

    Both heartbeats answer successfully; the offer is then served through
    the Web API's list/add/webrtc endpoints.
    """
    # Routes used by the Web flavour of the server API.
    app.router.add_get("/streams", request_handler)
    app.router.add_get("/static", request_handler)
    app.router.add_post("/stream/{stream_id}/add", request_handler)
    app.router.add_post(
        "/stream/{stream_id}/channel/{channel_id}/webrtc", request_handler
    )
    cli = await cli_cb()
    assert isinstance(cli.server, TestServer)

    # Web heartbeat: succeeds with one known stream.
    cli.server.app["response"].append(
        aiohttp.web.json_response(
            {
                "status": 1,
                "payload": {
                    "demo1": STREAM_1,
                },
            }
        )
    )
    # WebRTC heartbeat: also succeeds this time.
    cli.server.app["response"].append(
        aiohttp.web.Response(status=200),
    )
    # List call: no streams registered yet, which forces an add.
    cli.server.app["response"].append(
        aiohttp.web.json_response(
            {
                "status": 1,
                "payload": {},
            }
        )
    )
    # Add stream
    cli.server.app["response"].append(aiohttp.web.json_response(SUCCESS_RESPONSE))
    # Web Offer: returns the base64-encoded answer SDP.
    cli.server.app["response"].append(aiohttp.web.Response(body=ANSWER_PAYLOAD))

    client = await get_adaptive_client(cast(ClientSession, cli))
    answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
    assert answer_sdp == ANSWER_SDP
async def test_adaptive_webrtc_client(
    cli_cb: Callable[[], Awaitable[TestClient]],
    app: web.Application,
    request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
    """The adaptive client picks the WebRTC API when the Web probe fails."""
    app.router.add_get("/streams", request_handler)
    app.router.add_get("/static", request_handler)
    app.router.add_post("/stream", request_handler)
    cli = await cli_cb()
    assert isinstance(cli.server, TestServer)

    # Web heartbeat fails
    cli.server.app["response"].append(aiohttp.web.Response(status=404))
    # WebRTC heartbeat succeeds
    cli.server.app["response"].append(
        aiohttp.web.Response(status=200),
    )
    # WebRTC offer: the answer SDP comes back base64-encoded in JSON.
    cli.server.app["response"].append(
        aiohttp.web.json_response({"sdp64": ANSWER_PAYLOAD})
    )

    client = await get_adaptive_client(cast(ClientSession, cli))
    answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
    assert answer_sdp == ANSWER_SDP
    # Diagnostics record the failed Web probe and the WebRTC calls made.
    assert get_diagnostics() == {
        "discovery": {"attempt": 1, "webrtc.success": 1, "web.failure": 1},
        "web": {"heartbeat.request": 1, "heartbeat.response_error": 1},
        "webrtc": {
            "stream.request": 1,
            "stream.success": 1,
            "heartbeat.request": 1,
            "heartbeat.success": 1,
        },
    }
async def test_adaptive_both_fail(
    cli_cb: Callable[[], Awaitable[TestClient]],
    app: web.Application,
) -> None:
    """Client discovery raises ClientError when neither backend responds."""
    cli = await cli_cb()
    assert isinstance(cli.server, TestServer)
    # No routes or canned responses are set up, so both heartbeat probes
    # fail and the adaptive factory cannot select a backend.
    with pytest.raises(ClientError):
        await get_adaptive_client(cast(ClientSession, cli))
| from __future__ import annotations
import asyncio
import base64
from collections.abc import Awaitable, Callable
from typing import Any, cast
import aiohttp
import pytest
from aiohttp import ClientSession, web
from aiohttp.test_utils import TestClient, TestServer
from rtsp_to_webrtc.client import get_adaptive_client, get_diagnostics
from rtsp_to_webrtc.exceptions import ClientError
OFFER_SDP = "v=0\r\no=carol 28908764872 28908764872 IN IP4 172.16.58.3\r\n..."
ANSWER_SDP = "v=0\r\no=bob 2890844730 2890844730 IN IP4 h.example.com\r\n..."
ANSWER_PAYLOAD = base64.b64encode(ANSWER_SDP.encode("utf-8")).decode("utf-8")
RTSP_URL = "rtsp://example"
STREAM_1 = {
"name": "test video",
"channels": {
"0": {
"name": "ch1",
"url": "rtsp://example",
},
"1": {
"name": "ch2",
"url": "rtsp://example",
},
},
}
SUCCESS_RESPONSE = {
"status": 1,
"payload": "success",
}
@pytest.fixture
def event_loop() -> Any:
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture
def cli_cb(
loop: Any,
app: web.Application,
aiohttp_client: Callable[[web.Application], Awaitable[TestClient]],
) -> Callable[[], Awaitable[TestClient]]:
"""Creates a fake aiohttp client."""
async def func() -> TestClient:
return await aiohttp_client(app)
return func
async def test_adaptive_web_client(
cli_cb: Callable[[], Awaitable[TestClient]],
app: web.Application,
request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
"""Test adapative client picks Web when both succeed."""
app.router.add_get("/streams", request_handler)
app.router.add_get("/static", request_handler)
app.router.add_post("/stream/{stream_id}/add", request_handler)
app.router.add_post(
"/stream/{stream_id}/channel/{channel_id}/webrtc", request_handler
)
cli = await cli_cb()
assert isinstance(cli.server, TestServer)
# Web heartbeat
cli.server.app["response"].append(
aiohttp.web.json_response(
{
"status": 1,
"payload": {
"demo1": STREAM_1,
},
}
)
)
# WebRTC heartbeat
cli.server.app["response"].append(
aiohttp.web.Response(status=404),
)
# List call
cli.server.app["response"].append(
aiohttp.web.json_response(
{
"status": 1,
"payload": {},
}
)
)
# Add stream
cli.server.app["response"].append(aiohttp.web.json_response(SUCCESS_RESPONSE))
# Web Offer
cli.server.app["response"].append(aiohttp.web.Response(body=ANSWER_PAYLOAD))
client = await get_adaptive_client(cast(ClientSession, cli))
answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
assert answer_sdp == ANSWER_SDP
assert get_diagnostics() == {
"discovery": {"attempt": 1, "web.success": 1, "webrtc.failure": 1},
"web": {
"add_stream.request": 1,
"add_stream.success": 1,
"heartbeat.request": 1,
"heartbeat.success": 1,
"list_streams.request": 1,
"list_streams.success": 1,
"webrtc.request": 1,
"webrtc.success": 1,
},
"webrtc": {"heartbeat.request": 1, "heartbeat.response_error": 1},
}
async def test_adaptive_both_succeed_web_client(
cli_cb: Callable[[], Awaitable[TestClient]],
app: web.Application,
request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
"""Test adapative client picks Web when both succeed."""
app.router.add_get("/streams", request_handler)
app.router.add_get("/static", request_handler)
app.router.add_post("/stream/{stream_id}/add", request_handler)
app.router.add_post(
"/stream/{stream_id}/channel/{channel_id}/webrtc", request_handler
)
cli = await cli_cb()
assert isinstance(cli.server, TestServer)
# Web heartbeat
cli.server.app["response"].append(
aiohttp.web.json_response(
{
"status": 1,
"payload": {
"demo1": STREAM_1,
},
}
)
)
# WebRTC heartbeat
cli.server.app["response"].append(
aiohttp.web.Response(status=200),
)
# List call
cli.server.app["response"].append(
aiohttp.web.json_response(
{
"status": 1,
"payload": {},
}
)
)
# Add stream
cli.server.app["response"].append(aiohttp.web.json_response(SUCCESS_RESPONSE))
# Web Offer
cli.server.app["response"].append(aiohttp.web.Response(body=ANSWER_PAYLOAD))
client = await get_adaptive_client(cast(ClientSession, cli))
answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
assert answer_sdp == ANSWER_SDP
async def test_adaptive_webrtc_client(
cli_cb: Callable[[], Awaitable[TestClient]],
app: web.Application,
request_handler: Callable[[aiohttp.web.Request], Awaitable[aiohttp.web.Response]],
) -> None:
"""Test List Streams calls."""
app.router.add_get("/streams", request_handler)
app.router.add_get("/static", request_handler)
app.router.add_post("/stream", request_handler)
cli = await cli_cb()
assert isinstance(cli.server, TestServer)
# Web heartbeat fails
cli.server.app["response"].append(aiohttp.web.Response(status=404))
# WebRTC heartbeat succeeds
cli.server.app["response"].append(
aiohttp.web.Response(status=200),
)
# WebRTC offer
cli.server.app["response"].append(
aiohttp.web.json_response({"sdp64": ANSWER_PAYLOAD})
)
client = await get_adaptive_client(cast(ClientSession, cli))
answer_sdp = await client.offer(OFFER_SDP, RTSP_URL)
assert answer_sdp == ANSWER_SDP
assert get_diagnostics() == {
"discovery": {"attempt": 1, "webrtc.success": 1, "web.failure": 1},
"web": {"heartbeat.request": 1, "heartbeat.response_error": 1},
"webrtc": {
"stream.request": 1,
"stream.success": 1,
"heartbeat.request": 1,
"heartbeat.success": 1,
},
}
async def test_adaptive_both_fail(
cli_cb: Callable[[], Awaitable[TestClient]],
app: web.Application,
) -> None:
"""Test successful response from RTSPtoWebRTC server."""
cli = await cli_cb()
assert isinstance(cli.server, TestServer)
with pytest.raises(ClientError):
await get_adaptive_client(cast(ClientSession, cli)) | en | 0.694838 | Creates a fake aiohttp client. Test adapative client picks Web when both succeed. # Web heartbeat # WebRTC heartbeat # List call # Add stream # Web Offer Test adapative client picks Web when both succeed. # Web heartbeat # WebRTC heartbeat # List call # Add stream # Web Offer Test List Streams calls. # Web heartbeat fails # WebRTC heartbeat succeeds # WebRTC offer Test successful response from RTSPtoWebRTC server. | 2.083126 | 2 |
pysmfscrape/scraper.py | aayushagra/smf-scraper | 2 | 6617821 | <reponame>aayushagra/smf-scraper
import mechanize
from bs4 import BeautifulSoup
import traceback
import time
from utils import is_login_page, pages_in_thread
pagecount = 0
def login(br, username, password):
    """Fill in and submit the SMF login form.

    The post-login response is written to ``login.html`` so the session
    page can be inspected afterwards.
    """
    br.select_form("frmLogin")
    br["user"] = username
    br["passwrd"] = password
    # Tick every checkbox control on the form.
    for item in br.find_control(type="checkbox").items:
        item.selected = True
    response = br.submit()
    with open("login.html", "wb") as out:
        out.write(response.read())
def fetch_page(br, baseurl, threadno, pageno, delay):
    """Fetch one page of a thread after sleeping ``delay`` seconds.

    Pages are addressed by message offset, 15 messages per page:
    ``baseurl + threadno + "." + pageno*15``.
    """
    time.sleep(delay)
    url = baseurl + str(threadno) + "." + str(pageno*15)
    # Global counter of pages fetched so far, used only for progress output.
    global pagecount
    pagecount += 1
    print "[%d - %d] URL: %s" % (threadno, pagecount, url)
    return br.open(url, timeout=10).read()
def fetch_error(threadno, pageno, baseurl, exception, enable_traceback):
    """Report a failed page fetch; print the traceback only when enabled."""
    url = baseurl + str(threadno) + "." + str(pageno*15)
    print "Error fetching URL: %s" % (url)
    if enable_traceback is True:
        print exception
    else:
        print "Use flag --traceback to print full traceback"
def downloadThread(threadno, baseurl, br, username, password, path, delay, enable_traceback, pageno = 0):
    """Download one page of a forum thread, recursing to cover the rest.

    On page 0 the thread's page count is parsed from the HTML and each
    further page is fetched via a recursive call.  If the fetched page is
    the login screen and credentials are available, the function logs in
    and retries the same page.
    """
    try:
        page = fetch_page(br=br, baseurl=baseurl, threadno=threadno, pageno=pageno, delay=delay)
    except KeyboardInterrupt:
        # Let Ctrl-C abort the whole scrape instead of being swallowed below.
        raise(KeyboardInterrupt)
    except:
        # Any other fetch failure is reported and the page is skipped.
        return fetch_error(threadno, pageno, baseurl, traceback.format_exc(), enable_traceback)
    soup = BeautifulSoup(page, "html.parser")
    if is_login_page(soup):
        if username is not None:
            # NOTE(review): if the credentials are wrong this retries the
            # same page indefinitely -- the recursion has no attempt limit.
            login(br, username, password)
            downloadThread(threadno=threadno, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback, pageno=pageno)
    else:
        # Persist the page as <path>/<thread>-<page>.html.
        with open("%s/%s-%s.html" % (path, str(threadno), str(pageno)), "wb") as file:
            file.write(page)
        if pageno == 0:
            # The first page reveals the total page count; fetch the rest.
            pages = pages_in_thread(soup)
            for j in range(1, pages):
                downloadThread(threadno=threadno, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback, pageno=j)
    # Keep the browser's history from growing without bound.
    br.clear_history()
def scrape(baseurl, start, end, delay, path, username, password, enable_traceback):
br = mechanize.Browser()
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) # Follows refresh 0 but not hangs on refresh > 0
import time
start_time = time.time()
for i in range(start, end+1):
downloadThread(threadno=i, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback)
time_taken = time.time() - start_time
num_threads = end-start
print "Processed %s threads in %s seconds, average time: %s" % (num_threads, time_taken, time_taken/num_threads) | import mechanize
from bs4 import BeautifulSoup
import traceback
import time
from utils import is_login_page, pages_in_thread
pagecount = 0
def login(br, username, password):
br.select_form("frmLogin")
br["user"] = username
br["passwrd"] = password
for i in range(0, len(br.find_control(type="checkbox").items)):
br.find_control(type="checkbox").items[i].selected =True
response = br.submit()
with open("login.html", "wb") as file:
file.write(response.read())
def fetch_page(br, baseurl, threadno, pageno, delay):
time.sleep(delay)
url = baseurl + str(threadno) + "." + str(pageno*15)
global pagecount
pagecount += 1
print "[%d - %d] URL: %s" % (threadno, pagecount, url)
return br.open(url, timeout=10).read()
def fetch_error(threadno, pageno, baseurl, exception, enable_traceback):
url = baseurl + str(threadno) + "." + str(pageno*15)
print "Error fetching URL: %s" % (url)
if enable_traceback is True:
print exception
else:
print "Use flag --traceback to print full traceback"
def downloadThread(threadno, baseurl, br, username, password, path, delay, enable_traceback, pageno = 0):
try:
page = fetch_page(br=br, baseurl=baseurl, threadno=threadno, pageno=pageno, delay=delay)
except KeyboardInterrupt:
raise(KeyboardInterrupt)
except:
return fetch_error(threadno, pageno, baseurl, traceback.format_exc(), enable_traceback)
soup = BeautifulSoup(page, "html.parser")
if is_login_page(soup):
if username is not None:
login(br, username, password)
downloadThread(threadno=threadno, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback, pageno=pageno)
else:
with open("%s/%s-%s.html" % (path, str(threadno), str(pageno)), "wb") as file:
file.write(page)
if pageno == 0:
pages = pages_in_thread(soup)
for j in range(1, pages):
downloadThread(threadno=threadno, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback, pageno=j)
br.clear_history()
def scrape(baseurl, start, end, delay, path, username, password, enable_traceback):
br = mechanize.Browser()
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) # Follows refresh 0 but not hangs on refresh > 0
import time
start_time = time.time()
for i in range(start, end+1):
downloadThread(threadno=i, baseurl=baseurl, br=br, username=username, password=password, path=path, delay=delay, enable_traceback=enable_traceback)
time_taken = time.time() - start_time
num_threads = end-start
print "Processed %s threads in %s seconds, average time: %s" % (num_threads, time_taken, time_taken/num_threads) | en | 0.826089 | # Follows refresh 0 but not hangs on refresh > 0 | 2.796497 | 3 |
ger/TTP/SV3/3748.py | aLagoG/kygerand | 1 | 6617822 | <gh_stars>1-10
from fractions import Fraction
# Case counter; it doubles as the loop condition (always truthy once > 0,
# so the loop only ends via the sentinel ``break`` below).
t = 1
while t:
    # Per case: speed and distance for each of the two travellers.
    v1, d1, v2, d2 = [int(i) for i in raw_input().split()]
    # A line of four zeros terminates the input.
    if v1 == d1 == v2 == d2 == 0: break
    # Exact travel times (distance / speed) as fractions, avoiding
    # floating-point rounding in the comparison and the average.
    myTime = Fraction(d1, v1)
    hisTime = Fraction(d2, v2)
    if myTime < hisTime:
        print "Case #" + str(t) + ": You owe me a beer!"
    else:
        print "Case #" + str(t) + ": No beer for the captain."
    print "Avg. arrival time:",((myTime + hisTime)/2)
    t += 1
| from fractions import Fraction
t = 1
while t:
v1, d1, v2, d2 = [int(i) for i in raw_input().split()]
if v1 == d1 == v2 == d2 == 0: break
myTime = Fraction(d1, v1)
hisTime = Fraction(d2, v2)
if myTime < hisTime:
print "Case #" + str(t) + ": You owe me a beer!"
else:
print "Case #" + str(t) + ": No beer for the captain."
print "Avg. arrival time:",((myTime + hisTime)/2)
t += 1 | en | 0.348749 | #" + str(t) + ": You owe me a beer!" #" + str(t) + ": No beer for the captain." | 3.640062 | 4 |
agnes/common/schedules.py | rotinov/CITUS | 24 | 6617823 | from torch.optim.lr_scheduler import _LRScheduler
from agnes.algos.base import _BaseAlgo
class LinearAnnealingLR(_LRScheduler):
    """Anneal each group's LR linearly from its base value to ``eta_min``
    over ``to_epoch`` scheduler steps, then hold at ``eta_min``."""

    def __init__(self, optimizer, eta_min=0.0, to_epoch=1000):
        # Our attributes must exist before the base-class constructor runs,
        # because it invokes get_lr(), which reads them.
        self.eta_min = eta_min
        self.to_epoch = to_epoch
        super(LinearAnnealingLR, self).__init__(optimizer, -1)

    def get_lr(self):
        """Per-group learning rates for the current step count."""
        remaining = 1 - self._step_count / self.to_epoch
        rates = []
        for base_lr in self.base_lrs:
            span = base_lr - self.eta_min
            rates.append(self.eta_min + max(0, span * remaining))
        return rates

    def get_count(self):
        """Number of scheduler steps taken so far."""
        return self._step_count
class LinearSchedule:
    """Linearly decaying factor fed through a user-supplied callback.

    ``get_v()`` returns ``val_fun(k)`` where ``k`` falls linearly from 1.0
    to ``eta_min`` over ``to_epoch`` calls to ``step()`` and stays at
    ``eta_min`` afterwards.
    """

    # Class-level default; shadowed by an instance attribute on first step().
    _step_count = 0

    def __init__(self, val_fun, eta_min=0.0, to_epoch=1000):
        self.eta_min = eta_min
        self.to_epoch = to_epoch
        self.val_fun = val_fun

    def step(self):
        """Advance the schedule by one tick."""
        self._step_count += 1

    def get_v(self):
        """Current schedule value: ``val_fun`` applied to the decayed factor."""
        return self.val_fun(self._get_k())

    def _get_k(self):
        # Linear decay, clamped at eta_min once to_epoch steps have passed.
        progress = self._step_count / self.to_epoch
        decayed = (1. - self.eta_min) * (1. - progress)
        return self.eta_min + max(0., decayed)
class Saver:
    """Periodically persist an algorithm to ``filename``.

    Inactive (a no-op) unless a filename was supplied.  The very first
    period is skipped: the counter advances once before any save happens.
    """

    filename: str = None
    frames_period: int = None
    _counter: int = 0
    _active: bool = False

    def __init__(self, filename: str = None, frames_period: int = None):
        if filename is None:
            return
        self.filename = filename
        self.frames_period = frames_period
        self._active = True

    def save(self, algo: _BaseAlgo, frames_now: int):
        """Save ``algo`` once ``frames_now`` crosses the next period boundary."""
        if not self._active:
            return
        if frames_now <= self.frames_period * self._counter:
            return
        if self._counter:
            algo.save(self.filename)
        self._counter += 1
| from torch.optim.lr_scheduler import _LRScheduler
from agnes.algos.base import _BaseAlgo
class LinearAnnealingLR(_LRScheduler):
def __init__(self, optimizer, eta_min=0.0, to_epoch=1000):
self.eta_min = eta_min
self.to_epoch = to_epoch
last_epoch = -1
super(LinearAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [self.eta_min + max(0, (base_lr - self.eta_min) * (1 - self._step_count / self.to_epoch))
for base_lr in self.base_lrs]
def get_count(self):
return self._step_count
class LinearSchedule:
_step_count = 0
def __init__(self, val_fun, eta_min=0.0, to_epoch=1000):
self.eta_min = eta_min
self.to_epoch = to_epoch
self.val_fun = val_fun
def step(self):
self._step_count += 1
def get_v(self):
return self.val_fun(self._get_k())
def _get_k(self):
return self.eta_min + max(0.,
(1. - self.eta_min) * (1. - self._step_count / self.to_epoch)
)
class Saver:
filename: str = None
frames_period: int = None
_counter: int = 0
_active: bool = False
def __init__(self, filename: str = None, frames_period: int = None):
if filename is not None:
self.filename = filename
self.frames_period = frames_period
self._active = True
def save(self, algo: _BaseAlgo, frames_now: int):
if not self._active:
return
if self.frames_period * self._counter < frames_now:
if self._counter != 0:
algo.save(self.filename)
self._counter += 1
| none | 1 | 2.162143 | 2 | |
wdae/wdae/enrichment_api/tests/test_enrichment_serializer.py | iossifovlab/gpf | 0 | 6617824 | import pytest
from dae.enrichment_tool.event_counters import EnrichmentResult
pytestmark = pytest.mark.usefixtures(
"wdae_gpf_instance", "dae_calc_gene_sets")
# @pytest.mark.xfail(reason="[gene models] wrong annotation")
def test_serialize(enrichment_serializer):
    """Serialized enrichment output for the first person set ("phenotype 1").

    The original version repeated an identical block of assertions for the
    "all"/"rec"/"male"/"female" results (including duplicated
    ``peopleGroup.id`` checks); the shared expectations now live in the
    ``_check`` helper below.
    """

    def _check(result, name, count, gender):
        # For this fixture the overlapped/expected numbers always equal
        # the event count and the p-value is 1.
        assert result["name"] == name
        assert result["count"] == count
        assert result["overlapped"] == count
        assert result["expected"] == count
        assert result["pvalue"] == 1
        # Both filters are identical except that geneSymbols is present
        # only on the overlap filter.
        for flt in (result["countFilter"], result["overlapFilter"]):
            assert flt["datasetId"] == "f1_trio"
            assert flt["effectTypes"] == ["Missense"]
            assert flt["gender"] == gender
            assert flt["peopleGroup"]["id"] == "phenotype"
            assert flt["peopleGroup"]["checkedValues"] == ["phenotype1"]
            assert flt["studyTypes"] == ["we"]
            assert flt["variantTypes"] == ["ins", "sub", "del"]
        assert result["overlapFilter"]["geneSymbols"] == {"SAMD11"}

    serialize = enrichment_serializer.serialize()
    assert len(serialize) == 2

    first = serialize[0]
    assert first["selector"] == "phenotype 1"
    assert first["peopleGroupId"] == "phenotype"
    assert len(first["childrenStats"]) == 2
    assert first["childrenStats"]["M"] == 1
    assert first["childrenStats"]["F"] == 1

    missense = first["missense"]
    _check(missense["all"], "all", 2, ["male", "female", "unspecified"])
    _check(missense["rec"], "rec", 1, ["male", "female"])
    _check(missense["male"], "male", 1, ["male"])
    _check(missense["female"], "female", 1, ["female"])
def test_serialize_error(f1_trio, enrichment_builder, enrichment_serializer):
    """Requesting a person set absent from the collection raises KeyError.

    The "phenotype" collection of ``f1_trio`` has no "autism" set, so the
    ``person_sets["autism"]`` lookup fails.  (Two ``EnrichmentResult``
    objects built by the original test were never used — dead code — and
    have been removed.)
    """
    person_set_collection = f1_trio.get_person_set_collection("phenotype")
    with pytest.raises(KeyError):
        enrichment_builder.build_people_group_selector(
            ["missense"],
            person_set_collection.person_sets["autism"]
        )
def test_serialize_enrichment_result(db, enrichment_serializer):
    """A populated EnrichmentResult round-trips through the serializer."""
    result = EnrichmentResult("all")
    result.events = [["SAMD11"], ["SAMD11"], ["POGZ"]]
    result.overlapped = [["SAMD11"]]
    result.expected = 3
    result.pvalue = 0.5

    serialized = enrichment_serializer.serialize_enrichment_result(result)

    expected = {
        "name": "all",
        "count": 3,        # number of event lists
        "overlapped": 1,   # number of overlapped lists
        "expected": 3,
        "pvalue": 0.5,
    }
    assert len(serialized) == len(expected)
    for key, value in expected.items():
        assert serialized[key] == value

    # Anything that is not an EnrichmentResult is rejected outright.
    with pytest.raises(AssertionError):
        enrichment_serializer.serialize_enrichment_result({})
| import pytest
from dae.enrichment_tool.event_counters import EnrichmentResult
pytestmark = pytest.mark.usefixtures(
"wdae_gpf_instance", "dae_calc_gene_sets")
# @pytest.mark.xfail(reason="[gene models] wrong annotation")
def test_serialize(enrichment_serializer):
    """Serialized enrichment output for the first person set ("phenotype 1").

    The original version repeated an identical block of assertions for the
    "all"/"rec"/"male"/"female" results (including duplicated
    ``peopleGroup.id`` checks); the shared expectations now live in the
    ``_check`` helper below.
    """

    def _check(result, name, count, gender):
        # For this fixture the overlapped/expected numbers always equal
        # the event count and the p-value is 1.
        assert result["name"] == name
        assert result["count"] == count
        assert result["overlapped"] == count
        assert result["expected"] == count
        assert result["pvalue"] == 1
        # Both filters are identical except that geneSymbols is present
        # only on the overlap filter.
        for flt in (result["countFilter"], result["overlapFilter"]):
            assert flt["datasetId"] == "f1_trio"
            assert flt["effectTypes"] == ["Missense"]
            assert flt["gender"] == gender
            assert flt["peopleGroup"]["id"] == "phenotype"
            assert flt["peopleGroup"]["checkedValues"] == ["phenotype1"]
            assert flt["studyTypes"] == ["we"]
            assert flt["variantTypes"] == ["ins", "sub", "del"]
        assert result["overlapFilter"]["geneSymbols"] == {"SAMD11"}

    serialize = enrichment_serializer.serialize()
    assert len(serialize) == 2

    first = serialize[0]
    assert first["selector"] == "phenotype 1"
    assert first["peopleGroupId"] == "phenotype"
    assert len(first["childrenStats"]) == 2
    assert first["childrenStats"]["M"] == 1
    assert first["childrenStats"]["F"] == 1

    missense = first["missense"]
    _check(missense["all"], "all", 2, ["male", "female", "unspecified"])
    _check(missense["rec"], "rec", 1, ["male", "female"])
    _check(missense["male"], "male", 1, ["male"])
    _check(missense["female"], "female", 1, ["female"])
def test_serialize_error(f1_trio, enrichment_builder, enrichment_serializer):
    """Requesting a person set absent from the collection raises KeyError.

    The "phenotype" collection of ``f1_trio`` has no "autism" set, so the
    ``person_sets["autism"]`` lookup fails.  (Two ``EnrichmentResult``
    objects built by the original test were never used — dead code — and
    have been removed.)
    """
    person_set_collection = f1_trio.get_person_set_collection("phenotype")
    with pytest.raises(KeyError):
        enrichment_builder.build_people_group_selector(
            ["missense"],
            person_set_collection.person_sets["autism"]
        )
def test_serialize_enrichment_result(db, enrichment_serializer):
    """A populated EnrichmentResult round-trips through the serializer."""
    result = EnrichmentResult("all")
    result.events = [["SAMD11"], ["SAMD11"], ["POGZ"]]
    result.overlapped = [["SAMD11"]]
    result.expected = 3
    result.pvalue = 0.5

    serialized = enrichment_serializer.serialize_enrichment_result(result)

    expected = {
        "name": "all",
        "count": 3,        # number of event lists
        "overlapped": 1,   # number of overlapped lists
        "expected": 3,
        "pvalue": 0.5,
    }
    assert len(serialized) == len(expected)
    for key, value in expected.items():
        assert serialized[key] == value

    # Anything that is not an EnrichmentResult is rejected outright.
    with pytest.raises(AssertionError):
        enrichment_serializer.serialize_enrichment_result({})
| en | 0.626846 | # @pytest.mark.xfail(reason="[gene models] wrong annotation") | 2.119385 | 2 |
src/rightClickHelper/component/notice/elePyMessageBox.py | NWYLZW/right-click-helper | 24 | 6617825 | <reponame>NWYLZW/right-click-helper<filename>src/rightClickHelper/component/notice/elePyMessageBox.py<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from enum import Enum
from typing import Callable, Any
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QHBoxLayout, QWidget, QVBoxLayout, QTextBrowser
from src.rightClickHelper.component.elePyDialog import ElePyDialog
from src.rightClickHelper.component.form.elePyButton import ElePyButton
from src.rightClickHelper.component.label.elePyIcon import ElePyIcon
from src.rightClickHelper.component.label.elePyLabel import ElePyLabel
from src.rightClickHelper.tool.animationTool import AnimationTool
from src.rightClickHelper.tool.widgetTool import WidgetTool
class AlertAction(Enum):
    """Outcome reported to a message-box button callback."""
    CANCEL = 0
    CONFIRM = 1
class ElePyMessageBox(
    ElePyDialog
):
    """Element-UI-styled message box (alert / confirm dialog).

    The widget is frameless: a draggable title bar (``top``), a rich-text
    body (``contentText``) and a button row (``bottom``) are built in
    ``_initUi``.  Use :meth:`alert` for a single-button box and
    :meth:`confirm` for a confirm/cancel pair.
    """

    # Lazily created shared instance; see :meth:`instance`.
    __instance__ = None

    @staticmethod
    def instance() -> 'ElePyMessageBox':
        # BUG FIX: the original tested ``hasattr(ElePyMessageBox,
        # '__instance__')`` which is always true (the attribute is
        # declared on the class above), so every call built and leaked a
        # fresh dialog.  Create the shared instance only when missing.
        if ElePyMessageBox.__instance__ is None:
            ElePyMessageBox.__instance__ = ElePyMessageBox()
        return ElePyMessageBox.__instance__

    def __init__(self, parent=None, properties: dict = None):
        super().__init__(parent, properties)

    def _initUi(self):
        """Build the static widget tree (title bar, content, button row)."""
        super(ElePyMessageBox, self)._initUi()
        self.setLayout(QHBoxLayout())
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.setSQSS(
            self.__class__.getResource('sqss/component/ele_py_message_box.sqss')
        )
        self.setFixedSize(360, 180)

        mainWidget = QWidget(self)
        mainWidget.setFixedSize(self.size())
        self.layout().addWidget(mainWidget)
        self.mainWidget = mainWidget
        mainWidget.setLayout(QVBoxLayout())
        mainWidget.layout().setSpacing(0)

        # --- title bar -------------------------------------------------
        top = QWidget()
        top.setProperty('class', 'top')
        top.setFixedHeight(40)
        top.setLayout(QHBoxLayout())
        top.setCursor(Qt.SizeAllCursor)  # hints that the bar drags the box
        self.top = top

        icon = ElePyIcon(top)
        icon.setFontPixel(20)
        top.layout().addWidget(icon)
        self.leftIcon = icon

        title = ElePyLabel(top)
        WidgetTool.setFont(title, size=12)
        title.setTextInteractionFlags(
            Qt.TextSelectableByMouse
        )
        title.setCursor(
            Qt.IBeamCursor
        )
        top.layout().addWidget(title)
        self.title = title

        deleteIcon = ElePyIcon(top, {'text': ''})
        deleteIcon.setObjectName('delete')
        deleteIcon.setFontPixel(20)
        deleteIcon.setCursor(Qt.PointingHandCursor)
        deleteIcon.clicked.connect(self.close)
        top.layout().addWidget(deleteIcon)
        mainWidget.layout().addWidget(top)

        # --- message body ----------------------------------------------
        content = QWidget()
        content.setProperty('class', 'content')
        content.setLayout(QHBoxLayout())
        contentText = QTextBrowser(content)
        WidgetTool.setFont(contentText)
        contentText.viewport().setCursor(
            Qt.IBeamCursor
        )
        content.layout().addWidget(contentText)
        self.content = content
        self.contentText = contentText
        mainWidget.layout().addWidget(content)

        # --- button row ------------------------------------------------
        bottom = QWidget()
        bottom.setProperty('class', 'bottom')
        bottom.setFixedHeight(60)
        bottom.setLayout(QHBoxLayout())
        self.bottom = bottom
        self.cancelBtn = self.pushBtn()
        self.confirmBtn = self.pushBtn(properties={
            'type': ElePyButton.Type.PRIMARY
        })
        mainWidget.layout().addWidget(bottom)

    def _initData(self):
        # Drag state used by the frameless-window move logic below.
        self.mDrag = False

    def _initEvent(self):
        """Wire up title-bar dragging and external-link opening."""
        def mousePressEvent(event):
            self.mDragPosition = event.globalPos() - self.pos()
            if event.button() == Qt.LeftButton:
                self.mDrag = True
                event.accept()

        def mouseMoveEvent(event):
            if event.buttons() == Qt.LeftButton and self.mDrag:
                self.move(event.globalPos() - self.mDragPosition)
                event.accept()

        def mouseReleaseEvent(QMouseEvent):
            self.mDrag = False

        self.top.mousePressEvent = mousePressEvent
        self.top.mouseMoveEvent = mouseMoveEvent
        self.top.mouseReleaseEvent = mouseReleaseEvent

        # Open anchors in the system browser instead of navigating inside
        # the QTextBrowser itself.
        self.contentText.setOpenLinks(False)
        self.contentText.anchorClicked.connect(
            lambda url: QDesktopServices.openUrl(url)
        )

    def exec(self) -> int:
        # NOTE(review): unlike QDialog.exec this does not block and
        # returns None despite the ``int`` annotation (kept for
        # backward compatibility) — confirm before relying on a result.
        self.setWindowModality(Qt.ApplicationModal)
        self.show()

    def show(self) -> None:
        super(ElePyMessageBox, self).show()
        # Play the appear animation (reverse=False).
        AnimationTool.createReverse(
            self, False
        )()

    def close(self) -> bool:
        # Play the hide animation, then actually close via the base class.
        AnimationTool.createReverse(
            self, True, super(ElePyMessageBox, self).close
        )()
        return True

    def setBaseData(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Fill title, icon and HTML content; -1 keeps the default size."""
        self.contentText.setHtml(content)
        if contentWidth != -1:
            self.setFixedWidth(contentWidth + 40)
        if contentHeight != -1:
            self.setFixedHeight(contentHeight + 100)
        self.mainWidget.setFixedSize(self.size())
        self.title.setText(title)
        self.leftIcon.setText(leftIcon)

    def pushBtn(
        self
        , text: str = ''
        , properties: dict = None
    ) -> ElePyButton:
        """Append a small button to the bottom row and return it."""
        # BUG FIX: the original used a mutable default argument
        # (``properties={}``); use None and build a fresh dict per call.
        if properties is None:
            properties = {}
        self.bottom.layout().setAlignment(
            Qt.AlignRight
        )
        btn = ElePyButton(self.bottom, {
            'text': text,
            'el-size': ElePyButton.Size.SMALL.value
            , **properties
        })
        self.bottom.layout().addWidget(btn)
        return btn

    def setBtn(
        self, btn: ElePyButton, isShow: bool = True
        , text: str = ''
        , callback: Callable[[AlertAction], Any] = None
        , alertAction: AlertAction = AlertAction.CONFIRM
    ):
        """Configure one of the two buttons for the next alert/confirm.

        A callback may return False to keep the box open; returning None
        or True closes it.
        NOTE(review): ``clicked.connect`` accumulates one handler per
        call, so callbacks from earlier alert()/confirm() invocations
        fire again — confirm whether that is intended.
        """
        def clicked():
            if callback:
                val = callback(alertAction)
                if val is None or val is True:
                    self.close()
            else:
                self.close()
        btn.setVisible(isShow)
        btn.setText(text)
        btn.clicked.connect(clicked)

    def alert(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , confirmBtnText: str = 'confirm'
        , callback: Callable[[AlertAction], Any] = None
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Show a single-button (confirm only) message box."""
        self.setBaseData(
            content, title, leftIcon
            , contentWidth=contentWidth, contentHeight=contentHeight
        )
        self.setBtn(
            self.cancelBtn, False)
        self.setBtn(
            self.confirmBtn, True
            , confirmBtnText, callback
            , AlertAction.CONFIRM)
        self.exec()

    def confirm(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , confirmBtnText: str = 'confirm'
        , confirmCallback: Callable[[AlertAction], Any] = None
        , cancelBtnText: str = 'cancel'
        , cancelCallback: Callable[[AlertAction], Any] = None
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Show a confirm/cancel message box."""
        self.setBaseData(
            content, title, leftIcon
            , contentWidth=contentWidth, contentHeight=contentHeight
        )
        self.setBtn(
            self.confirmBtn, True
            , confirmBtnText, confirmCallback
            , AlertAction.CONFIRM)
        self.setBtn(
            self.cancelBtn, True
            , cancelBtnText, cancelCallback
            , AlertAction.CANCEL)
        self.exec()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from enum import Enum
from typing import Callable, Any
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QHBoxLayout, QWidget, QVBoxLayout, QTextBrowser
from src.rightClickHelper.component.elePyDialog import ElePyDialog
from src.rightClickHelper.component.form.elePyButton import ElePyButton
from src.rightClickHelper.component.label.elePyIcon import ElePyIcon
from src.rightClickHelper.component.label.elePyLabel import ElePyLabel
from src.rightClickHelper.tool.animationTool import AnimationTool
from src.rightClickHelper.tool.widgetTool import WidgetTool
class AlertAction(Enum):
    """Outcome reported to a message-box button callback."""
    CANCEL = 0
    CONFIRM = 1
class ElePyMessageBox(
    ElePyDialog
):
    """Element-UI-styled message box (alert / confirm dialog).

    The widget is frameless: a draggable title bar (``top``), a rich-text
    body (``contentText``) and a button row (``bottom``) are built in
    ``_initUi``.  Use :meth:`alert` for a single-button box and
    :meth:`confirm` for a confirm/cancel pair.
    """

    # Lazily created shared instance; see :meth:`instance`.
    __instance__ = None

    @staticmethod
    def instance() -> 'ElePyMessageBox':
        # BUG FIX: the original tested ``hasattr(ElePyMessageBox,
        # '__instance__')`` which is always true (the attribute is
        # declared on the class above), so every call built and leaked a
        # fresh dialog.  Create the shared instance only when missing.
        if ElePyMessageBox.__instance__ is None:
            ElePyMessageBox.__instance__ = ElePyMessageBox()
        return ElePyMessageBox.__instance__

    def __init__(self, parent=None, properties: dict = None):
        super().__init__(parent, properties)

    def _initUi(self):
        """Build the static widget tree (title bar, content, button row)."""
        super(ElePyMessageBox, self)._initUi()
        self.setLayout(QHBoxLayout())
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.setSQSS(
            self.__class__.getResource('sqss/component/ele_py_message_box.sqss')
        )
        self.setFixedSize(360, 180)

        mainWidget = QWidget(self)
        mainWidget.setFixedSize(self.size())
        self.layout().addWidget(mainWidget)
        self.mainWidget = mainWidget
        mainWidget.setLayout(QVBoxLayout())
        mainWidget.layout().setSpacing(0)

        # --- title bar -------------------------------------------------
        top = QWidget()
        top.setProperty('class', 'top')
        top.setFixedHeight(40)
        top.setLayout(QHBoxLayout())
        top.setCursor(Qt.SizeAllCursor)  # hints that the bar drags the box
        self.top = top

        icon = ElePyIcon(top)
        icon.setFontPixel(20)
        top.layout().addWidget(icon)
        self.leftIcon = icon

        title = ElePyLabel(top)
        WidgetTool.setFont(title, size=12)
        title.setTextInteractionFlags(
            Qt.TextSelectableByMouse
        )
        title.setCursor(
            Qt.IBeamCursor
        )
        top.layout().addWidget(title)
        self.title = title

        deleteIcon = ElePyIcon(top, {'text': ''})
        deleteIcon.setObjectName('delete')
        deleteIcon.setFontPixel(20)
        deleteIcon.setCursor(Qt.PointingHandCursor)
        deleteIcon.clicked.connect(self.close)
        top.layout().addWidget(deleteIcon)
        mainWidget.layout().addWidget(top)

        # --- message body ----------------------------------------------
        content = QWidget()
        content.setProperty('class', 'content')
        content.setLayout(QHBoxLayout())
        contentText = QTextBrowser(content)
        WidgetTool.setFont(contentText)
        contentText.viewport().setCursor(
            Qt.IBeamCursor
        )
        content.layout().addWidget(contentText)
        self.content = content
        self.contentText = contentText
        mainWidget.layout().addWidget(content)

        # --- button row ------------------------------------------------
        bottom = QWidget()
        bottom.setProperty('class', 'bottom')
        bottom.setFixedHeight(60)
        bottom.setLayout(QHBoxLayout())
        self.bottom = bottom
        self.cancelBtn = self.pushBtn()
        self.confirmBtn = self.pushBtn(properties={
            'type': ElePyButton.Type.PRIMARY
        })
        mainWidget.layout().addWidget(bottom)

    def _initData(self):
        # Drag state used by the frameless-window move logic below.
        self.mDrag = False

    def _initEvent(self):
        """Wire up title-bar dragging and external-link opening."""
        def mousePressEvent(event):
            self.mDragPosition = event.globalPos() - self.pos()
            if event.button() == Qt.LeftButton:
                self.mDrag = True
                event.accept()

        def mouseMoveEvent(event):
            if event.buttons() == Qt.LeftButton and self.mDrag:
                self.move(event.globalPos() - self.mDragPosition)
                event.accept()

        def mouseReleaseEvent(QMouseEvent):
            self.mDrag = False

        self.top.mousePressEvent = mousePressEvent
        self.top.mouseMoveEvent = mouseMoveEvent
        self.top.mouseReleaseEvent = mouseReleaseEvent

        # Open anchors in the system browser instead of navigating inside
        # the QTextBrowser itself.
        self.contentText.setOpenLinks(False)
        self.contentText.anchorClicked.connect(
            lambda url: QDesktopServices.openUrl(url)
        )

    def exec(self) -> int:
        # NOTE(review): unlike QDialog.exec this does not block and
        # returns None despite the ``int`` annotation (kept for
        # backward compatibility) — confirm before relying on a result.
        self.setWindowModality(Qt.ApplicationModal)
        self.show()

    def show(self) -> None:
        super(ElePyMessageBox, self).show()
        # Play the appear animation (reverse=False).
        AnimationTool.createReverse(
            self, False
        )()

    def close(self) -> bool:
        # Play the hide animation, then actually close via the base class.
        AnimationTool.createReverse(
            self, True, super(ElePyMessageBox, self).close
        )()
        return True

    def setBaseData(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Fill title, icon and HTML content; -1 keeps the default size."""
        self.contentText.setHtml(content)
        if contentWidth != -1:
            self.setFixedWidth(contentWidth + 40)
        if contentHeight != -1:
            self.setFixedHeight(contentHeight + 100)
        self.mainWidget.setFixedSize(self.size())
        self.title.setText(title)
        self.leftIcon.setText(leftIcon)

    def pushBtn(
        self
        , text: str = ''
        , properties: dict = None
    ) -> ElePyButton:
        """Append a small button to the bottom row and return it."""
        # BUG FIX: the original used a mutable default argument
        # (``properties={}``); use None and build a fresh dict per call.
        if properties is None:
            properties = {}
        self.bottom.layout().setAlignment(
            Qt.AlignRight
        )
        btn = ElePyButton(self.bottom, {
            'text': text,
            'el-size': ElePyButton.Size.SMALL.value
            , **properties
        })
        self.bottom.layout().addWidget(btn)
        return btn

    def setBtn(
        self, btn: ElePyButton, isShow: bool = True
        , text: str = ''
        , callback: Callable[[AlertAction], Any] = None
        , alertAction: AlertAction = AlertAction.CONFIRM
    ):
        """Configure one of the two buttons for the next alert/confirm.

        A callback may return False to keep the box open; returning None
        or True closes it.
        NOTE(review): ``clicked.connect`` accumulates one handler per
        call, so callbacks from earlier alert()/confirm() invocations
        fire again — confirm whether that is intended.
        """
        def clicked():
            if callback:
                val = callback(alertAction)
                if val is None or val is True:
                    self.close()
            else:
                self.close()
        btn.setVisible(isShow)
        btn.setText(text)
        btn.clicked.connect(clicked)

    def alert(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , confirmBtnText: str = 'confirm'
        , callback: Callable[[AlertAction], Any] = None
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Show a single-button (confirm only) message box."""
        self.setBaseData(
            content, title, leftIcon
            , contentWidth=contentWidth, contentHeight=contentHeight
        )
        self.setBtn(
            self.cancelBtn, False)
        self.setBtn(
            self.confirmBtn, True
            , confirmBtnText, callback
            , AlertAction.CONFIRM)
        self.exec()

    def confirm(
        self
        , content: str, title: str = 'Please confirm'
        , leftIcon: str = ''
        , confirmBtnText: str = 'confirm'
        , confirmCallback: Callable[[AlertAction], Any] = None
        , cancelBtnText: str = 'cancel'
        , cancelCallback: Callable[[AlertAction], Any] = None
        , contentWidth: int = -1
        , contentHeight: int = -1
    ):
        """Show a confirm/cancel message box."""
        self.setBaseData(
            content, title, leftIcon
            , contentWidth=contentWidth, contentHeight=contentHeight
        )
        self.setBtn(
            self.confirmBtn, True
            , confirmBtnText, confirmCallback
            , AlertAction.CONFIRM)
        self.setBtn(
            self.cancelBtn, True
            , cancelBtnText, cancelCallback
            , AlertAction.CANCEL)
        self.exec()
parseMISO_MXE_MT.py | TaliaferroLab/AnalysisScripts | 0 | 6617826 | import gzip
import subprocess
import sys
import pdb
import os
### sample cmd line:
#parseMISO_MXE.py /net/afterthefact/data/athma/MouseEncode/RNAseq/MISO/AFE Testis_Rep1_AFE.psi
##if submitting from inside folder:
#parseMISO_MXE.py `pwd` ${tissue}_Rep${rep}_${i}.psi
## inputs:
# (1) tissue/rep specific summary file
## outputs (bed file):
# (1) chr
# (2) AFE start
# (3) AFE end
# (4) gene;PSI;CI.low;CI.high;Assigned.Counts
#example input file:
# /net/afterthefact/data/athma/MouseEncode/RNASeq/MISO/AFE/Testis_Rep1_AFE.psi/summary_output/summary/Testis_Rep1_AFE.psi.miso_summary')
def parseMISO_MXE(FOLDER, SUMNAME):
    """Convert a MISO MXE summary into a BED-like file.

    Reads ``FOLDER/SUMNAME/summary_output/summary/SUMNAME.miso_summary``
    and writes ``FOLDER/SUMNAME_MXE.bed`` with one line per isoform
    (none / mxe1 / mxe2 / both) for every event that has exactly four
    PSI values.  Output columns: chrom, start, end, gene_label, psi,
    ci_low, ci_high, ci_width.

    Fixes over the original: files are closed via ``with``; a blank line
    in the middle of the summary no longer aborts processing (the old
    ``while``/``readline`` loop broke at the first empty split).
    """
    sys.stderr.write('This is the error for ' + FOLDER + '/' + SUMNAME + '\n')
    summary_path = os.path.join(
        FOLDER, SUMNAME, 'summary_output', 'summary',
        SUMNAME + '.miso_summary')
    out_path = os.path.join(FOLDER, SUMNAME + '_MXE.bed')
    labels = ['_none', '_mxe1', '_mxe2', '_both']
    counter = 0
    with open(summary_path) as summary_info, open(out_path, 'w') as outfile:
        summary_info.readline()  # discard the header line
        for line in summary_info:
            fields = line.split()
            if not fields:
                continue
            counter += 1
            gene = fields[0]
            psis = fields[1].split(',')
            lows = fields[2].split(',')
            highs = fields[3].split(',')
            chrom = fields[7]
            starts = fields[9].split(',')
            ends = fields[10].split(',')
            # Only complete MXE events (exactly 4 PSI values) make sense.
            if len(psis) != 4:
                continue
            for i, label in enumerate(labels):
                ci_width = float(highs[i]) - float(lows[i])
                outfile.write('\t'.join([
                    chrom, starts[i], ends[i], gene + label,
                    psis[i], lows[i], highs[i], str(ci_width)]) + '\n')
    sys.stderr.write(str(counter) + ' genes processed...' + '\n')
    sys.stderr.write('ALL DONE!')
# Process every MISO event directory directly under the given folder.
base_dir = os.path.abspath(sys.argv[1])
for entry in os.listdir(base_dir):
    # BUG FIX: os.listdir returns bare names, so the original
    # os.path.isdir(entry) was resolved against the CWD and silently
    # skipped every directory unless the script was run from inside
    # the target folder.  Join with the base directory instead.
    if os.path.isdir(os.path.join(base_dir, entry)):
        parseMISO_MXE(base_dir, entry)
| import gzip
import subprocess
import sys
import pdb
import os
### sample cmd line:
#parseMISO_MXE.py /net/afterthefact/data/athma/MouseEncode/RNAseq/MISO/AFE Testis_Rep1_AFE.psi
##if submitting from inside folder:
#parseMISO_MXE.py `pwd` ${tissue}_Rep${rep}_${i}.psi
## inputs:
# (1) tissue/rep specific summary file
## outputs (bed file):
# (1) chr
# (2) AFE start
# (3) AFE end
# (4) gene;PSI;CI.low;CI.high;Assigned.Counts
#example input file:
# /net/afterthefact/data/athma/MouseEncode/RNASeq/MISO/AFE/Testis_Rep1_AFE.psi/summary_output/summary/Testis_Rep1_AFE.psi.miso_summary')
def parseMISO_MXE(FOLDER, SUMNAME):
    """Convert a MISO MXE summary into a BED-like file.

    Reads ``FOLDER/SUMNAME/summary_output/summary/SUMNAME.miso_summary``
    and writes ``FOLDER/SUMNAME_MXE.bed`` with one line per isoform
    (none / mxe1 / mxe2 / both) for every event that has exactly four
    PSI values.  Output columns: chrom, start, end, gene_label, psi,
    ci_low, ci_high, ci_width.

    Fixes over the original: files are closed via ``with``; a blank line
    in the middle of the summary no longer aborts processing (the old
    ``while``/``readline`` loop broke at the first empty split).
    """
    sys.stderr.write('This is the error for ' + FOLDER + '/' + SUMNAME + '\n')
    summary_path = os.path.join(
        FOLDER, SUMNAME, 'summary_output', 'summary',
        SUMNAME + '.miso_summary')
    out_path = os.path.join(FOLDER, SUMNAME + '_MXE.bed')
    labels = ['_none', '_mxe1', '_mxe2', '_both']
    counter = 0
    with open(summary_path) as summary_info, open(out_path, 'w') as outfile:
        summary_info.readline()  # discard the header line
        for line in summary_info:
            fields = line.split()
            if not fields:
                continue
            counter += 1
            gene = fields[0]
            psis = fields[1].split(',')
            lows = fields[2].split(',')
            highs = fields[3].split(',')
            chrom = fields[7]
            starts = fields[9].split(',')
            ends = fields[10].split(',')
            # Only complete MXE events (exactly 4 PSI values) make sense.
            if len(psis) != 4:
                continue
            for i, label in enumerate(labels):
                ci_width = float(highs[i]) - float(lows[i])
                outfile.write('\t'.join([
                    chrom, starts[i], ends[i], gene + label,
                    psis[i], lows[i], highs[i], str(ci_width)]) + '\n')
    sys.stderr.write(str(counter) + ' genes processed...' + '\n')
    sys.stderr.write('ALL DONE!')
# Run the parser over every sub-directory of the supplied folder.
# NOTE: os.listdir returns bare names, so the isdir test must be made on the
# joined path - the original checked the bare name, which only worked when
# the current working directory happened to be sys.argv[1].
for directory in os.listdir(sys.argv[1]):
    if os.path.isdir(os.path.join(sys.argv[1], directory)):
        parseMISO_MXE(os.path.abspath(sys.argv[1]), os.path.basename(directory))
| en | 0.38347 | ### sample cmd line: #parseMISO_MXE.py /net/afterthefact/data/athma/MouseEncode/RNAseq/MISO/AFE Testis_Rep1_AFE.psi ##if submitting from inside folder: #parseMISO_MXE.py `pwd` ${tissue}_Rep${rep}_${i}.psi ## inputs: # (1) tissue/rep specific summary file ## outputs (bed file): # (1) chr # (2) AFE start # (3) AFE end # (4) gene;PSI;CI.low;CI.high;Assigned.Counts #example input file: # /net/afterthefact/data/athma/MouseEncode/RNASeq/MISO/AFE/Testis_Rep1_AFE.psi/summary_output/summary/Testis_Rep1_AFE.psi.miso_summary') #FOLDER=('/net/afterthefact/data/athma/MouseEncode/RNAseq/MISO/AFE') #SUMNAME=('Testis_Rep1_AFE.psi') #SUMNAME=('Testis_Rep1_AFE.psi/summary_output/summary/Testis_Rep1_AFE.psi.miso_summary') # Open folder and flush header line ### Examples lines #ENSMUSG00000066151@4:61961182:62021610 0.70 0.24 0.99 'ENSMUSG00000066151@4:61961182:62021610.A.0','ENSMUSG00000066151@4:61961182:62021610.B.0' (0,0):28,(1,0):1 0:1 chr4 - 62021404,61965225 62021610,61966057 # Open output folder #CHANGE THIS BASED ON EVENT TYPE #4 psi values here. #Only consider lines with 4 psi values. Only having one psi value doesn't make sense. | 2.416739 | 2 |
nxcompile/flow/compile.py | Intuity/nexus | 6 | 6617827 | # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from statistics import mean
from ..models.constant import Constant
from ..models.flop import Flop
from ..models.gate import Gate, Operation
from nxconstants import Instruction as NXInstruction
log = logging.getLogger("compiler.compile")
class Input:
    """ Represents a boundary input to the logic """

    def __init__(self, bit, targets):
        # Record the driving port bit and the operations it feeds
        self.bit, self.targets = bit, targets

    def __repr__(self):
        return f"<Input {self.bit}>"
class Output:
    """ Represents a boundary output from the logic """

    def __init__(self, bit, source):
        # Record the port bit and whatever drives it
        self.bit, self.source = bit, source

    def __repr__(self):
        return f"<Output {self.bit}>"
class State:
    """ Stateful element (flop) linking one driving source to its targets """

    def __init__(self, bit, source, targets):
        self.bit, self.source, self.targets = bit, source, targets
class Instruction:
    """ A logic operation with its source/target links and placement node """

    def __init__(self, op, sources, targets, node):
        self.op, self.sources, self.targets, self.node = op, sources, targets, node
class Node:
    """
    Represents a logic node within the mesh, keeps track of input, output, and
    instruction slot usage. Also performs compilation of operations into encoded
    values, generation of input handling and output handling.
    """
    def __init__(
        self, mesh, row, column, inputs=8, outputs=8, slots=12, registers=8
    ):
        """ Initialise the Node.
        Args:
            mesh     : Pointer to the mesh
            row      : Row position within the mesh
            column   : Column position within the mesh
            inputs   : Number of input positions
            outputs  : Number of output positions
            slots    : Maximum number of operations
            registers: Number of working registers
        """
        # Keep a reference to the mesh
        self.mesh = mesh
        # Position within the mesh
        self.position = (row, column)
        # Keep a record of available resources
        self.__num_inputs = inputs
        self.__num_outputs = outputs
        self.__num_slots = slots
        self.__num_registers = registers
        # Keep track of how many of each type of resource is consumed
        self.__used_inputs = 0
        self.__used_outputs = 0
        self.__used_registers = []
        # Keep a list of all operations
        self.__ops = []
    def __repr__(self):
        return (
            f"<Node {self.position} - "
            f"In: {self.__used_inputs}/{self.__num_inputs}, "
            f"Out: {self.__used_outputs}/{self.__num_outputs}, "
            f"Ops: {len(self.__ops)}/{self.__num_slots}>"
        )
    @property
    def input_usage(self): return (self.__used_inputs / self.__num_inputs)
    @property
    def output_usage(self): return (self.__used_outputs / self.__num_outputs)
    @property
    def slot_usage(self): return (len(self.__ops) / self.__num_slots)
    @property
    def ops(self): return self.__ops[:]
    @property
    def usage(self):
        # Worst-case utilisation across the three resource types (0..1)
        return max(self.input_usage, self.output_usage, self.slot_usage)
    @property
    def capacity(self):
        # Remaining headroom - 1 minus the worst-case utilisation
        return 1 - self.usage
    def add_op(self, op):
        """ Attach an operation to this node and refresh usage counts.
        Args:
            op: Instruction to attach - must not already be placed anywhere
        """
        assert not self.contains_op(op)
        assert op.node == None
        # Attach operation to node
        self.__ops.append(op)
        op.node = self
        # Update counts for used inputs and used outputs
        self.recount()
    def count_op_input_usage(self, *ops):
        """ Count distinct external inputs (state, or ops on other nodes)
            required by the given operations.
        """
        op_inputs = []
        for op in ops:
            op_inputs += [
                x for x in op.sources if isinstance(x, State) or
                (isinstance(x, Instruction) and x.node != self)
            ]
        # Deduplicate - the same external signal only needs one input slot
        return len(set(op_inputs))
    def count_op_output_usage(self, *ops):
        """ Count how many of the given operations drive at least one target
            outside this node (state, or ops on other nodes) - each such op
            needs exactly one output slot.
        """
        op_outputs = 0
        for op in ops:
            for tgt in op.targets:
                if (
                    isinstance(tgt, State) or
                    (isinstance(tgt, Instruction) and tgt.node != self)
                ):
                    op_outputs += 1
                    break
        return op_outputs
    def count_op_usage(self, *ops):
        """ Return the (inputs, outputs) usage totals for the given ops. """
        op_inputs, op_outputs = 0, 0
        for op in ops:
            op_inputs += self.count_op_input_usage(op)
            op_outputs += self.count_op_output_usage(op)
        return op_inputs, op_outputs
    def recount(self):
        """ Recompute used input/output counts and sanity-check limits. """
        # Count how many inputs and outputs are required
        self.__used_inputs, self.__used_outputs = self.count_op_usage(*self.ops)
        # Check that resources haven't been exceeded
        assert self.__used_inputs <= self.__num_inputs
        assert self.__used_outputs <= self.__num_outputs
        assert len(self.__ops) <= self.__num_slots
    def remove_op(self, op):
        """ Detach an operation from this node. """
        assert self.contains_op(op)
        self.__ops.remove(op)
        op.node = None
    def contains_op(self, op):
        """ Test whether an operation is already placed on this node. """
        assert isinstance(op, Instruction)
        return op in self.__ops
    def space_for_op(self, *ops):
        """ Test whether the given ops would fit alongside the current ones.
        NOTE(review): the strict '<' comparisons leave one input, one output
        and one slot permanently unused - possibly intentional headroom, but
        looks like an off-by-one; confirm against the hardware model.
        """
        new_inputs, new_outputs = self.count_op_usage(*self.ops, *ops)
        return (
            (new_inputs < self.__num_inputs ) and
            (new_outputs < self.__num_outputs) and
            ((len(ops) + len(self.ops)) < self.__num_slots )
        )
    def encode(self, op, sources, tgt_reg, output):
        """ Encode a single operation into a packed NXInstruction.
        Args:
            op     : Instruction whose gate operation selects the truth table
            sources: Up to two (is_input, index) tuples locating the operands
            tgt_reg: Working register receiving the result
            output : Whether the result is also exposed as a node output
        Returns: The packed integer instruction word
        """
        assert len(sources) <= 2
        # Pad missing operands with (register, index 0) placeholders
        sources += [(0, 0)] * (2 - len(sources)) if len(sources) < 2 else []
        # Truth tables:
        # - Bit [2] (+4) : Controlled by input A
        # - Bit [1] (+2) : Controlled by input B
        # - Bit [0] (+1) : Controlled by input C
        instr = NXInstruction()
        instr.truth = {
            Operation.INVERT: 0b0000_1111,
            Operation.AND   : 0b1100_0000,
            Operation.NAND  : 0b0011_1111,
            Operation.OR    : 0b1111_1100,
            Operation.NOR   : 0b0000_0011,
            Operation.XOR   : 0b0011_1100,
            Operation.XNOR  : 0b1100_0011,
        }[op.op.op]
        instr.src_a = sources[0][1]
        instr.src_a_ip = 1 if sources[0][0] else 0
        instr.src_b = sources[1][1]
        instr.src_b_ip = 1 if sources[1][0] else 0
        instr.tgt_reg = tgt_reg
        instr.gen_out = 1 if output else 0
        return instr.pack()
    def decode(self, op):
        """ Decode a packed instruction word into a readable dictionary.
        NOTE(review): these bit positions overlap each other (bits 12+ are
        read both as OPCODE and as the source-A flag) and do not match the
        truth-table encoding produced by encode() above - this looks like a
        stale decoder for an older opcode-based format; verify before use.
        """
        assert isinstance(op, int)
        is_in_a = (op >> 12) & 0x1
        is_in_b = (op >> 6) & 0x1
        return {
            "OPCODE"   : Operation((op >> 12) & 0x7).name,
            "SOURCE A" : ("INPUT[" if is_in_a else "REG[") + str((op >> 13) & 0x1F) + "]",
            "SOURCE B" : ("INPUT[" if is_in_b else "REG[") + str((op >>  7) & 0x1F) + "]",
            "TGT REG"  : f"REG[{(op >> 1) & 0x1F}]",
            "OUTPUT"   : "YES" if ((op >> 0) & 0x1) else "NO",
        }
    def compile_operations(self):
        """ Compile operations allocated to this node into encoded values
        Returns: Tuple of input allocation map, output allocation map, bytecode
                 encoded operations
        """
        # Sort all of the operations based on dependencies
        unordered = self.ops[:]
        ordered   = []
        while unordered:
            for op in unordered:
                satisified = True
                for src in op.sources:
                    satisified &= (
                        # It must be an instruction to affect order and...
                        (not isinstance(src, Instruction)) or
                        # ...it must be an instruction of this node...
                        (src not in self.ops) or
                        # ...or it must already have been placed in 'ordered'
                        (src in ordered)
                    )
                    # If not satisfied, move on
                    if not satisified: break
                # If satisfied, place this operation
                if satisified:
                    ordered.append(op)
                    unordered.remove(op)
                    break
        assert len(unordered) == 0, f"Failed to order {len(unordered)} ops"
        # Allocate outputs to instructions
        outputs = [None] * self.__num_outputs
        for op_idx, op in enumerate(ordered):
            # If this op doesn't generate an output, skip it
            if not self.count_op_output_usage(op): continue
            # Check for the next available slot
            assert None in outputs, f"Run out of outputs for node {self.position}"
            slot_idx = outputs.index(None)
            # Allocate the output
            log.debug(
                f"{self.position} - {op_idx}/{len(ordered)}: OUT[{slot_idx}]"
            )
            outputs[slot_idx] = op
        # Allocate loopback inputs (using the same position as matching output)
        inputs = [None] * self.__num_inputs
        for op_idx, op in enumerate(ordered):
            for src in op.sources:
                # Skip sources that are already placed
                if src in inputs: continue
                # Skip allocation of constants and instructions (only want state)
                if type(src) in (Constant, Instruction): continue
                # Test if the state is fed by an output of this node
                assert isinstance(src, State), \
                    f"{self.position}: Got a non-stateful source"
                if src.source not in outputs: continue
                # Place this input in the same position
                # NOTE: rebinds the outer loop variable 'op_idx' here - safe
                # because enumerate() restores it on the next iteration
                op_idx = outputs.index(src.source)
                assert inputs[op_idx] == None, \
                    f"{self.position}: Input {op_idx} already taken"
                inputs[op_idx] = src
        # Allocate input, output, and register usage
        regs    = [None] * self.__num_registers
        encoded = []
        for op_idx, op in enumerate(ordered):
            # If no free registers, raise an exception
            if None not in regs:
                raise Exception(f"Run out of registers in node {self.position}")
            # Does this operation need any external inputs?
            op_sources = []
            for src in op.sources:
                # Is this source already placed?
                if src in inputs:
                    op_sources.append((True, inputs.index(src)))
                    continue
                # If this is a registered value, use it
                if src in regs:
                    op_sources.append((False, regs.index(src)))
                    continue
                # If this is a constant, ignore it
                if isinstance(src, Constant): continue
                # If this is an internal instruction, raise an error
                if isinstance(src, Instruction) and src in ordered:
                    raise Exception(
                        f"{self.position} - {op_idx}/{len(ordered)}: Could not"
                        f" locate source '{src.op.id}' for '{op.op.id}'"
                    )
                # Otherwise, allocate the first free slot
                if None not in inputs:
                    raise Exception(f"Run out of inputs in node {self.position}")
                use_input = inputs.index(None)
                log.debug(
                    f"{self.position} - {op_idx}/{len(ordered)}: IN[{use_input}]"
                )
                inputs[use_input] = src
                op_sources.append((True, inputs.index(src)))
            # Use the first free register as temporary storage
            use_reg = regs.index(None)
            log.debug(
                f"{self.position} - {op_idx}/{len(ordered)}: REG[{use_reg}]"
            )
            regs[use_reg] = op
            # Encode the instruction
            encoded.append(self.encode(op, op_sources, use_reg, op in outputs))
            # Check for any registers that have freed up (their value is no
            # longer required by any instruction still to be encoded)
            required = sum([x.sources for x in ordered[op_idx+1:]], [])
            for reg_idx, reg in enumerate(regs):
                if reg and reg not in required:
                    log.debug(
                        f"{self.position} - {op_idx}/{len(ordered)}: evicting "
                        f"{reg.op.id} from REG[{reg_idx}]"
                    )
                    regs[reg_idx] = None
        # Return I/O mappings and the bytecode instruction stream
        return inputs, outputs, encoded
class Mesh:
    """ Mesh of node models to support allocation and scheduling of operations """
    def __init__(self, rows=4, columns=4, **params):
        """ Initialise the Mesh.
        Args:
            rows   : Number of rows in the mesh
            columns: Number of columns in the mesh
            params : Other parameters - keys prefixed 'node_' are stripped of
                     the prefix and forwarded to every Node constructor
        """
        # Create the main mesh of nodes
        self.nodes = [
            [
                Node(
                    self, x, y,
                    **{k.replace("node_", "", 1): v for k, v in params.items() if k.startswith("node_")}
                ) for y in range(columns)
            ] for x in range(rows)
        ]
        # Create a special reserved output node
        self.output = Node(self, rows, 0)
    def __getitem__(self, key):
        """ Index the mesh - mesh[row][col] or mesh[row, col] both work. """
        if isinstance(key, tuple):
            node = self.nodes
            for item in key: node = node[item]
            return node
        else:
            return self.nodes[key]
    @property
    def all_nodes(self):
        """ Yield every node in the mesh in row-major order. """
        for row in self.nodes:
            for node in row:
                yield node
    def find_input(self, bit):
        """ Find nodes where a certain PortBit is being used as an input.
        NOTE(review): Node does not define an 'inputs' attribute anywhere in
        this file, so this method looks stale and would raise AttributeError
        if called - confirm before relying on it.
        Args:
            bit: The PortBit to locate
        """
        usage = []
        for node in self.all_nodes:
            if bit in [x.bit for x in node.inputs if isinstance(x, Input)]:
                usage.append(node)
        return usage
    def find_first_vacant(
        self, op=None, start_row=0, start_column=0, **options
    ):
        """
        Find the first vacant node in the mesh - the search has two priorities
        (1) the node with the highest remaining capacity, (2) the earliest row
        in the mesh.
        Args:
            op          : Operation to fit into the node (defaults to None)
            start_row   : Only search from row X onwards (defaults to 0)
            start_column: Only search from column Y onwards (defaults to 0)
            options     : Options to pass to 'space_for_op'
        Returns: The best matching candidate node, or None if no matches found
        """
        best_cap = 0
        viable   = None
        for row in self.nodes[start_row:]:
            for node in row[start_column:]:
                if (
                    (node.capacity > best_cap) and
                    (not op or node.space_for_op(op, **options))
                ):
                    viable   = node
                    best_cap = node.capacity
            # Stop at the first row that yielded any viable node - this is
            # what gives earlier rows priority over later ones
            if viable: break
        return viable
    def show_utilisation(self, metric="summary"):
        """ Print out a utilisation table for different metrics.
        Args:
            metric: Which metric to tabulate (default: summary); one of
                    'input', 'output', 'slot', or 'summary' (the worst case)
        """
        print("=" * 80)
        print(f"{metric.capitalize()} Usage:")
        print("")
        print("      " + " ".join([f"{x:^5d}" for x in range(len(self.nodes[0]))]))
        print("------" + "-".join(["-----" for x in range(len(self.nodes[0]))]))
        values = []
        for r_idx, row in enumerate(self.nodes):
            row_str = ""
            for node in row:
                u_val = 0
                if   metric == "input"  : u_val = node.input_usage
                elif metric == "output" : u_val = node.output_usage
                elif metric == "slot"   : u_val = node.slot_usage
                elif metric == "summary": u_val = node.usage
                else: raise Exception(f"Unknown metric {metric}")
                row_str += f"{u_val:01.03f} "
                values.append(u_val)
            print(f"{r_idx:3d} | {row_str}")
        print("")
        print(f"Max: {max(values):.02f}, Min: {min(values):.02f}, Mean: {mean(values):.02f}")
        print("=" * 80)
    def report_state(self, compiled_inputs):
        """ Produce a report on where state (flops) has been located.
        Args:
            compiled_inputs: Dictionary of compiled inputs for the whole mesh
        Returns: Keys are mesh position of the input, value is the flop
        """
        mapping = {}
        for node in self.all_nodes:
            inputs = compiled_inputs[node.position]
            for index, source in enumerate(inputs):
                if not source or not isinstance(source, State): continue
                mapping[
                    node.position[0], node.position[1], index
                ] = source.bit.port.parent
        return mapping
    def report_outputs(self, compiled_msgs):
        """ Produce a report on where top-level boundary outputs are generated.
        NOTE(review): this unpacks each message as a (target, node) tuple, but
        compile() below builds messages as dictionaries - the two appear to be
        out of sync; verify before calling this method.
        Args:
            compiled_msgs: Dictionary of compiled messages for the whole mesh
        Returns: Keys are the mesh position of the output, value is the output
        """
        mapping = {}
        for node in self.all_nodes:
            for index, messages in enumerate(compiled_msgs[node.position]):
                for target, tgt_node in messages:
                    if tgt_node != self.output: continue
                    mapping[
                        node.position[0], node.position[1], index
                    ] = target.bit
        return mapping
def compile(
    module,
    rows=4, columns=4,
    node_inputs=32, node_outputs=32, node_registers=8, node_slots=512,
):
    """
    Manage the compilation process - converting the logical model of the design
    into operations, messages, and handling configurations.
    Args:
        module        : The logic module to compile
        rows          : Number of rows in the mesh (default: 4)
        columns       : Number of columns in the mesh (default: 4)
        node_inputs   : Number of inputs per node
        node_outputs  : Number of outputs per node
        node_registers: Number of registers per node
        node_slots    : Number of instruction slots per node
    Returns: Tuple of (per-node encoded instruction streams, per-node loopback
             masks, per-node message lists, flop placement report from
             Mesh.report_state, and a per-port output driver report)
    """
    # Create a mesh of the requested configuration
    mesh = Mesh(
        rows=rows, columns=columns,
        node_inputs=node_inputs, node_outputs=node_outputs,
        node_registers=node_registers, node_slots=node_slots,
    )
    # Convert gates to instructions, flops to state objects
    terms   = {}
    bit_map = {}
    for item in module.children.values():
        if isinstance(item, Gate):
            assert item.id not in bit_map
            assert str(item) not in terms
            bit_map[item.id] = terms[str(item)] = Instruction(item, [], [], None)
        elif isinstance(item, Flop):
            assert item.input[0].id not in bit_map
            bit_map[item.input[0].id] = state = State(item.input[0], None, [])
            # Both the true and inverted outputs map back to the same state
            if item.output:
                assert item.output[0].id not in bit_map
                bit_map[item.output[0].id] = state
            if item.output_inv:
                assert item.output_inv[0].id not in bit_map
                bit_map[item.output_inv[0].id] = state
        else:
            raise Exception(f"Unsupported child type: {item}")
    # Build boundary I/O
    for port in module.ports.values():
        assert port.is_input or port.is_output
        for bit in port.bits:
            bit_map[bit.id] = (Input if port.is_input else Output)(bit, [])
    # Link instruction I/O
    for op in (x for x in bit_map.values() if isinstance(x, Instruction)):
        for input in op.op.inputs:
            op.sources.append(bit_map[input.id])
        for output in op.op.outputs:
            op.targets.append(bit_map[output.id])
    # Link state I/O
    for state in (x for x in bit_map.values() if isinstance(x, State)):
        state.source = bit_map[state.bit.driver.id]
        if state.bit.port.parent.output:
            for tgt in state.bit.port.parent.output[0].targets:
                state.targets.append(bit_map[tgt.id])
        if state.bit.port.parent.output_inv:
            for tgt in state.bit.port.parent.output_inv[0].targets:
                state.targets.append(bit_map[tgt.id])
    # Link boundary I/O
    for port in module.ports.values():
        for bit in port.bits:
            if port.is_input:
                for tgt in bit.targets:
                    if tgt.id not in bit_map: continue
                    bit_map[bit.id].targets.append(bit_map[tgt.id])
            elif port.is_output:
                bit_map[bit.id].source = bit_map[bit.driver.id]
    # Place operations into the mesh, starting with the most used
    log.info("Starting to schedule operations into mesh")
    to_place    = list(terms.values())
    stall_count = 0
    while to_place:
        # Detect placement deadlock and abort
        if stall_count > len(to_place):
            perc = (1 - (len(to_place) / len(terms.values()))) * 100
            log.info("Unplaced operations:")
            for idx, op in enumerate(to_place):
                src_ops = [x for x in op.sources if isinstance(x, Instruction)]
                log.info(
                    f"[{idx:03d}] {type(op.op).__name__}_{op.op.id} requires " +
                    ", ".join([f"{type(x.op).__name__}_{x.op.id}" for x in src_ops])
                )
            raise Exception(
                f"Deadlock detected with {len(to_place)} operations left unplaced "
                f"from a total of {len(terms.values())} ({perc:.01f}% complete)"
            )
        # Pop the next term to place
        op = to_place.pop(0)
        assert isinstance(op, Instruction)
        # Find the set of nodes that hold the sources
        src_ops   = [x for x in op.sources if isinstance(x, Instruction)]
        src_nodes = list(set([x.node for x in src_ops]))
        # If we're not ready to place (some source still unplaced), postpone
        if None in src_nodes:
            to_place.append(op)
            stall_count += 1
            continue
        # Reset the stall count to zero if a placement is successful
        stall_count = 0
        # Try to identify a suitable node
        node    = None
        to_move = []
        # - If there are no instruction dependencies, place anywhere
        if not src_ops:
            node = mesh.find_first_vacant(op)
        # - If inner terms exist, place in the same node or one in the next row
        else:
            # If all sources are in one node, is there space for a new entry?
            if len(src_nodes) == 1 and src_nodes[0].space_for_op(op):
                node = src_nodes[0]
            # Otherwise, can all sub-terms be moved into one node?
            if not node and len(src_nodes) > 1:
                for src_node in src_nodes:
                    if src_node.space_for_op(op, *src_ops):
                        node    = src_node
                        to_move = [x for x in src_ops if x not in node.ops]
                        break
            # Otherwise, need to find a node in the next row down
            if not node:
                last_row = max([x.position[0] for x in src_nodes])
                node     = mesh.find_first_vacant(
                    op, start_row=(last_row + 1)
                )
            # If still no node found, place anywhere
            if not node: node = mesh.find_first_vacant(op)
        # Check a node was found
        if not node:
            mesh.show_utilisation()
            raise Exception(f"No node has capacity for term {op.op}")
        # Move any supporting terms
        for item in to_move:
            old_node = item.node
            old_node.remove_op(item)
            node.add_op(item)
            assert item not in old_node.ops
            assert item in node.ops
        # Place the term into the node
        node.add_op(op)
        # Trigger usage recounts on source nodes
        for src_node in set([x.node for x in src_ops]): src_node.recount()
    # Work out where every operation has been placed
    gate_map = {}
    for node in mesh.all_nodes:
        for op_idx, op in enumerate(node.ops):
            gate_map[op.op.id] = (node, op_idx)
    # Debugging information
    mesh.show_utilisation()
    mesh.show_utilisation("input")
    mesh.show_utilisation("output")
    mesh.show_utilisation("slot")
    # Compile operations for every node
    compiled_inputs  = {}
    compiled_outputs = {}
    compiled_instrs  = {}
    for node in mesh.all_nodes:
        (
            compiled_inputs[node.position],
            compiled_outputs[node.position],
            compiled_instrs[node.position],
        ) = node.compile_operations()
    # Compile signal state updates
    compiled_loopback = {}
    compiled_msgs     = {}
    for (tgt_row, tgt_col), inputs in compiled_inputs.items():
        # Compile loopbacks - a bitmask of inputs fed by this node's own
        # outputs, which therefore never need a message on the fabric
        compiled_loopback[tgt_row, tgt_col] = 0
        for idx_input, input in enumerate(inputs):
            # Skip non-stateful inputs
            if not isinstance(input, State): continue
            # Check if this is a loopback
            if input.source not in compiled_outputs[tgt_row, tgt_col]: continue
            # Append to the loopback mask
            compiled_loopback[tgt_row, tgt_col] |= (1 << idx_input)
        # Compile messages between nodes
        for idx_input, input in enumerate(inputs):
            # Skip unpopulated inputs
            if not input: continue
            # Detect if input is flopped
            is_stateful = isinstance(input, State)
            # Resolve the instruction driving the connection
            true_source = input.source if is_stateful else input
            # Get source row and column
            src_row, src_col = true_source.node.position
            # Skip loopbacks (handled separately)
            if (src_row == tgt_row) and (src_col == tgt_col): continue
            # Get the output index for the source
            src_idx = compiled_outputs[src_row, src_col].index(true_source)
            # Ensure message storage exists for the source node
            if (src_row, src_col) not in compiled_msgs:
                compiled_msgs[src_row, src_col] = [[] for _ in range(node_outputs)]
            # Append an internal message
            compiled_msgs[src_row, src_col][src_idx].append({
                "row": tgt_row, "column": tgt_col, "index": idx_input,
                "is_seq": is_stateful
            })
    # Build a report of where outputs are generated from, and insert messages
    # TODO: Temporarily using 'fake' nodes to receive outputs, in the longer
    #       term will separate 'internal' and 'external' messaging
    output_counter = 0
    output_drivers = {}
    for port in module.outputs:
        output_drivers[port.name] = []
        for bit in port.bits:
            driver      = bit_map[bit.driver.id]
            is_stateful = isinstance(driver, State)
            # NOTE(review): 'driver.source' is only defined for State objects -
            # a combinationally-driven output (Instruction driver) would raise
            # AttributeError here; presumably all outputs are flopped - confirm
            src_row, src_col = driver.source.node.position
            src_idx = compiled_outputs[src_row, src_col].index(driver.source)
            # Target an unused node input on a fake row below the real mesh
            node_offset  = output_counter // node_inputs
            input_offset = output_counter %  node_inputs
            row_offset   = node_offset // columns
            col_offset   = node_offset %  columns
            tgt_row      = rows + row_offset
            # Record where this output will be sent to
            output_drivers[port.name].append((
                src_row, src_col, src_idx, tgt_row, col_offset, input_offset,
                is_stateful
            ))
            # Ensure message storage exists for the source node
            if (src_row, src_col) not in compiled_msgs:
                compiled_msgs[src_row, src_col] = [[] for _ in range(node_outputs)]
            # Setup a message for this output on the source node
            compiled_msgs[src_row, src_col][src_idx].append({
                "row": tgt_row, "column": col_offset, "index": input_offset,
                "is_seq": is_stateful
            })
            # Increment the output counter
            output_counter += 1
    # Accumulate message statistics
    msg_counts = [sum([len(y) for y in x]) for x in compiled_msgs.values()]
    log.info(f"Total messages {sum(msg_counts)}")
    log.info(f" - Max count: {max(msg_counts)}")
    log.info(f" - Min count: {min(msg_counts)}")
    log.info(f" - Avg count: {mean(msg_counts)}")
    # Return instruction sequences, input handling, output handling
    return (
        compiled_instrs, compiled_loopback, compiled_msgs,
        mesh.report_state(compiled_inputs), output_drivers
    )
| # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from statistics import mean
from ..models.constant import Constant
from ..models.flop import Flop
from ..models.gate import Gate, Operation
from nxconstants import Instruction as NXInstruction
log = logging.getLogger("compiler.compile")
class Input:
    """ Represents a boundary input to the logic """

    def __init__(self, bit, targets):
        # Record the driving port bit and the operations it feeds
        self.bit, self.targets = bit, targets

    def __repr__(self):
        return f"<Input {self.bit}>"
class Output:
    """ Represents a boundary output from the logic """

    def __init__(self, bit, source):
        # Record the port bit and whatever drives it
        self.bit, self.source = bit, source

    def __repr__(self):
        return f"<Output {self.bit}>"
class State:
    """ Stateful element (flop) linking one driving source to its targets """

    def __init__(self, bit, source, targets):
        self.bit, self.source, self.targets = bit, source, targets
class Instruction:
    """ A logic operation with its source/target links and placement node """

    def __init__(self, op, sources, targets, node):
        self.op, self.sources, self.targets, self.node = op, sources, targets, node
class Node:
"""
Represents a logic node within the mesh, keeps track of input, output, and
instruction slot usage. Also performs compilation of operations into encoded
values, generation of input handling and output handling.
"""
def __init__(
self, mesh, row, column, inputs=8, outputs=8, slots=12, registers=8
):
""" Initialise the Node.
Args:
mesh : Pointer to the mesh
row : Row position within the mesh
column : Column position within the mesh
inputs : Number of input positions
outputs : Number of output positions
slots : Maximum number of operations
registers: Number of working registers
"""
# Keep a reference to the mesh
self.mesh = mesh
# Position within the mesh
self.position = (row, column)
# Keep a record of available resources
self.__num_inputs = inputs
self.__num_outputs = outputs
self.__num_slots = slots
self.__num_registers = registers
# Keep track of how many of each type of resource is consumed
self.__used_inputs = 0
self.__used_outputs = 0
self.__used_registers = []
# Keep a list of all operations
self.__ops = []
def __repr__(self):
return (
f"<Node {self.position} - "
f"In: {self.__used_inputs}/{self.__num_inputs}, "
f"Out: {self.__used_outputs}/{self.__num_outputs}, "
f"Ops: {len(self.__ops)}/{self.__num_slots}>"
)
@property
def input_usage(self): return (self.__used_inputs / self.__num_inputs)
@property
def output_usage(self): return (self.__used_outputs / self.__num_outputs)
@property
def slot_usage(self): return (len(self.__ops) / self.__num_slots)
@property
def ops(self): return self.__ops[:]
@property
def usage(self):
return max(self.input_usage, self.output_usage, self.slot_usage)
@property
def capacity(self):
return 1 - self.usage
def add_op(self, op):
assert not self.contains_op(op)
assert op.node == None
# Attach operation to node
self.__ops.append(op)
op.node = self
# Update counts for used inputs and used outputs
self.recount()
def count_op_input_usage(self, *ops):
op_inputs = []
for op in ops:
op_inputs += [
x for x in op.sources if isinstance(x, State) or
(isinstance(x, Instruction) and x.node != self)
]
return len(set(op_inputs))
def count_op_output_usage(self, *ops):
op_outputs = 0
for op in ops:
for tgt in op.targets:
if (
isinstance(tgt, State) or
(isinstance(tgt, Instruction) and tgt.node != self)
):
op_outputs += 1
break
return op_outputs
def count_op_usage(self, *ops):
op_inputs, op_outputs = 0, 0
for op in ops:
op_inputs += self.count_op_input_usage(op)
op_outputs += self.count_op_output_usage(op)
return op_inputs, op_outputs
def recount(self):
# Count how many inputs and outputs are required
self.__used_inputs, self.__used_outputs = self.count_op_usage(*self.ops)
# Check that resources haven't been exceeded
assert self.__used_inputs <= self.__num_inputs
assert self.__used_outputs <= self.__num_outputs
assert len(self.__ops) <= self.__num_slots
def remove_op(self, op):
assert self.contains_op(op)
self.__ops.remove(op)
op.node = None
def contains_op(self, op):
assert isinstance(op, Instruction)
return op in self.__ops
def space_for_op(self, *ops):
new_inputs, new_outputs = self.count_op_usage(*self.ops, *ops)
return (
(new_inputs < self.__num_inputs ) and
(new_outputs < self.__num_outputs) and
((len(ops) + len(self.ops)) < self.__num_slots )
)
def encode(self, op, sources, tgt_reg, output):
assert len(sources) <= 2
sources += [(0, 0)] * (2 - len(sources)) if len(sources) < 2 else []
# Truth tables:
# - Bit [2] (+4) : Controlled by input A
# - Bit [1] (+2) : Controlled by input B
# - Bit [0] (+1) : Controlled by input C
instr = NXInstruction()
instr.truth = {
Operation.INVERT: 0b0000_1111,
Operation.AND : 0b1100_0000,
Operation.NAND : 0b0011_1111,
Operation.OR : 0b1111_1100,
Operation.NOR : 0b0000_0011,
Operation.XOR : 0b0011_1100,
Operation.XNOR : 0b1100_0011,
}[op.op.op]
instr.src_a = sources[0][1]
instr.src_a_ip = 1 if sources[0][0] else 0
instr.src_b = sources[1][1]
instr.src_b_ip = 1 if sources[1][0] else 0
instr.tgt_reg = tgt_reg
instr.gen_out = 1 if output else 0
return instr.pack()
def decode(self, op):
assert isinstance(op, int)
is_in_a = (op >> 12) & 0x1
is_in_b = (op >> 6) & 0x1
return {
"OPCODE" : Operation((op >> 12) & 0x7).name,
"SOURCE A" : ("INPUT[" if is_in_a else "REG[") + str((op >> 13) & 0x1F) + "]",
"SOURCE B" : ("INPUT[" if is_in_b else "REG[") + str((op >> 7) & 0x1F) + "]",
"TGT REG" : f"REG[{(op >> 1) & 0x1F}]",
"OUTPUT" : "YES" if ((op >> 0) & 0x1) else "NO",
}
    def compile_operations(self):
        """ Compile operations allocated to this node into encoded values

        Returns: Tuple of input allocation map, output allocation map, bytecode
                 encoded operations
        """
        # Sort all of the operations based on dependencies
        # NOTE(review): if a dependency cycle ever exists between ops of this
        # node, no op can be placed and this 'while' loop never terminates
        # (the assertion below is then unreachable) - confirm cycles are
        # impossible upstream.
        unordered = self.ops[:]
        ordered = []
        while unordered:
            for op in unordered:
                satisified = True
                for src in op.sources:
                    satisified &= (
                        # It must be an instruction to affect order and...
                        (not isinstance(src, Instruction)) or
                        # ...it must be an instruction of this node...
                        (src not in self.ops ) or
                        # ...it must already have been placed
                        (src in ordered )
                    )
                    # If not satisfied, move on
                    if not satisified: break
                # If satisfied, place this operation
                if satisified:
                    ordered.append(op)
                    unordered.remove(op)
                    break
        assert len(unordered) == 0, f"Failed to order {len(unordered)} ops"
        # Allocate outputs to instructions
        outputs = [None] * self.__num_outputs
        for op_idx, op in enumerate(ordered):
            # If this op doesn't generate an output, skip it
            if not self.count_op_output_usage(op): continue
            # Check for the next available slot
            assert None in outputs, f"Run out of outputs for node {self.position}"
            slot_idx = outputs.index(None)
            # Allocate the output
            log.debug(
                f"{self.position} - {op_idx}/{len(ordered)}: OUT[{slot_idx}]"
            )
            outputs[slot_idx] = op
        # Allocate loopback inputs (using the same position as matching output)
        inputs = [None] * self.__num_inputs
        for op_idx, op in enumerate(ordered):
            for src in op.sources:
                # Skip sources that are already placed
                if src in inputs: continue
                # Skip allocation of constants and instructions (only want state)
                if type(src) in (Constant, Instruction): continue
                # Test if the state is fed by an output of this node
                assert isinstance(src, State), \
                    f"{self.position}: Got a non-stateful source"
                if src.source not in outputs: continue
                # Place this input in the same position as the driving output.
                # NOTE: 'op_idx' is rebound here; the enclosing enumerate
                # reassigns it on the next iteration so this is safe.
                op_idx = outputs.index(src.source)
                assert inputs[op_idx] == None, \
                    f"{self.position}: Input {op_idx} already taken"
                inputs[op_idx] = src
        # Allocate input, output, and register usage
        regs = [None] * self.__num_registers
        encoded = []
        for op_idx, op in enumerate(ordered):
            # If no free registers, raise an exception
            if None not in regs:
                raise Exception(f"Run out of registers in node {self.position}")
            # Does this operation need any external inputs?
            op_sources = []
            for src in op.sources:
                # Is this source already placed?
                if src in inputs:
                    op_sources.append((True, inputs.index(src)))
                    continue
                # If this is a registered value, use it
                if src in regs:
                    op_sources.append((False, regs.index(src)))
                    continue
                # If this is a constant, ignore it
                if isinstance(src, Constant): continue
                # If this is an internal instruction, raise an error
                if isinstance(src, Instruction) and src in ordered:
                    raise Exception(
                        f"{self.position} - {op_idx}/{len(ordered)}: Could not"
                        f" locate source '{src.op.id}' for '{op.op.id}'"
                    )
                # Otherwise, allocate the first free slot
                if None not in inputs:
                    raise Exception(f"Run out of inputs in node {self.position}")
                use_input = inputs.index(None)
                log.debug(
                    f"{self.position} - {op_idx}/{len(ordered)}: IN[{use_input}]"
                )
                inputs[use_input] = src
                op_sources.append((True, inputs.index(src)))
            # Use the first free register as temporary storage
            use_reg = regs.index(None)
            log.debug(
                f"{self.position} - {op_idx}/{len(ordered)}: REG[{use_reg}]"
            )
            regs[use_reg] = op
            # Encode the instruction
            encoded.append(self.encode(op, op_sources, use_reg, op in outputs))
            # Check for any registers that have freed up (no later op reads them)
            required = sum([x.sources for x in ordered[op_idx+1:]], [])
            for reg_idx, reg in enumerate(regs):
                if reg and reg not in required:
                    log.debug(
                        f"{self.position} - {op_idx}/{len(ordered)}: evicting "
                        f"{reg.op.id} from REG[{reg_idx}]"
                    )
                    regs[reg_idx] = None
        # Return I/O mappings and the bytecode instruction stream
        return inputs, outputs, encoded
class Mesh:
    """ Mesh of node models to support allocation and scheduling of operations """

    def __init__(self, rows=4, columns=4, **params):
        """ Initialise the Mesh.

        Args:
            rows   : Number of rows in the mesh
            columns: Number of columns in the mesh
            params : Other parameters - keys prefixed 'node_' are forwarded
                     to every Node with the prefix stripped
        """
        # Create the main mesh of nodes
        self.nodes = [
            [
                Node(
                    self, x, y,
                    **{k.replace("node_", "", 1): v for k, v in params.items() if k.startswith("node_")}
                ) for y in range(columns)
            ] for x in range(rows)
        ]
        # Create a special reserved output node (placed one row below the mesh)
        self.output = Node(self, rows, 0)

    def __getitem__(self, key):
        """ Support both mesh[row][col] and mesh[row, col] style indexing. """
        if isinstance(key, tuple):
            node = self.nodes
            for item in key: node = node[item]
            return node
        else:
            return self.nodes[key]

    @property
    def all_nodes(self):
        """ Yield every node in the mesh in row-major order. """
        for row in self.nodes:
            for node in row:
                yield node

    def find_input(self, bit):
        """ Find nodes where a certain PortBit is being used as an input.

        Args:
            bit: The PortBit to locate

        Returns: List of nodes using the bit as an input.
        """
        usage = []
        for node in self.all_nodes:
            if bit in [x.bit for x in node.inputs if isinstance(x, Input)]:
                usage.append(node)
        return usage

    def find_first_vacant(
        self, op=None, start_row=0, start_column=0, **options
    ):
        """
        Find the first vacant node in the mesh - the search has two priorities
        (1) the node with the highest remaining capacity, (2) the earliest row
        in the mesh.

        Args:
            op          : Operation to fit into the node (defaults to None)
            start_row   : Only search from row X onwards (defaults to 0)
            start_column: Only search from column Y onwards (defaults to 0)
            options     : Options to pass to 'space_for_op'

        Returns: The best matching candidate node, or None if no matches found
        """
        best_cap = 0
        viable = None
        for row in self.nodes[start_row:]:
            for node in row[start_column:]:
                if (
                    (node.capacity > best_cap ) and
                    (not op or node.space_for_op(op, **options))
                ):
                    viable = node
                    best_cap = node.capacity
            # Stop after the first row yielding any candidate (priority 2)
            if viable: break
        return viable

    def show_utilisation(self, metric="summary"):
        """ Print out a utilisation table for different metrics.

        Args:
            metric: Which metric to tabulate (default: summary)
        """
        print("=" * 80)
        print(f"{metric.capitalize()} Usage:")
        print("")
        print(" " + " ".join([f"{x:^5d}" for x in range(len(self.nodes[0]))]))
        print("------" + "-".join(["-----" for x in range(len(self.nodes[0]))]))
        values = []
        for r_idx, row in enumerate(self.nodes):
            row_str = ""
            for node in row:
                u_val = 0
                if metric == "input" : u_val = node.input_usage
                elif metric == "output" : u_val = node.output_usage
                elif metric == "slot" : u_val = node.slot_usage
                elif metric == "summary": u_val = node.usage
                else: raise Exception(f"Unknown metric {metric}")
                row_str += f"{u_val:01.03f} "
                values.append(u_val)
            print(f"{r_idx:3d} | {row_str}")
        print("")
        print(f"Max: {max(values):.02f}, Min: {min(values):.02f}, Mean: {mean(values):.02f}")
        print("=" * 80)

    def report_state(self, compiled_inputs):
        """ Produce a report on where state (flops) has been located.

        Args:
            compiled_inputs: Dictionary of compiled inputs for the whole mesh

        Returns: Keys are mesh position of the input, value is the flop
        """
        mapping = {}
        for node in self.all_nodes:
            inputs = compiled_inputs[node.position]
            for index, source in enumerate(inputs):
                if not source or not isinstance(source, State): continue
                mapping[
                    node.position[0], node.position[1], index
                ] = source.bit.port.parent
        return mapping

    def report_outputs(self, compiled_msgs):
        """ Produce a report on where top-level boundary outputs are generated.

        Args:
            compiled_msgs: Dictionary of compiled messages for the whole mesh

        Returns: Keys are the mesh position of the output, value is the output
        """
        # NOTE(review): this unpacks each message as a (target, node) pair,
        # but compile() builds messages as dicts - confirm which message
        # format is current before relying on this report.
        mapping = {}
        for node in self.all_nodes:
            for index, messages in enumerate(compiled_msgs[node.position]):
                for target, tgt_node in messages:
                    if tgt_node != self.output: continue
                    mapping[
                        node.position[0], node.position[1], index
                    ] = target.bit
        return mapping
def compile(
    module,
    rows=4, columns=4,
    node_inputs=32, node_outputs=32, node_registers=8, node_slots=512,
):
    """
    Manage the compilation process - converting the logical model of the design
    into operations, messages, and handling configurations.

    Args:
        module        : The logic module to compile
        rows          : Number of rows in the mesh (default: 4)
        columns       : Number of columns in the mesh (default: 4)
        node_inputs   : Number of inputs per node
        node_outputs  : Number of outputs per node
        node_registers: Number of registers per node
        node_slots    : Number of instruction slots per node

    Returns: Tuple of (per-node instruction streams, per-node loopback masks,
             per-node message lists, state placement report, output drivers).
    """
    # Create a mesh of the requested configuration
    mesh = Mesh(
        rows=rows, columns=columns,
        node_inputs=node_inputs, node_outputs=node_outputs,
        node_registers=node_registers, node_slots=node_slots,
    )
    # Convert gates to instructions, flops to state objects
    terms = {}
    bit_map = {}
    for item in module.children.values():
        if isinstance(item, Gate):
            assert item.id not in bit_map
            assert str(item) not in terms
            bit_map[item.id] = terms[str(item)] = Instruction(item, [], [], None)
        elif isinstance(item, Flop):
            assert item.input[0].id not in bit_map
            bit_map[item.input[0].id] = state = State(item.input[0], None, [])
            # The flop's outputs (true and inverted) alias the same State
            if item.output:
                assert item.output[0].id not in bit_map
                bit_map[item.output[0].id] = state
            if item.output_inv:
                assert item.output_inv[0].id not in bit_map
                bit_map[item.output_inv[0].id] = state
        else:
            raise Exception(f"Unsupported child type: {item}")
    # Build boundary I/O
    for port in module.ports.values():
        assert port.is_input or port.is_output
        for bit in port.bits:
            bit_map[bit.id] = (Input if port.is_input else Output)(bit, [])
    # Link instruction I/O
    for op in (x for x in bit_map.values() if isinstance(x, Instruction)):
        for input in op.op.inputs:
            op.sources.append(bit_map[input.id])
        for output in op.op.outputs:
            op.targets.append(bit_map[output.id])
    # Link state I/O
    for state in (x for x in bit_map.values() if isinstance(x, State)):
        state.source = bit_map[state.bit.driver.id]
        if state.bit.port.parent.output:
            for tgt in state.bit.port.parent.output[0].targets:
                state.targets.append(bit_map[tgt.id])
        if state.bit.port.parent.output_inv:
            for tgt in state.bit.port.parent.output_inv[0].targets:
                state.targets.append(bit_map[tgt.id])
    # Link boundary I/O
    for port in module.ports.values():
        for bit in port.bits:
            if port.is_input:
                for tgt in bit.targets:
                    if tgt.id not in bit_map: continue
                    bit_map[bit.id].targets.append(bit_map[tgt.id])
            elif port.is_output:
                bit_map[bit.id].source = bit_map[bit.driver.id]
    # Place operations into the mesh, starting with the most used
    log.info("Starting to schedule operations into mesh")
    to_place = list(terms.values())
    stall_count = 0
    while to_place:
        # Detect placement deadlock and abort
        if stall_count > len(to_place):
            perc = (1 - (len(to_place) / len(terms.values()))) * 100
            log.info("Unplaced operations:")
            for idx, op in enumerate(to_place):
                src_ops = [x for x in op.sources if isinstance(x, Instruction)]
                log.info(
                    f"[{idx:03d}] {type(op.op).__name__}_{op.op.id} requires " +
                    ", ".join([f"{type(x.op).__name__}_{x.op.id}" for x in src_ops])
                )
            raise Exception(
                f"Deadlock detected with {len(to_place)} operations left unplaced "
                f"from a total of {len(terms.values())} ({perc:.01f}% complete)"
            )
        # Pop the next term to place
        op = to_place.pop(0)
        assert isinstance(op, Instruction)
        # Find the set of nodes that hold the sources
        src_ops = [x for x in op.sources if isinstance(x, Instruction)]
        src_nodes = list(set([x.node for x in src_ops]))
        # If we're not ready to place (some source not yet placed), postpone
        if None in src_nodes:
            to_place.append(op)
            stall_count += 1
            continue
        # Reset the stall count to zero if a placement is successful
        stall_count = 0
        # Try to identify a suitable node
        node = None
        to_move = []
        # - If there are no instruction dependencies, place anywhere
        if not src_ops:
            node = mesh.find_first_vacant(op)
        # - If inner terms exist, place in the same node or one in the next row
        else:
            # If all sources are in one node, is there space for a new entry?
            if len(src_nodes) == 1 and src_nodes[0].space_for_op(op):
                node = src_nodes[0]
            # Otherwise, can all sub-terms be moved into one node?
            if not node and len(src_nodes) > 1:
                for src_node in src_nodes:
                    if src_node.space_for_op(op, *src_ops):
                        node = src_node
                        to_move = [x for x in src_ops if x not in node.ops]
                        break
            # Otherwise, need to find a node in the next row down
            if not node:
                last_row = max([x.position[0] for x in src_nodes])
                node = mesh.find_first_vacant(
                    op, start_row=(last_row + 1)
                )
            # If still no node found, place anywhere
            if not node: node = mesh.find_first_vacant(op)
        # Check a node was found
        if not node:
            mesh.show_utilisation()
            raise Exception(f"No node has capacity for term {op.op}")
        # Move any supporting terms
        for item in to_move:
            old_node = item.node
            old_node.remove_op(item)
            node.add_op(item)
            assert item not in old_node.ops
            assert item in node.ops
        # Place the term into the node
        node.add_op(op)
        # Trigger usage recounts on source nodes
        for src_node in set([x.node for x in src_ops]): src_node.recount()
    # Work out where every operation has been placed
    gate_map = {}
    for node in mesh.all_nodes:
        for op_idx, op in enumerate(node.ops):
            gate_map[op.op.id] = (node, op_idx)
    # Debugging information
    mesh.show_utilisation()
    mesh.show_utilisation("input")
    mesh.show_utilisation("output")
    mesh.show_utilisation("slot")
    # Compile operations for every node
    compiled_inputs = {}
    compiled_outputs = {}
    compiled_instrs = {}
    for node in mesh.all_nodes:
        (
            compiled_inputs[node.position],
            compiled_outputs[node.position],
            compiled_instrs[node.position],
        ) = node.compile_operations()
    # Compile signal state updates
    compiled_loopback = {}
    compiled_msgs = {}
    for (tgt_row, tgt_col), inputs in compiled_inputs.items():
        # Compile loopbacks (state fed back from this node's own outputs)
        compiled_loopback[tgt_row, tgt_col] = 0
        for idx_input, input in enumerate(inputs):
            # Skip non-stateful inputs
            if not isinstance(input, State): continue
            # Check if this is a loopback
            if input.source not in compiled_outputs[tgt_row, tgt_col]: continue
            # Append to the loopback mask
            compiled_loopback[tgt_row, tgt_col] |= (1 << idx_input)
        # Compile messages between nodes
        for idx_input, input in enumerate(inputs):
            # Skip unpopulated inputs
            if not input: continue
            # Detect if input is flopped
            is_stateful = isinstance(input, State)
            # Resolve the instruction driving the connection
            true_source = input.source if is_stateful else input
            # Get source row and column
            src_row, src_col = true_source.node.position
            # Skip loopbacks (handled separately)
            if (src_row == tgt_row) and (src_col == tgt_col): continue
            # Get the output index for the source
            src_idx = compiled_outputs[src_row, src_col].index(true_source)
            # Ensure message storage exists for the source node
            if (src_row, src_col) not in compiled_msgs:
                compiled_msgs[src_row, src_col] = [[] for _ in range(node_outputs)]
            # Append an internal message
            compiled_msgs[src_row, src_col][src_idx].append({
                "row": tgt_row, "column": tgt_col, "index": idx_input,
                "is_seq": is_stateful
            })
    # Build a report of where outputs are generated from, and insert messages
    # TODO: Temporarily using 'fake' nodes to receive outputs, in the longer
    # term will separate 'internal' and 'external' messaging
    output_counter = 0
    output_drivers = {}
    for port in module.outputs:
        output_drivers[port.name] = []
        for bit in port.bits:
            driver = bit_map[bit.driver.id]
            is_stateful = isinstance(driver, State)
            # NOTE(review): 'driver.source' assumes every boundary output is
            # driven by a State - confirm combinational outputs are handled
            src_row, src_col = driver.source.node.position
            src_idx = compiled_outputs[src_row, src_col].index(driver.source)
            # Target an unused node input on a fake row below the mesh
            node_offset = output_counter // node_inputs
            input_offset = output_counter % node_inputs
            row_offset = node_offset // columns
            col_offset = node_offset % columns
            tgt_row = rows + row_offset
            # Record where this output will be sent to
            output_drivers[port.name].append((
                src_row, src_col, src_idx, tgt_row, col_offset, input_offset,
                is_stateful
            ))
            # Ensure message storage exists for the source node
            if (src_row, src_col) not in compiled_msgs:
                compiled_msgs[src_row, src_col] = [[] for _ in range(node_outputs)]
            # Setup a message for this output on the source node
            compiled_msgs[src_row, src_col][src_idx].append({
                "row": tgt_row, "column": col_offset, "index": input_offset,
                "is_seq": is_stateful
            })
            # Increment the output counter
            output_counter += 1
    # Accumulate message statistics
    msg_counts = [sum([len(y) for y in x]) for x in compiled_msgs.values()]
    log.info(f"Total messages {sum(msg_counts)}")
    log.info(f" - Max count: {max(msg_counts)}")
    log.info(f" - Min count: {min(msg_counts)}")
    log.info(f" - Avg count: {mean(msg_counts)}")
    # Return instruction sequences, input handling, output handling
    return (
        compiled_instrs, compiled_loopback, compiled_msgs,
        mesh.report_state(compiled_inputs), output_drivers
    )
| en | 0.771926 | # Copyright 2021, <NAME>, mailto:<EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Represents a boundary input to the logic Represents a boundary output from the logic Represents a logic node within the mesh, keeps track of input, output, and instruction slot usage. Also performs compilation of operations into encoded values, generation of input handling and output handling. Initialise the Node. Args: mesh : Pointer to the mesh row : Row position within the mesh column : Column position within the mesh inputs : Number of input positions outputs : Number of output positions slots : Maximum number of operations registers: Number of working registers # Keep a reference to the mesh # Position within the mesh # Keep a record of available resources # Keep track of how many of each type of resource is consumed # Keep a list of all operations # Attach operation to node # Update counts for used inputs and used outputs # Count how many inputs and outputs are required # Check that resources haven't been exceeded # Truth tables: # - Bit [2] (+4) : Controlled by input A # - Bit [1] (+2) : Controlled by input B # - Bit [0] (+1) : Controlled by input C Compile operations allocated to this node into encoded values Returns: Tuple of input allocation map, output allocation map, bytecode encoded operations # Sort all of the operations based on dependencies # It must be an instruction to affect order and... # ...it must be an instruction of this node... 
# ...it must be pending placement # If not satisfied, move on # If satisfied, place this operation # Allocate outputs to instructions # If this op doesn't generate an output, skip it # Check for the next available slot # Allocate the output # Allocate loopback inputs (using the same position as matching output) # Skip sources that are already placed # Skip allocation of constants and instructions (only want state) # Test if the state is fed by an output of this node # Place this input in the same position # Allocate input, output, and register usage # If no free registers, raise an exception # Does this operation need any external inputs? # Is this source already placed? # If this is a registered value, use it # If this is a constant, ignore it # If this is an internal instruction, raise an error # Otherwise, allocate the first free slot # Use the first free register as temporary storage # Encode the instruction # Check for any registers that have freed up # Return I/O mappings and the bytecode instruction stream Mesh of node models to suppport allocation and scheduling of operations Initialise the Mesh. Args: rows : Number of rows in the mesh columns: Number of columns in the mesh params : Other parameters # Create the main mesh of nodes # Create a special reserved output node Find nodes where a certain PortBit is being used as an input. Args: bit: The PortBit to locate Find the first vacant node in the mesh - the search has two priorities (1) the node with the highest remaining capacity, (2) the earliest row in the mesh. Args: op : Operation to fit into the node (defaults to None) start_row : Only search from row X onwards (defaults to 0) start_column: Only search from column Y onwards (defaults to 0) options : Options to pass to 'space_for_op' Returns: The best matching candidate node, or None if no matches found Print out a utilisation table for different metrics. 
Args: metric: Which metric to tabulate (default: summary) Produce a report on where state (flops) has been located. Args: compiled_inputs: Dictionary of compiled inputs for the whole mesh Returns: Keys are mesh position of the input, value is the flop Produce a report on where top-level boundary outputs are generated. Args: compiled_msgs: Dictionary of compiled messages for the whole mesh Returns: Keys are the mesh position of the output, value is the output Manage the compilation process - converting the logical model of the design into operations, messages, and handling configurations. Args: module : The logic module to compile rows : Number of rows in the mesh (default: 4) columns : Number of columns in the mesh (default: 4) node_inputs : Number of inputs per node node_outputs : Number of outputs per node node_registers: Number of registers per node node_slots : Number of instruction slots per node # Create a mesh of the requested configuration # Convert gates to instructions, flops to state objects # Build boundary I/O # Link instruction I/O # Link state I/O # Link boundary I/O # Place operations into the mesh, starting with the most used # Detect placement deadlock and abort # Pop the next term to place # Find the set of nodes that hold the sources # If we're not ready to place, postpone # Reset the stall count to zero if a placement is successful # Try to identify a suitable node # - If there are no instruction dependencies, place anywhere # - If inner terms exist, place in the same node or one in the next row # If all sources are in one node, is there space for a new entry? # Otherwise, can all sub-terms be moved into one node? 
# Otherwise, need to find a node in the next row down # If still no node found, place anywhere # Check a node was found # Move any supporting terms # Place the term into the node # Trigger usage recounts on source nodes # Work out where every operation has been placed # Debugging information # Compile operations for every node # Compile signal state updates # Compile loopbacks # Skip non-stateful inputs # Check if this is a loopback # Append to the loopback mask # Compile messages between nodes # Skip unpopulated inputs # Detect if input is flopped # Resolve the instruction driving the connection # Get source row and column # Skip loopbacks (handled separately) # Get the output index for the source # Ensure message storage exists for the source node # Append an internal message # Build a report of where outputs are generated from, and insert messages # TODO: Temporarily using 'fake' nodes to receive outputs, in the longer # term will separate 'internal' and 'external' messaging # Target an unused node input # Record where this output will be sent to # Ensure message storage exists for the source node # Setup a message for this output on the source node # Increment the output counter # Accumulate message statistics # Return instruction sequences, input handling, output handling | 2.744325 | 3 |
test.py | ulikoehler/pseudo-perseus | 1 | 6617828 | #!/usr/bin/env python3
import requests

# NOTE: this first fixture is immediately overwritten below; kept as an
# alternative multi-line KaTeX test input.
teststring = """### Using the information to solve\n\nLet's plug $\\blueD{s(t_0)}=\\blueD{9}$ and $\\maroonD{s'(t_0)}=\\maroonD{-2}$ into the expression for $A'(t_0)$:\n\n$\\begin{align}\nA'(t_0)&=2\\blueD{s(t_0)}\\maroonD{s'(t_0)}\n\\\\\\\\\n&=2(\\blueD{9})(\\maroonD{-2})\n\\\\\\\\\n&=-36\n\\end{align}$"""
teststring = """$(100\\pi-200)\\text{ cm}^2$"""
# POST the Perseus/KaTeX source to the local render service
response = requests.post('http://localhost:9613/api/render-perseus', data={"input": teststring})
print(response.json()["html"]) | #!/usr/bin/env python3
import requests

# NOTE: this first fixture is immediately overwritten below; kept as an
# alternative multi-line KaTeX test input.
teststring = """### Using the information to solve\n\nLet's plug $\\blueD{s(t_0)}=\\blueD{9}$ and $\\maroonD{s'(t_0)}=\\maroonD{-2}$ into the expression for $A'(t_0)$:\n\n$\\begin{align}\nA'(t_0)&=2\\blueD{s(t_0)}\\maroonD{s'(t_0)}\n\\\\\\\\\n&=2(\\blueD{9})(\\maroonD{-2})\n\\\\\\\\\n&=-36\n\\end{align}$"""
teststring = """$(100\\pi-200)\\text{ cm}^2$"""
# POST the Perseus/KaTeX source to the local render service
response = requests.post('http://localhost:9613/api/render-perseus', data={"input": teststring})
print(response.json()["html"]) | en | 0.552096 | #!/usr/bin/env python3 ### Using the information to solve\n\nLet's plug $\\blueD{s(t_0)}=\\blueD{9}$ and $\\maroonD{s'(t_0)}=\\maroonD{-2}$ into the expression for $A'(t_0)$:\n\n$\\begin{align}\nA'(t_0)&=2\\blueD{s(t_0)}\\maroonD{s'(t_0)}\n\\\\\\\\\n&=2(\\blueD{9})(\\maroonD{-2})\n\\\\\\\\\n&=-36\n\\end{align}$ $(100\\pi-200)\\text{ cm}^2$ | 2.901076 | 3 |
program_synthesis/algolisp/dataset/code_trace.py | kavigupta/program_synthesis | 123 | 6617829 |
class CodeTrace(object):
    """Accumulates an execution trace of (function, arguments) calls.

    Callable arguments are replaced with stable placeholder names of the
    form ``LAMBDA<n>`` so the trace holds only plain values. Each recorded
    call yields an index that can later be paired with its result via
    :meth:`add_result`.
    """

    def __init__(self):
        self.clear()

    def _get_callable(self, func):
        """Return (allocating on first use) the placeholder name for *func*."""
        if func in self.names:
            return self.names[func]
        name = 'LAMBDA%d' % (len(self.names) + 1)
        self.names[func] = name
        return name

    def add_call(self, func_call, args):
        """Record a call; return its trace index, or -1 if it was filtered."""
        # The lambda markers themselves are never traced
        if func_call in ('lambda1', 'lambda2'):
            return -1
        rendered = [
            self._get_callable(arg) if callable(arg) else arg
            for arg in args
        ]
        self.history.append((func_call, rendered))
        self.results.append(None)
        return len(self.history) - 1

    def add_result(self, id, result):
        """Attach *result* to the call recorded at *id* (no-op for -1)."""
        if id >= 0:
            self.results[id] = result

    def clear(self):
        """Reset the trace to a completely empty state."""
        self.history = []
        self.results = []
        self.names = {}
|
class CodeTrace(object):
    """Records a trace of (function, args) calls and their results.

    Callable arguments are substituted with stable 'LAMBDA<n>' placeholder
    names so the trace contains only plain values.
    """
    def __init__(self):
        self.clear()
    def _get_callable(self, func):
        # Allocate a placeholder name for each distinct callable on first use
        if func not in self.names:
            self.names[func] = 'LAMBDA%d' % (len(self.names) + 1)
        return self.names[func]
    def add_call(self, func_call, args):
        """Record a call; return its index in the trace, or -1 if skipped."""
        # The lambda markers themselves are never traced
        if func_call in ('lambda1', 'lambda2'):
            return -1
        args = [self._get_callable(arg) if callable(arg) else arg for arg in args]
        self.history.append((func_call, args))
        self.results.append(None)
        return len(self.history) - 1
    def add_result(self, id, result):
        """Store the result for the call at index *id* (-1 is ignored)."""
        if id < 0:
            return
        self.results[id] = result
    def clear(self):
        """Reset history, results, and the callable name map."""
        self.history = []
        self.results = []
        self.names = {}
| none | 1 | 2.995032 | 3 | |
bin/covmat/test_load_ds14a.py | mclaughlin6464/pearce | 0 | 6617830 | import numpy as np
from load_ds14 import load_ds14
import h5py
from time import time, sleep
from itertools import product
import sys
#ds14a_part = load_ds14('/scratch/users/swmclau2/Darksky/ds14_a_1.0000', 10)
ds14a_part = load_ds14('/oak/stanford/orgs/kipac/users/swmclau2/Darksky/ds14_a_1.0000', 10)
#particles = ds14a_part.getsubbox([0,0,0], 10, pad=0, fields=['x','y', 'z'])
# TODO should fail gracefully if memory is exceeded or if p is too small.
downsample_factor = 1e-2
#########################################################################vvvvvvvvvvvvvvvvvvvvvvDELETE Wvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
f = h5py.File('/scratch/users/swmclau2/Darksky/ds14_a_1.0000_%.3f_downsample_v4.hdf5'%downsample_factor, 'w')
try:
grp = f.create_group("particles")
except ValueError:
grp = f['particles']
f.close()
#print grp.keys()
R0 = 5.81342950e6
h = 0.6880620000000001
L = 8000 #Mpc
n_subboxes = 8 # per side
subbox_L = L*1.0/n_subboxes
np.random.seed(int(time())) # TODO pass in seed?
t0 = time()
level = 4
#for idx, (subbox_idx, _subbox) in enumerate(ds14a_part.itersubbox(level=10, pad=1.0, fields=['x','y','z'], return_index = True)):
start_subbox_idx = (0,0,0)
start_idx = 0
all_particles = np.array([], dtype='float32')
for idx, subbox_idx in enumerate(product(xrange(2**level), repeat=3)):
if subbox_idx[0] < start_subbox_idx[0] or (subbox_idx[0]== start_subbox_idx[0] and subbox_idx[1] < start_subbox_idx[1]):
start_idx+=1
continue # skip these other idxs
loop_t0 = time()
print 'A',
sys.stdout.flush()
_subbox = ds14a_part._loadsubbox(subbox_idx, level=level, pad=np.ones((3,)),\
fields=['x','y','z'], cut=None, cut_fields=None)
if len(_subbox) == 0: #empty
print idx, subbox_idx, 'DONE'
break #done!
subbox_unmod = _subbox.view(np.float64).reshape(_subbox.shape+(-1,))
subbox = (subbox_unmod + R0)*h/1000
#print idx, (time()-t0)/3600.0
#print subbox.min(axis=0), subbox.max(axis=0)
#print '*'*10
#print 'B', time()-loop_t0
print 'B',
if idx%100 == 0:
print
print idx,subbox_idx, (time()-t0)/3600.0
print all_particles.shape
if idx!=0 and idx!=start_idx+1:
#print 'AA', time()-loop_t0
f = h5py.File('/scratch/users/swmclau2/Darksky/ds14_a_1.0000_%.3f_downsample_v4.hdf5'%downsample_factor)
# all_particles[all_particles<0] = 0.0 # numerical errors
# all_particles[all_particles>L] = L # numerical errors
idxs = np.floor_divide(all_particles, subbox_L).astype(int)
#account for these at the index level instead
idxs[idxs == -1] = 0
idxs[idxs== n_subboxes] == n_subboxes-1
unique_subboxes = np.vstack({tuple(row) for row in idxs})
grp = f['particles']
#print 'AB', time()-loop_t0
for us in unique_subboxes:
x_in_sb = all_particles[np.all(idxs == us, axis =1)]
sb_key = '<KEY>'%tuple(us)
print sb_key, x_in_sb.min(axis=0), x_in_sb.max(axis=0)
if sb_key in grp.keys():
dset = grp[sb_key]
dset.resize( (dset.shape[0] + x_in_sb.shape[0], 3))
dset[-x_in_sb.shape[0]:] = x_in_sb
else:
dset = grp.create_dataset(sb_key, data = x_in_sb, maxshape = (None, 3), compression = 'gzip')
#print 'AC', time()-loop_t0
f.close()
print '*-'*20
print
all_particles = np.array([], dtype='float32')
downsample_idxs = np.random.choice(subbox.shape[0], size=int(subbox.shape[0] * downsample_factor))
particles = subbox[downsample_idxs]
print 'C',
#print 'C', time()-loop_t0
# edge case
if particles.shape[0] == 0:
continue
all_particles = np.resize(all_particles, (all_particles.shape[0] + particles.shape[0], 3 ))
all_particles[-particles.shape[0]:, ] = particles
print 'D'
sys.stdout.flush()
#print 'D', time()-loop_t0
#print '*'*20
print 'Done'
| import numpy as np
# Downsample the ds14_a Dark Sky particle snapshot and bin the kept
# particles into per-subbox datasets of an HDF5 file. (Python 2 script.)
from load_ds14 import load_ds14
import h5py
from time import time, sleep
from itertools import product
import sys
#ds14a_part = load_ds14('/scratch/users/swmclau2/Darksky/ds14_a_1.0000', 10)
ds14a_part = load_ds14('/oak/stanford/orgs/kipac/users/swmclau2/Darksky/ds14_a_1.0000', 10)
#particles = ds14a_part.getsubbox([0,0,0], 10, pad=0, fields=['x','y', 'z'])
# TODO should fail gracefully if memory is exceeded or if p is too small.
# Fraction of particles kept from each loaded subbox
downsample_factor = 1e-2
# Create (or reuse) the output file and its 'particles' group
f = h5py.File('/scratch/users/swmclau2/Darksky/ds14_a_1.0000_%.3f_downsample_v4.hdf5'%downsample_factor, 'w')
try:
    grp = f.create_group("particles")
except ValueError:
    grp = f['particles']
f.close()
#print grp.keys()
R0 = 5.81342950e6
h = 0.6880620000000001
L = 8000 #Mpc
n_subboxes = 8 # per side
subbox_L = L*1.0/n_subboxes
np.random.seed(int(time())) # TODO pass in seed?
t0 = time()
level = 4
#for idx, (subbox_idx, _subbox) in enumerate(ds14a_part.itersubbox(level=10, pad=1.0, fields=['x','y','z'], return_index = True)):
start_subbox_idx = (0,0,0)
start_idx = 0
all_particles = np.array([], dtype='float32')
for idx, subbox_idx in enumerate(product(xrange(2**level), repeat=3)):
    # Allow restarting a partially-completed run from start_subbox_idx
    if subbox_idx[0] < start_subbox_idx[0] or (subbox_idx[0]== start_subbox_idx[0] and subbox_idx[1] < start_subbox_idx[1]):
        start_idx+=1
        continue # skip these other idxs
    loop_t0 = time()
    print 'A',
    sys.stdout.flush()
    _subbox = ds14a_part._loadsubbox(subbox_idx, level=level, pad=np.ones((3,)),\
                                     fields=['x','y','z'], cut=None, cut_fields=None)
    if len(_subbox) == 0: #empty
        print idx, subbox_idx, 'DONE'
        break #done!
    # Reinterpret the structured array as (N, 3) float64 coordinates, then
    # shift and rescale into Mpc/h
    subbox_unmod = _subbox.view(np.float64).reshape(_subbox.shape+(-1,))
    subbox = (subbox_unmod + R0)*h/1000
    #print idx, (time()-t0)/3600.0
    #print subbox.min(axis=0), subbox.max(axis=0)
    #print '*'*10
    #print 'B', time()-loop_t0
    print 'B',
    # Every 100 subboxes, flush the accumulated particles to disk
    if idx%100 == 0:
        print
        print idx,subbox_idx, (time()-t0)/3600.0
        print all_particles.shape
        if idx!=0 and idx!=start_idx+1:
            #print 'AA', time()-loop_t0
            f = h5py.File('/scratch/users/swmclau2/Darksky/ds14_a_1.0000_%.3f_downsample_v4.hdf5'%downsample_factor)
            # all_particles[all_particles<0] = 0.0 # numerical errors
            # all_particles[all_particles>L] = L # numerical errors
            idxs = np.floor_divide(all_particles, subbox_L).astype(int)
            #account for these at the index level instead
            idxs[idxs == -1] = 0
            # NOTE(review): the next line uses '==' (a no-op comparison)
            # where an assignment '=' appears intended - upper-edge
            # particles keep an out-of-range index of n_subboxes.
            idxs[idxs== n_subboxes] == n_subboxes-1
            unique_subboxes = np.vstack({tuple(row) for row in idxs})
            grp = f['particles']
            #print 'AB', time()-loop_t0
            for us in unique_subboxes:
                x_in_sb = all_particles[np.all(idxs == us, axis =1)]
                # NOTE(review): '<KEY>' looks like an anonymisation
                # placeholder for the original key format string
                sb_key = '<KEY>'%tuple(us)
                print sb_key, x_in_sb.min(axis=0), x_in_sb.max(axis=0)
                if sb_key in grp.keys():
                    dset = grp[sb_key]
                    dset.resize( (dset.shape[0] + x_in_sb.shape[0], 3))
                    dset[-x_in_sb.shape[0]:] = x_in_sb
                else:
                    dset = grp.create_dataset(sb_key, data = x_in_sb, maxshape = (None, 3), compression = 'gzip')
                #print 'AC', time()-loop_t0
            f.close()
            print '*-'*20
            print
            all_particles = np.array([], dtype='float32')
    # Random downsample (with replacement) of this subbox's particles
    downsample_idxs = np.random.choice(subbox.shape[0], size=int(subbox.shape[0] * downsample_factor))
    particles = subbox[downsample_idxs]
    print 'C',
    #print 'C', time()-loop_t0
    # edge case
    if particles.shape[0] == 0:
        continue
    all_particles = np.resize(all_particles, (all_particles.shape[0] + particles.shape[0], 3 ))
    all_particles[-particles.shape[0]:, ] = particles
    print 'D'
    sys.stdout.flush()
    #print 'D', time()-loop_t0
    #print '*'*20
print 'Done'
| en | 0.644213 | #ds14a_part = load_ds14('/scratch/users/swmclau2/Darksky/ds14_a_1.0000', 10) #particles = ds14a_part.getsubbox([0,0,0], 10, pad=0, fields=['x','y', 'z']) # TODO should fail gracefully if memory is exceeded or if p is too small. #########################################################################vvvvvvvvvvvvvvvvvvvvvvDELETE Wvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv #print grp.keys() #Mpc # per side # TODO pass in seed? #for idx, (subbox_idx, _subbox) in enumerate(ds14a_part.itersubbox(level=10, pad=1.0, fields=['x','y','z'], return_index = True)): # skip these other idxs #empty #done! #print idx, (time()-t0)/3600.0 #print subbox.min(axis=0), subbox.max(axis=0) #print '*'*10 #print 'B', time()-loop_t0 #print 'AA', time()-loop_t0 # all_particles[all_particles<0] = 0.0 # numerical errors # all_particles[all_particles>L] = L # numerical errors #account for these at the index level instead #print 'AB', time()-loop_t0 #print 'AC', time()-loop_t0 #print 'C', time()-loop_t0 # edge case #print 'D', time()-loop_t0 #print '*'*20 | 1.951491 | 2 |
ymir/service/__init__.py | mattvonrocketstein/ymir | 3 | 6617831 | <reponame>mattvonrocketstein/ymir<filename>ymir/service/__init__.py
# -*- coding: utf-8 -*-
""" ymir.service
"""
from .base import AbstractService
from ._vagrant import VagrantService
from .amazon import AmazonService
# Export the three service implementations under their class names.
__all__ = [
    x.__name__ for x in
    [AbstractService, VagrantService, AmazonService] ]
| # -*- coding: utf-8 -*-
""" ymir.service
"""
from .base import AbstractService
from ._vagrant import VagrantService
from .amazon import AmazonService
__all__ = [
x.__name__ for x in
[AbstractService, VagrantService, AmazonService] ] | en | 0.319214 | # -*- coding: utf-8 -*- ymir.service | 1.257996 | 1 |
utils/exception/exception.py | Dimas4/Image-Drive-AWS-Flask | 0 | 6617832 | <reponame>Dimas4/Image-Drive-AWS-Flask
class BackEndError(Exception):
    """Application-specific exception type used to signal back-end failures."""
    pass
| class BackEndError(Exception):
pass | none | 1 | 1.098025 | 1 | |
gae/connector/dataflow.py | MAhsanAkhtar/BDA_TERM_PROJECT | 29 | 6617833 | <reponame>MAhsanAkhtar/BDA_TERM_PROJECT
#MIT License
#
#Copyright (c) 2017 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""Dataflow Service to run apache-beam jobs."""
import time
from operator import itemgetter
import googleapiclient.discovery as disco
class DataflowService(object):
    """Thin wrapper around the Google Dataflow REST API (v1b3).

    :type credentials: `google.auth.credentials.Credentials`
    :param credentials: certificates to connect to GCP.
    """
    def __init__(self, credentials):
        # Build the discovery-based client once; all calls go through it.
        self.con = disco.build('dataflow', 'v1b3', credentials=credentials)

    def run_template(self, **kwargs):
        """Runs a templated job. REST API definition can be found here:
        (https://cloud.google.com/dataflow/docs/reference/rest/v1b3/
         projects.templates/create)

        :kwargs:
          :type project_id: str
          :param project_id: project id for where to build the cluster.

          :type zone: str
          :param zone: zone where cluster will be located.

          :type job_name: str
          :param job_name: name of job to run. Notice that if two jobs with
                           the same name are initiated only the first one
                           will succeed to process.

          :type template_location: str
          :param template_location: GCS path where dataflow template is saved.

          :type temp_location: str
          :param temp_location: GCS path used for the job's temporary files.
        """
        # Worker/runtime settings nested under "environment" per the API.
        environment = {
            "tempLocation": kwargs['temp_location'],
            "zone": kwargs['zone'],
            "maxWorkers": kwargs['max_workers'],
            "machineType": kwargs['machine_type'],
        }
        request_body = {
            "jobName": kwargs['job_name'],
            "gcsPath": kwargs['template_location'],
            "environment": environment,
        }
        templates = self.con.projects().templates()
        return templates.create(projectId=kwargs['project_id'],
                                body=request_body).execute(num_retries=3)
| #MIT License
#
#Copyright (c) 2017 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""Dataflow Service to run apache-beam jobs."""
import time
from operator import itemgetter
import googleapiclient.discovery as disco
class DataflowService(object):
"""Implements necessary methods to interact with dataflow jobs.
:type credentials: `google.auth.credentials.Credentials`
:param credentials: certificates to connect to GCP.
"""
def __init__(self, credentials):
self.con = disco.build('dataflow', 'v1b3', credentials=credentials)
def run_template(self, **kwargs):
"""Runs a templated job. REST API definition can be found here:
(https://cloud.google.com/dataflow/docs/reference/rest/v1b3/
projects.templates/create)
:kwargs:
:type project_id: str
:param project_id: project id for where to build the cluster.
:type zone: str
:param zone: zone where cluster will be located.
:type job_name: str
:param job_name: name of job to run. Notice that if two jobs with
the same name are initiated only the first one
will succeed to process.
:type template_location: str
:param template_location: GCS path where dataflow template is saved.
:type temp_location: str
:param temp_location: how many instances to build.
"""
project_id = kwargs['project_id']
job_name = kwargs['job_name']
template_location = kwargs['template_location']
temp_location = kwargs['temp_location']
zone = kwargs['zone']
max_workers = kwargs['max_workers']
machine_type = kwargs['machine_type']
body = {
"jobName": job_name,
"gcsPath": template_location,
"environment": {
"tempLocation": temp_location,
"zone": zone,
"maxWorkers": max_workers,
"machineType": machine_type
}
}
return self.con.projects().templates().create(
projectId=project_id, body=body).execute(num_retries=3) | en | 0.770536 | #MIT License # #Copyright (c) 2017 <NAME> # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. Dataflow Service to run apache-beam jobs. Implements necessary methods to interact with dataflow jobs. :type credentials: `google.auth.credentials.Credentials` :param credentials: certificates to connect to GCP. Runs a templated job. REST API definition can be found here: (https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ projects.templates/create) :kwargs: :type project_id: str :param project_id: project id for where to build the cluster. :type zone: str :param zone: zone where cluster will be located. :type job_name: str :param job_name: name of job to run. Notice that if two jobs with the same name are initiated only the first one will succeed to process. :type template_location: str :param template_location: GCS path where dataflow template is saved. 
:type temp_location: str :param temp_location: how many instances to build. | 1.91408 | 2 |
conftest.py | tamirmatok/Beyond-07-team-1 | 1 | 6617834 | <reponame>tamirmatok/Beyond-07-team-1
import pytest
import datetime
from django.utils import timezone
from message.models import Message, AuthorOptions
from daycare.models import DayCare, Image
from dogowner.models import DogOwner
from orders.models import Order
from review.models import Review
DOG_OWNER_FIXTURE_PROFILE_PICTURE_URL = "https://www.akc.org/wp-content/uploads/2019/06/Bohemian-Shepherd.1.jpg"
DAYCARE_FIXTURE_PROFILE_PICTURE_URL_1 = "../../static/images/daycare_image_test_01.jpeg"
DAYCARE_FIXTURE_PROFILE_PICTURE_URL_2 = "../../static/images/daycare_image_test_02.jpeg"
@pytest.fixture
def create_order():
return Order.create(dog_owner_id=DogOwner.objects.get(id=1),
daycare_id=DayCare.objects.get(id=1),
start_date=timezone.now(),
end_date=timezone.now() + datetime.timedelta(days=3),
price_per_day=100,
)
@pytest.fixture
def dogowner_message_to_daycare1():
return Message.create(author=AuthorOptions.DogOwner,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=1),
text='Hello this is the test message1 from owner to day care')
@pytest.fixture
def daycare1_reply_to_dogonwer_message():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=1),
text='This is reply to first message from daycare to owner')
@pytest.fixture
def daycare2_message_to_dogowner():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=2),
text='Hello this new chat between daycare2 and dogowner')
@pytest.fixture
def daycare3_message_to_dogowner():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=3),
text='new chat between daycare3 and dogowner')
@pytest.fixture
def create_dog_owner_user():
return DogOwner.create(email='<EMAIL>',
username='testDogOwner',
password='<PASSWORD>',
dog_name='kliford',
first_name='NEW',
last_name='USER',
phone_number=1234567890,
dog_race='lavrador',
dog_picture_url=DOG_OWNER_FIXTURE_PROFILE_PICTURE_URL,
dog_age=10,
dog_weight=6,
dog_gender='M'
)
@pytest.fixture
def daycare_data():
pytest.EMAIL = "<EMAIL>"
pytest.DAYCARE_USERNAME = "testDayCare"
pytest.DAYCARE_PASSWORD = "<PASSWORD>"
pytest.NAME = "Puppies"
pytest.DESCRIPTION = "This is the first daycare test"
pytest.PRICE_PER_DAY = 10
pytest.CAPACITY = 50
pytest.AREA = "Merkaz"
pytest.CITY = "Tel-Aviv"
pytest.ADDRESS = "The best street 5"
@pytest.fixture
def create_daycare_user(daycare_data):
    """Create a DayCare user from the constants set up by ``daycare_data``."""
    # Bug fix: price_per_day was passed pytest.CAPACITY (50) instead of
    # pytest.PRICE_PER_DAY (10), silently ignoring the configured price.
    return DayCare.create(email=pytest.EMAIL, username=pytest.DAYCARE_USERNAME, password=pytest.DAYCARE_PASSWORD,
                          name=pytest.NAME, description=pytest.DESCRIPTION, price_per_day=pytest.PRICE_PER_DAY,
                          capacity=pytest.CAPACITY, area=pytest.AREA, city=pytest.CITY, address=pytest.ADDRESS)
@pytest.fixture
def create_image1(create_daycare_user):
return Image.create(url=DAYCARE_FIXTURE_PROFILE_PICTURE_URL_1, daycare_id=create_daycare_user)
@pytest.fixture
def create_image2(create_daycare_user):
return Image.create(url=DAYCARE_FIXTURE_PROFILE_PICTURE_URL_2, daycare_id=create_daycare_user)
@pytest.fixture
def review_data(create_dog_owner_user, create_daycare_user):
pytest.REVIEW = 'sample review'
pytest.RATING = 5
pytest.DAY_CARE_ID = create_daycare_user.id
pytest.DOG_OWNER_ID = create_dog_owner_user.id
@pytest.fixture
def review(review_data):
return Review.create(review=pytest.REVIEW, rating=pytest.RATING, daycare_id=pytest.DAY_CARE_ID,
dogowner_id=pytest.DOG_OWNER_ID)
| import pytest
import datetime
from django.utils import timezone
from message.models import Message, AuthorOptions
from daycare.models import DayCare, Image
from dogowner.models import DogOwner
from orders.models import Order
from review.models import Review
DOG_OWNER_FIXTURE_PROFILE_PICTURE_URL = "https://www.akc.org/wp-content/uploads/2019/06/Bohemian-Shepherd.1.jpg"
DAYCARE_FIXTURE_PROFILE_PICTURE_URL_1 = "../../static/images/daycare_image_test_01.jpeg"
DAYCARE_FIXTURE_PROFILE_PICTURE_URL_2 = "../../static/images/daycare_image_test_02.jpeg"
@pytest.fixture
def create_order():
return Order.create(dog_owner_id=DogOwner.objects.get(id=1),
daycare_id=DayCare.objects.get(id=1),
start_date=timezone.now(),
end_date=timezone.now() + datetime.timedelta(days=3),
price_per_day=100,
)
@pytest.fixture
def dogowner_message_to_daycare1():
return Message.create(author=AuthorOptions.DogOwner,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=1),
text='Hello this is the test message1 from owner to day care')
@pytest.fixture
def daycare1_reply_to_dogonwer_message():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=1),
text='This is reply to first message from daycare to owner')
@pytest.fixture
def daycare2_message_to_dogowner():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=2),
text='Hello this new chat between daycare2 and dogowner')
@pytest.fixture
def daycare3_message_to_dogowner():
return Message.create(author=AuthorOptions.DayCare,
dogowner_id=DogOwner.objects.get(pk=1),
daycare_id=DayCare.objects.get(pk=3),
text='new chat between daycare3 and dogowner')
@pytest.fixture
def create_dog_owner_user():
return DogOwner.create(email='<EMAIL>',
username='testDogOwner',
password='<PASSWORD>',
dog_name='kliford',
first_name='NEW',
last_name='USER',
phone_number=1234567890,
dog_race='lavrador',
dog_picture_url=DOG_OWNER_FIXTURE_PROFILE_PICTURE_URL,
dog_age=10,
dog_weight=6,
dog_gender='M'
)
@pytest.fixture
def daycare_data():
pytest.EMAIL = "<EMAIL>"
pytest.DAYCARE_USERNAME = "testDayCare"
pytest.DAYCARE_PASSWORD = "<PASSWORD>"
pytest.NAME = "Puppies"
pytest.DESCRIPTION = "This is the first daycare test"
pytest.PRICE_PER_DAY = 10
pytest.CAPACITY = 50
pytest.AREA = "Merkaz"
pytest.CITY = "Tel-Aviv"
pytest.ADDRESS = "The best street 5"
@pytest.fixture
def create_daycare_user(daycare_data):
    """Create a DayCare user from the constants set up by ``daycare_data``."""
    # Bug fix: price_per_day was passed pytest.CAPACITY (50) instead of
    # pytest.PRICE_PER_DAY (10), silently ignoring the configured price.
    return DayCare.create(email=pytest.EMAIL, username=pytest.DAYCARE_USERNAME, password=pytest.DAYCARE_PASSWORD,
                          name=pytest.NAME, description=pytest.DESCRIPTION, price_per_day=pytest.PRICE_PER_DAY,
                          capacity=pytest.CAPACITY, area=pytest.AREA, city=pytest.CITY, address=pytest.ADDRESS)
@pytest.fixture
def create_image1(create_daycare_user):
return Image.create(url=DAYCARE_FIXTURE_PROFILE_PICTURE_URL_1, daycare_id=create_daycare_user)
@pytest.fixture
def create_image2(create_daycare_user):
return Image.create(url=DAYCARE_FIXTURE_PROFILE_PICTURE_URL_2, daycare_id=create_daycare_user)
@pytest.fixture
def review_data(create_dog_owner_user, create_daycare_user):
pytest.REVIEW = 'sample review'
pytest.RATING = 5
pytest.DAY_CARE_ID = create_daycare_user.id
pytest.DOG_OWNER_ID = create_dog_owner_user.id
@pytest.fixture
def review(review_data):
return Review.create(review=pytest.REVIEW, rating=pytest.RATING, daycare_id=pytest.DAY_CARE_ID,
dogowner_id=pytest.DOG_OWNER_ID) | none | 1 | 2.079809 | 2 | |
src/Main/Menu.py | nicholasz2510/whack-a-mole-cp2 | 0 | 6617835 | class Menu:
def __init__(self, buttonArray, title):
| class Menu:
def __init__(self, buttonArray, title):
| none | 1 | 1.503787 | 2 | |
lib/yamddpp/yamddpp/Diffusion/mutual_diffusion/_directly.py | atomrq/simulab | 0 | 6617836 | import math
import numpy as np
from numba import cuda
# Mutual mean square displacements: \sum_{ij}\langle|r_i(t)-r_j(0)|^2\rangle
@cuda.jit("float64(float64[:], float64[:])", device=True)
def distance2(a, b):
    # Squared Euclidean distance between two coordinate vectors.
    # Device-only helper: no sqrt, callers only accumulate the square.
    tmp = 0
    for i in range(a.shape[0]):
        tmp += (a[i] - b[i]) ** 2
    return tmp
@cuda.jit("void(float64[:,:,:], float64[:,:])")
def _cu_kernel(x, ret):  # ret -> (n_particles, n_frames)
    # One thread per (time origin i, lag j); threads whose lag would run
    # past the last frame exit immediately (triangular launch domain).
    i, j = cuda.grid(2)  # x -> (n_frames, n_particles, n_dim)
    if i >= x.shape[0]:
        return
    if j >= x.shape[0] - i:
        return
    # For every particle pair (k, l) with l >= k, accumulate the squared
    # displacement |r_k(i+j) - r_l(i)|^2 into both particles' rows at lag j.
    # NOTE(review): the l == k diagonal term is included here (and added to
    # ret[k] twice), while _cu_kernel_cum starts at l = k + 1 -- confirm
    # this asymmetry between the two kernels is intended.
    for k in range(x.shape[1] - 1):
        pkt = x[i + j, k]
        for l in range(k, x.shape[1]):
            pl0 = x[i, l]
            dr2 = distance2(pkt, pl0)
            cuda.atomic.add(ret[k], j, dr2)
            cuda.atomic.add(ret[l], j, dr2)
@cuda.jit("void(float64[:,:,:], float64[:])")
def _cu_kernel_cum(x, ret):  # ret -> (n_frames,)
    # Same launch geometry as _cu_kernel: i = time origin, j = lag.
    i, j = cuda.grid(2)
    if i >= x.shape[0]:
        return
    if j >= x.shape[0] - i:
        return
    # Accumulate over distinct particle pairs only (l > k); the factor 2
    # accounts for the symmetric (l, k) pair, so the total matches summing
    # both ordered off-diagonal terms at lag j.
    for k in range(x.shape[1] - 1):
        pkt = x[i + j, k]
        for l in range(k + 1, x.shape[1]):
            pl0 = x[i, l]
            dr2 = distance2(pkt, pl0)
            cuda.atomic.add(ret, j, 2 * dr2)
def cu_mutual_diffusion(x, cum=True, gpu=0):
    r"""Mutual diffusion calculation.

    :param x: np.ndarray, nd-coordinates with (n_frame, n_particles, n_dimension)
    :param cum: bool, summing over n_particles or not
    :param gpu: int, choose gpu
    :return: np.ndarray, mutual MSD; shape (n_frames, n_particles) when
        ``cum`` is False, otherwise (n_frames,). Values are raw sums over
        all time origins -- no normalisation is applied here.
    """
    x = x.astype(np.float64)
    with cuda.gpus[gpu]:
        device = cuda.get_current_device()
        # 2-D launch: axis 0 enumerates time origins, axis 1 lag times.
        tpb = (device.WARP_SIZE,) * 2
        bpg = (math.ceil(x.shape[0] / tpb[0]),
               math.ceil(x.shape[0] / tpb[1]))
        if not cum:
            # Allocated transposed so the kernel's atomics index
            # ret[particle][lag]; transposed back before returning.
            ret = np.zeros((x.shape[1], x.shape[0]),
                           dtype=np.float64)
            _cu_kernel[bpg, tpb](x, ret)
            ret = ret.T
        else:
            ret = np.zeros(x.shape[0], dtype=np.float64)
            _cu_kernel_cum[bpg, tpb](x, ret)
    return ret  # ret -> (n_frames, n_particles) or (n_frames,)
| import math
import numpy as np
from numba import cuda
# Mutual mean square displacements: \sum_{ij}\langle|r_i(t)-r_j(0)|^2\rangle
@cuda.jit("float64(float64[:], float64[:])", device=True)
def distance2(a, b):
tmp = 0
for i in range(a.shape[0]):
tmp += (a[i] - b[i]) ** 2
return tmp
@cuda.jit("void(float64[:,:,:], float64[:,:])")
def _cu_kernel(x, ret): # ret -> (n_particles, n_frames)
i, j = cuda.grid(2) # x -> (n_frames, n_particles, n_dim)
if i >= x.shape[0]:
return
if j >= x.shape[0] - i:
return
for k in range(x.shape[1] - 1):
pkt = x[i + j, k]
for l in range(k, x.shape[1]):
pl0 = x[i, l]
dr2 = distance2(pkt, pl0)
cuda.atomic.add(ret[k], j, dr2)
cuda.atomic.add(ret[l], j, dr2)
@cuda.jit("void(float64[:,:,:], float64[:])")
def _cu_kernel_cum(x, ret): # ret -> (n_frames,)
i, j = cuda.grid(2)
if i >= x.shape[0]:
return
if j >= x.shape[0] - i:
return
for k in range(x.shape[1] - 1):
pkt = x[i + j, k]
for l in range(k + 1, x.shape[1]):
pl0 = x[i, l]
dr2 = distance2(pkt, pl0)
cuda.atomic.add(ret, j, 2 * dr2)
def cu_mutual_diffusion(x, cum=True, gpu=0):
r"""Mutual diffusion calculation.
:param x: np.ndarray, nd-coordinates with (n_frame, n_particles, n_dimension)
:param cum: bool, summing over n_particles or not
:param gpu: int, choose gpu
:return: np.ndarray, mutual MSD
"""
x = x.astype(np.float64)
with cuda.gpus[gpu]:
device = cuda.get_current_device()
tpb = (device.WARP_SIZE,) * 2
bpg = (math.ceil(x.shape[0] / tpb[0]),
math.ceil(x.shape[0] / tpb[1]))
if not cum:
ret = np.zeros((x.shape[1], x.shape[0]),
dtype=np.float64)
_cu_kernel[bpg, tpb](x, ret)
ret = ret.T
else:
ret = np.zeros(x.shape[0], dtype=np.float64)
_cu_kernel_cum[bpg, tpb](x, ret)
return ret # ret -> (n_frames, n_particles) or (n_frames,)
| en | 0.426141 | # Mutual mean square displacements: \sum_{ij}\langle|r_i(t)-r_j(0)|^2\rangle # ret -> (n_particles, n_frames) # x -> (n_frames, n_particles, n_dim) # ret -> (n_frames,) Mutual diffusion calculation. :param x: np.ndarray, nd-coordinates with (n_frame, n_particles, n_dimension) :param cum: bool, summing over n_particles or not :param gpu: int, choose gpu :return: np.ndarray, mutual MSD # ret -> (n_frames, n_particles) or (n_frames,) | 2.283762 | 2 |
ambari-common/src/main/python/resource_management/libraries/execution_command/cluster_settings.py | samyzh/ambari | 1,664 | 6617837 | <filename>ambari-common/src/main/python/resource_management/libraries/execution_command/cluster_settings.py<gh_stars>1000+
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["ClusterSettings"]
class ClusterSettings(object):
    """
    Read-only view over "configurations->cluster-env" in command.json,
    which carries the cluster-wide settings of a cluster.
    """

    def __init__(self, clusterSettings):
        self.__cluster_settings = clusterSettings

    def __get_value(self, key):
        """
        Return the raw value stored under ``key``.
        :param key:
        :return: value if key exists else None
        """
        return self.__cluster_settings.get(key)

    def __is_true(self, key):
        """
        Case-insensitive check that ``key`` holds the string "true".
        :param key:
        :return: True or False (falsy None when the key is absent)
        """
        value = self.__cluster_settings.get(key)
        return value and value.lower() == "true"

    def is_cluster_security_enabled(self):
        """
        Whether cluster security is enabled.
        :return: True or False
        """
        return self.__is_true("security_enabled")

    def get_recovery_max_count(self):
        """
        Cluster recovery count, converted from its string form.
        :return: int
        """
        return int(self.__get_value("recovery_max_count"))

    def check_recovery_enabled(self):
        """
        Whether cluster recovery is enabled.
        :return: True or False
        """
        return self.__is_true("recovery_enabled")

    def get_recovery_type(self):
        """
        Cluster recovery type, e.g. "AUTO_START".
        :return: str
        """
        return self.__get_value("recovery_type")

    def get_kerberos_domain(self):
        """
        Kerberos domain of the cluster.
        :return: str
        """
        return self.__get_value("kerberos_domain")

    def get_smokeuser(self):
        """
        Smoke-test user name.
        :return: str
        """
        return self.__get_value("smokeuser")

    def get_user_group(self):
        """
        Cluster user group.
        :return: str
        """
        return self.__get_value("user_group")

    def get_repo_suse_rhel_template(self):
        """
        Repository template for SUSE/RHEL hosts.
        :return: str
        """
        return self.__get_value("repo_suse_rhel_template")

    def get_repo_ubuntu_template(self):
        """
        Repository template for Ubuntu hosts.
        :return: str
        """
        return self.__get_value("repo_ubuntu_template")

    def check_override_uid(self):
        """
        Whether override_uid is set.
        :return: True or False
        """
        return self.__is_true("override_uid")

    def check_sysprep_skip_copy_fast_jar_hdfs(self):
        """
        Whether sysprep_skip_copy_fast_jar_hdfs is set.
        :return: True or False
        """
        return self.__is_true("sysprep_skip_copy_fast_jar_hdfs")

    def check_sysprep_skip_lzo_package_operations(self):
        """
        Whether sysprep_skip_lzo_package_operations is set.
        :return: True or False
        """
        return self.__is_true("sysprep_skip_lzo_package_operations")

    def check_sysprep_skip_setup_jce(self):
        """
        Whether sysprep_skip_setup_jce is set.
        :return: True or False
        """
        return self.__is_true("sysprep_skip_setup_jce")

    def check_sysprep_skip_create_users_and_groups(self):
        """
        Whether sysprep_skip_create_users_and_groups is set.
        :return: True or False
        """
        return self.__is_true("sysprep_skip_create_users_and_groups")

    def check_ignore_groupsusers_create(self):
        """
        Whether ignore_groupsusers_create is set.
        :return: True or False
        """
        return self.__is_true("ignore_groupsusers_create")

    def check_fetch_nonlocal_groups(self):
        """
        Whether fetch_nonlocal_groups is set.
        :return: True or False
        """
        return self.__is_true("fetch_nonlocal_groups")
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["ClusterSettings"]
class ClusterSettings(object):
"""
This class maps to "configurations->cluster-env" in command.json which includes cluster setting information of a cluster
"""
def __init__(self, clusterSettings):
self.__cluster_settings = clusterSettings
def __get_value(self, key):
"""
Get corresponding value from the key
:param key:
:return: value if key exist else None
"""
return self.__cluster_settings.get(key)
def is_cluster_security_enabled(self):
"""
Check cluster security enabled or not
:return: True or False
"""
security_enabled = self.__get_value("security_enabled")
return security_enabled and security_enabled.lower() == "true"
def get_recovery_max_count(self):
"""
Retrieve cluster recovery count
:return: String, need to convert to int
"""
return int(self.__get_value("recovery_max_count"))
def check_recovery_enabled(self):
"""
Check if the cluster can be enabled or not
:return: True or False
"""
recovery_enabled = self.__get_value("recovery_enabled")
return recovery_enabled and recovery_enabled.lower() == "true"
def get_recovery_type(self):
"""
Retrieve cluster recovery type
:return: recovery type, i.e "AUTO_START"
"""
return self.__get_value("recovery_type")
def get_kerberos_domain(self):
"""
Retrieve kerberos domain
:return: String as kerberos domain
"""
return self.__get_value("kerberos_domain")
def get_smokeuser(self):
"""
Retrieve smokeuser
:return: smkeuser string
"""
return self.__get_value("smokeuser")
def get_user_group(self):
"""
Retrieve cluster usergroup
:return: usergroup string
"""
return self.__get_value("user_group")
def get_repo_suse_rhel_template(self):
"""
Retrieve template of suse and rhel repo
:return: template string
"""
return self.__get_value("repo_suse_rhel_template")
def get_repo_ubuntu_template(self):
"""
Retrieve template of ubuntu repo
:return: template string
"""
return self.__get_value("repo_ubuntu_template")
def check_override_uid(self):
"""
Check if override_uid is true or false
:return: True or False
"""
override_uid = self.__get_value("override_uid")
return override_uid and override_uid.lower() == "true"
def check_sysprep_skip_copy_fast_jar_hdfs(self):
"""
Check sysprep_skip_copy_fast_jar_hdfs is true or false
:return: True or False
"""
skip = self.__get_value("sysprep_skip_copy_fast_jar_hdfs")
return skip and skip.lower() == "true"
def check_sysprep_skip_lzo_package_operations(self):
"""
Check sysprep_skip_lzo_package_operations is true or false
:return: True or False
"""
skip = self.__get_value("sysprep_skip_lzo_package_operations")
return skip and skip.lower() == "true"
def check_sysprep_skip_setup_jce(self):
"""
Check sysprep_skip_setup_jce is true or false
:return: True or False
"""
skip = self.__get_value("sysprep_skip_setup_jce")
return skip and skip.lower() == "true"
def check_sysprep_skip_create_users_and_groups(self):
"""
Check sysprep_skip_copy_create_users_and_groups is true or false
:return: True or False
"""
skip = self.__get_value("sysprep_skip_create_users_and_groups")
return skip and skip.lower() == "true"
def check_ignore_groupsusers_create(self):
"""
Check ignore_groupsuers_create is true or false
:return: True or False
"""
ignored = self.__get_value("ignore_groupsusers_create")
return ignored and ignored.lower() == "true"
def check_fetch_nonlocal_groups(self):
"""
Check fetch_nonlocal_group is true or false
:return: True or False
"""
fetch_nonlocal_group = self.__get_value("fetch_nonlocal_groups")
return fetch_nonlocal_group and fetch_nonlocal_group.lower() == "true" | en | 0.633354 | #!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This class maps to "configurations->cluster-env" in command.json which includes cluster setting information of a cluster Get corresponding value from the key :param key: :return: value if key exist else None Check cluster security enabled or not :return: True or False Retrieve cluster recovery count :return: String, need to convert to int Check if the cluster can be enabled or not :return: True or False Retrieve cluster recovery type :return: recovery type, i.e "AUTO_START" Retrieve kerberos domain :return: String as kerberos domain Retrieve smokeuser :return: smkeuser string Retrieve cluster usergroup :return: usergroup string Retrieve template of suse and rhel repo :return: template string Retrieve template of ubuntu repo :return: template string Check if override_uid is true or false :return: True or False Check sysprep_skip_copy_fast_jar_hdfs is true or false :return: True or False Check sysprep_skip_lzo_package_operations is true or false :return: True or False Check sysprep_skip_setup_jce is true or false :return: True or False Check sysprep_skip_copy_create_users_and_groups is true or false :return: True or False 
Check ignore_groupsuers_create is true or false :return: True or False Check fetch_nonlocal_group is true or false :return: True or False | 1.990502 | 2 |
test/tests_unittest.py | Xevib/xml_presset_TI | 0 | 6617838 | import unittest
from lxml import etree
class TestXML(unittest.TestCase):
def setUp(self):
self.filename = 'put your file name here.xml'
def test_xsd(self):
f_preset = open(self.filename)
f_xsd = open('test/tagging-preset.xsd')
xmlschema_doc = etree.parse(f_xsd)
xmlschema = etree.XMLSchema(xmlschema_doc)
doc = etree.parse(f_preset)
xmlschema.assertValid(doc)
if __name__ == '__main__':
unittest.main()
| import unittest
from lxml import etree
class TestXML(unittest.TestCase):
def setUp(self):
self.filename = 'put your file name here.xml'
def test_xsd(self):
f_preset = open(self.filename)
f_xsd = open('test/tagging-preset.xsd')
xmlschema_doc = etree.parse(f_xsd)
xmlschema = etree.XMLSchema(xmlschema_doc)
doc = etree.parse(f_preset)
xmlschema.assertValid(doc)
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.996139 | 3 | |
sortfunc.py | cipang/chinese-sort | 5 | 6617839 | import sortdata
def chinese_sort_key(s):
"""
Accepts a string and generates a key for comparison.
"""
data = sortdata.data
l = list()
for c in s:
code = ord(c)
if c.isupper() or c.islower() or c.isspace() or c.isdigit():
l.append(0) # Strokes.
l.append(code) # Frequency.
else:
if code in data:
l.append(data[code]["kTotalStrokes"])
l.append(data[code]["kFrequency"])
else:
l.append(99)
l.append(99)
return tuple(l)
| import sortdata
def chinese_sort_key(s):
"""
Accepts a string and generates a key for comparison.
"""
data = sortdata.data
l = list()
for c in s:
code = ord(c)
if c.isupper() or c.islower() or c.isspace() or c.isdigit():
l.append(0) # Strokes.
l.append(code) # Frequency.
else:
if code in data:
l.append(data[code]["kTotalStrokes"])
l.append(data[code]["kFrequency"])
else:
l.append(99)
l.append(99)
return tuple(l)
| en | 0.779839 | Accepts a string and generates a key for comparison. # Strokes. # Frequency. | 3.323084 | 3 |
mikelint/analysers/scope.py | mike-fam/mikelint | 2 | 6617840 | <reponame>mike-fam/mikelint
"""
Analyse scope violations
"""
from astroid import Global, Const
from .analyser import Analyser, register_check
class ScopeAnalyser(Analyser):
"""Analyser checking for scope violations"""
MAGIC_WHITELIST = [0, 1, -1, 100]
@register_check("{}:{}: Globals used:\n\t{}")
def check_globals(self):
"""Checks if code has any global variables"""
# lineno, line
result: list[tuple[str, int, str]] = []
for filename, attr in self._sources.items():
for node in attr.tree.pre_order():
if not isinstance(node, Global):
continue
result.append((filename, node.lineno,
self.get_line(filename, node.lineno)))
return result
@register_check("{}:{}: Magic number used\n\t{}")
def check_magic_numbers_used(self):
"""Check if any magic number has been used """
# lineno, line
result: list[tuple[str, int, str]] = []
checked_lines = []
for filename, attr in self._sources.items():
for node in attr.tree.pre_order():
if not isinstance(node, Const):
continue
if node.value in self.MAGIC_WHITELIST:
continue
if not isinstance(node.value, int) and \
not isinstance(node.value, float):
continue
if node.lineno in checked_lines:
continue
result.append((filename, node.lineno,
self.get_line(filename, node.lineno)))
checked_lines.append(node.lineno)
return result
| """
Analyse scope violations
"""
from astroid import Global, Const
from .analyser import Analyser, register_check
class ScopeAnalyser(Analyser):
"""Analyser checking for scope violations"""
MAGIC_WHITELIST = [0, 1, -1, 100]
@register_check("{}:{}: Globals used:\n\t{}")
def check_globals(self):
"""Checks if code has any global variables"""
# lineno, line
result: list[tuple[str, int, str]] = []
for filename, attr in self._sources.items():
for node in attr.tree.pre_order():
if not isinstance(node, Global):
continue
result.append((filename, node.lineno,
self.get_line(filename, node.lineno)))
return result
@register_check("{}:{}: Magic number used\n\t{}")
def check_magic_numbers_used(self):
"""Check if any magic number has been used """
# lineno, line
result: list[tuple[str, int, str]] = []
checked_lines = []
for filename, attr in self._sources.items():
for node in attr.tree.pre_order():
if not isinstance(node, Const):
continue
if node.value in self.MAGIC_WHITELIST:
continue
if not isinstance(node.value, int) and \
not isinstance(node.value, float):
continue
if node.lineno in checked_lines:
continue
result.append((filename, node.lineno,
self.get_line(filename, node.lineno)))
checked_lines.append(node.lineno)
return result | en | 0.672803 | Analyse scope violations Analyser checking for scope violations Checks if code has any global variables # lineno, line Check if any magic number has been used # lineno, line | 2.805257 | 3 |
supriya/ugens/SendPeakRMS.py | deeuu/supriya | 0 | 6617841 | <reponame>deeuu/supriya
import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class SendPeakRMS(UGen):
"""
Tracks peak and power of a signal for GUI applications.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.kr(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.kr()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Utility UGens"
_default_channel_count = 0
_ordered_input_names = collections.OrderedDict(
[("reply_rate", 20), ("peak_lag", 3), ("reply_id", -1)]
)
_unexpanded_argument_names = ("source",)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
command_name="/reply",
peak_lag=3,
reply_id=-1,
reply_rate=20,
source=None,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
)
command_name = str(command_name)
if not isinstance(source, collections.Sequence):
source = (source,)
self._configure_input("source", len(source))
for input_ in source:
self._configure_input("source", input_)
self._configure_input("command_name", len(command_name))
for character in command_name:
self._configure_input("label", ord(character))
### PUBLIC METHODS ###
@classmethod
def ar(
cls, command_name="/reply", peak_lag=3, reply_id=-1, reply_rate=20, source=None
):
"""
Constructs an audio-rate SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_single(
calculation_rate=calculation_rate,
command_name=command_name,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
source=source,
)
return ugen
@classmethod
def kr(
cls, command_name="/reply", peak_lag=3, reply_id=-1, reply_rate=20, source=None
):
"""
Constructs a control-rate SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.kr(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_single(
calculation_rate=calculation_rate,
command_name=command_name,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
source=source,
)
return ugen
# def new1(): ...
### PUBLIC PROPERTIES ###
@property
def command_name(self):
"""
Gets `command_name` input of SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms.command_name
'/reply'
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index("reply_id") + 1
source_length = int(self._inputs[index])
index += source_length + 2
characters = self._inputs[index:]
characters = [chr(int(_)) for _ in characters]
command_name = "".join(characters)
return command_name
@property
def source(self):
"""
Gets `source` input of SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms.source
(In.ar()[0], In.ar()[1], In.ar()[2], In.ar()[3])
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index("reply_id") + 1
source_length = int(self._inputs[index])
start = index + 1
stop = start + source_length
return tuple(self._inputs[start:stop])
| import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class SendPeakRMS(UGen):
"""
Tracks peak and power of a signal for GUI applications.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.kr(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.kr()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Utility UGens"
_default_channel_count = 0
_ordered_input_names = collections.OrderedDict(
[("reply_rate", 20), ("peak_lag", 3), ("reply_id", -1)]
)
_unexpanded_argument_names = ("source",)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
command_name="/reply",
peak_lag=3,
reply_id=-1,
reply_rate=20,
source=None,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
)
command_name = str(command_name)
if not isinstance(source, collections.Sequence):
source = (source,)
self._configure_input("source", len(source))
for input_ in source:
self._configure_input("source", input_)
self._configure_input("command_name", len(command_name))
for character in command_name:
self._configure_input("label", ord(character))
### PUBLIC METHODS ###
@classmethod
def ar(
cls, command_name="/reply", peak_lag=3, reply_id=-1, reply_rate=20, source=None
):
"""
Constructs an audio-rate SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_single(
calculation_rate=calculation_rate,
command_name=command_name,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
source=source,
)
return ugen
@classmethod
def kr(
cls, command_name="/reply", peak_lag=3, reply_id=-1, reply_rate=20, source=None
):
"""
Constructs a control-rate SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.kr(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms
SendPeakRMS.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_single(
calculation_rate=calculation_rate,
command_name=command_name,
peak_lag=peak_lag,
reply_id=reply_id,
reply_rate=reply_rate,
source=source,
)
return ugen
# def new1(): ...
### PUBLIC PROPERTIES ###
@property
def command_name(self):
"""
Gets `command_name` input of SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms.command_name
'/reply'
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index("reply_id") + 1
source_length = int(self._inputs[index])
index += source_length + 2
characters = self._inputs[index:]
characters = [chr(int(_)) for _ in characters]
command_name = "".join(characters)
return command_name
@property
def source(self):
"""
Gets `source` input of SendPeakRMS.
::
>>> source = supriya.ugens.In.ar(channel_count=4)
>>> send_peak_rms = supriya.ugens.SendPeakRMS.ar(
... command_name='/reply',
... peak_lag=3,
... reply_id=-1,
... reply_rate=20,
... source=source,
... )
>>> send_peak_rms.source
(In.ar()[0], In.ar()[1], In.ar()[2], In.ar()[3])
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index("reply_id") + 1
source_length = int(self._inputs[index])
start = index + 1
stop = start + source_length
return tuple(self._inputs[start:stop]) | en | 0.519038 | Tracks peak and power of a signal for GUI applications. :: >>> source = supriya.ugens.In.ar(channel_count=4) >>> send_peak_rms = supriya.ugens.SendPeakRMS.kr( ... command_name='/reply', ... peak_lag=3, ... reply_id=-1, ... reply_rate=20, ... source=source, ... ) >>> send_peak_rms SendPeakRMS.kr() ### CLASS VARIABLES ### ### INITIALIZER ### ### PUBLIC METHODS ### Constructs an audio-rate SendPeakRMS. :: >>> source = supriya.ugens.In.ar(channel_count=4) >>> send_peak_rms = supriya.ugens.SendPeakRMS.ar( ... command_name='/reply', ... peak_lag=3, ... reply_id=-1, ... reply_rate=20, ... source=source, ... ) >>> send_peak_rms SendPeakRMS.ar() Returns ugen graph. Constructs a control-rate SendPeakRMS. :: >>> source = supriya.ugens.In.ar(channel_count=4) >>> send_peak_rms = supriya.ugens.SendPeakRMS.kr( ... command_name='/reply', ... peak_lag=3, ... reply_id=-1, ... reply_rate=20, ... source=source, ... ) >>> send_peak_rms SendPeakRMS.kr() Returns ugen graph. # def new1(): ... ### PUBLIC PROPERTIES ### Gets `command_name` input of SendPeakRMS. :: >>> source = supriya.ugens.In.ar(channel_count=4) >>> send_peak_rms = supriya.ugens.SendPeakRMS.ar( ... command_name='/reply', ... peak_lag=3, ... reply_id=-1, ... reply_rate=20, ... source=source, ... ) >>> send_peak_rms.command_name '/reply' Returns ugen input. Gets `source` input of SendPeakRMS. :: >>> source = supriya.ugens.In.ar(channel_count=4) >>> send_peak_rms = supriya.ugens.SendPeakRMS.ar( ... command_name='/reply', ... peak_lag=3, ... reply_id=-1, ... reply_rate=20, ... source=source, ... ) >>> send_peak_rms.source (In.ar()[0], In.ar()[1], In.ar()[2], In.ar()[3]) Returns ugen input. | 2.417664 | 2 |
moco/training_tools/evaluator.py | stanfordmlgroup/MedAug | 3 | 6617842 | <reponame>stanfordmlgroup/MedAug
import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from sklearn.metrics import roc_auc_score
from scipy.special import softmax
from .meters import AverageMeter
from .meters import ProgressMeter
from .combiner import detach_tensor
'''
def pred_accuracy(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
output = detach_tensor(output)
target = detach_tensor(target)
batch_size = target.size(0)
argsorted_out = np.argsort(output)[:,-k:]
return np.asarray(np.any(argsorted_y.T == target, axis=0).mean(dtype='f')),
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res[0] # Seems like we only want the 1st
'''
def decorator_detach_tensor(function):
def wrapper(*args, **kwargs):
# TODO Find a simple way to handle this business ...
# If is eval, or if fast debug, or
# is train and not heavy, or is train and heavy
output = detach_tensor(args[0])
target = detach_tensor(args[1])
args = args[2:]
result = function(output, target, *args, **kwargs)
return result
return wrapper
@decorator_detach_tensor
def topk_acc(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
argsorted_out = np.argsort(output)[:,-k:]
matching = np.asarray(np.any(argsorted_out.T == target, axis=0))
return matching.mean(dtype='f')
@decorator_detach_tensor
def compute_auc_binary(output, target):
#assuming output and target are all vectors for binary case
try:
o = softmax(output, axis=1)
auc = roc_auc_score(target, o[:,1])
except:
return -1
return auc
class Evaluator:
def __init__(self, model, loss_func, metrics, loaders, args):
self.model = model
self.loss_func = loss_func
self.metrics = metrics
self.loaders = loaders
self.args = args
self.metric_best_vals = {metric: 0 for metric in self.metrics}
def evaluate(self, eval_type, epoch):
print(f'==> Evaluation for {eval_type}, epoch {epoch}')
loader = self.loaders[eval_type]
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metric_meters = {metric: AverageMeter(metric, self.metrics[metric]['format']) \
for metric in self.metrics}
list_meters = [metric_meters[m] for m in metric_meters]
progress = ProgressMeter(
len(loader),
[batch_time, losses, *list_meters],
prefix=f'{eval_type}@Epoch {epoch}: ')
# switch to evaluate mode
self.model.eval()
all_output = []
all_gt = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
if self.args.gpu is not None:
images = images.cuda(self.args.gpu, non_blocking=True)
target = target.cuda(self.args.gpu, non_blocking=True)
all_gt.append(target.cpu())
# compute output
output = self.model(images)
all_output.append(output.cpu())
loss = self.loss_func(output, target)
# JBY: For simplicity do losses first
losses.update(loss.item(), images.size(0))
for metric in self.metrics:
args = [output, target, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % self.args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
# print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
progress.display(i + 1)
all_output = np.concatenate(all_output)
all_gt = np.concatenate(all_gt)
for metric in self.metrics:
args = [all_output, all_gt, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
self.metric_best_vals[metric] = max(metric_meters[metric].avg,
self.metric_best_vals[metric])
progress.display(i + 1, summary=True) | import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from sklearn.metrics import roc_auc_score
from scipy.special import softmax
from .meters import AverageMeter
from .meters import ProgressMeter
from .combiner import detach_tensor
'''
def pred_accuracy(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
output = detach_tensor(output)
target = detach_tensor(target)
batch_size = target.size(0)
argsorted_out = np.argsort(output)[:,-k:]
return np.asarray(np.any(argsorted_y.T == target, axis=0).mean(dtype='f')),
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res[0] # Seems like we only want the 1st
'''
def decorator_detach_tensor(function):
def wrapper(*args, **kwargs):
# TODO Find a simple way to handle this business ...
# If is eval, or if fast debug, or
# is train and not heavy, or is train and heavy
output = detach_tensor(args[0])
target = detach_tensor(args[1])
args = args[2:]
result = function(output, target, *args, **kwargs)
return result
return wrapper
@decorator_detach_tensor
def topk_acc(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
argsorted_out = np.argsort(output)[:,-k:]
matching = np.asarray(np.any(argsorted_out.T == target, axis=0))
return matching.mean(dtype='f')
@decorator_detach_tensor
def compute_auc_binary(output, target):
#assuming output and target are all vectors for binary case
try:
o = softmax(output, axis=1)
auc = roc_auc_score(target, o[:,1])
except:
return -1
return auc
class Evaluator:
def __init__(self, model, loss_func, metrics, loaders, args):
self.model = model
self.loss_func = loss_func
self.metrics = metrics
self.loaders = loaders
self.args = args
self.metric_best_vals = {metric: 0 for metric in self.metrics}
def evaluate(self, eval_type, epoch):
print(f'==> Evaluation for {eval_type}, epoch {epoch}')
loader = self.loaders[eval_type]
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metric_meters = {metric: AverageMeter(metric, self.metrics[metric]['format']) \
for metric in self.metrics}
list_meters = [metric_meters[m] for m in metric_meters]
progress = ProgressMeter(
len(loader),
[batch_time, losses, *list_meters],
prefix=f'{eval_type}@Epoch {epoch}: ')
# switch to evaluate mode
self.model.eval()
all_output = []
all_gt = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
if self.args.gpu is not None:
images = images.cuda(self.args.gpu, non_blocking=True)
target = target.cuda(self.args.gpu, non_blocking=True)
all_gt.append(target.cpu())
# compute output
output = self.model(images)
all_output.append(output.cpu())
loss = self.loss_func(output, target)
# JBY: For simplicity do losses first
losses.update(loss.item(), images.size(0))
for metric in self.metrics:
args = [output, target, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % self.args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
# print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
progress.display(i + 1)
all_output = np.concatenate(all_output)
all_gt = np.concatenate(all_gt)
for metric in self.metrics:
args = [all_output, all_gt, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
self.metric_best_vals[metric] = max(metric_meters[metric].avg,
self.metric_best_vals[metric])
progress.display(i + 1, summary=True) | en | 0.556565 | def pred_accuracy(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
output = detach_tensor(output)
target = detach_tensor(target)
batch_size = target.size(0)
argsorted_out = np.argsort(output)[:,-k:]
return np.asarray(np.any(argsorted_y.T == target, axis=0).mean(dtype='f')),
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res[0] # Seems like we only want the 1st # TODO Find a simple way to handle this business ... # If is eval, or if fast debug, or # is train and not heavy, or is train and heavy Computes the accuracy over the k top predictions for the specified values of k #assuming output and target are all vectors for binary case # switch to evaluate mode # compute output # JBY: For simplicity do losses first # measure elapsed time # TODO: this should also be done with the ProgressMeter # print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}' # .format(top1=top1, top5=top5)) | 1.868216 | 2 |
rclone_sa_magic.py | taylorhill0413/gdrivelone | 49 | 6617843 | <filename>rclone_sa_magic.py
# auto rclone
#
# Author Telegram https://t.me/CodyDoby
# Inbox <EMAIL>
#
# can copy from
# - [x] publicly shared folder to Team Drive
# - [x] Team Drive to Team Drive
# - [ ] publicly shared folder to publicly shared folder (with write privilege)
# - [ ] Team Drive to publicly shared folder
# `python3 .\rclone_sa_magic.py -s SourceID -d DestinationID -dp DestinationPathName -b 10`
#
# - [x] local to Team Drive
# - [ ] local to private folder
# - [ ] private folder to any (think service accounts cannot do anything about private folder)
#
from __future__ import print_function
import argparse
import glob
import json
import os, io
import platform
import subprocess
import sys
import time
import distutils.spawn
from signal import signal, SIGINT
# =================modify here=================
logfile = "log_rclone.txt" # log file: tail -f log_rclone.txt
PID = 0
# parameters for this script
SIZE_GB_MAX = 735 # if one account has already copied 735GB, switch to next account
CNT_DEAD_RETRY = 100 # if there is no files be copied for 100 times, switch to next account
CNT_SA_EXIT = 3 # if continually switch account for 3 times stop script
# change it when u know what are u doing
# paramters for rclone.
# If TPSLIMITxTRANSFERS is too big, will cause 404 user rate limit error,
# especially for tasks with a lot of small files
TPSLIMIT = 3
TRANSFERS = 3
# =================modify here=================
def is_windows():
return platform.system() == 'Windows'
def handler(signal_received, frame):
global PID
if is_windows():
kill_cmd = 'taskkill /PID {} /F'.format(PID)
else:
kill_cmd = "kill -9 {}".format(PID)
try:
print("\n" + " " * 20 + " {}".format(time.strftime("%H:%M:%S")))
subprocess.check_call(kill_cmd, shell=True)
except:
pass
sys.exit(0)
def parse_args():
parser = argparse.ArgumentParser(description="Copy from source (local/publicly shared drive/Team Drive/) "
"to destination (publicly shared drive/Team Drive).")
parser.add_argument('-s', '--source_id', type=str,
help='the id of source. Team Drive id or publicly shared folder id')
parser.add_argument('-d', '--destination_id', type=str, required=True,
help='the id of destination. Team Drive id or publicly shared folder id')
parser.add_argument('-sp', '--source_path', type=str, default="",
help='the folder path of source. In Google Drive or local.')
parser.add_argument('-dp', '--destination_path', type=str, default="",
help='the folder path of destination. In Google Drive.')
# if there are some special symbols in source path, please use this
# path id (publicly shared folder or folder inside team drive)
parser.add_argument('-spi', '--source_path_id', type=str, default="",
help='the folder path id (rather than name) of source. In Google Drive.')
parser.add_argument('-sa', '--service_account', type=str, default="accounts",
help='the folder path of json files for service accounts.')
parser.add_argument('-cp', '--check_path', action="store_true",
help='if check src/dst path or not.')
parser.add_argument('-p', '--port', type=int, default=5572,
help='the port to run rclone rc. set it to different one if you want to run other instance.')
parser.add_argument('-b', '--begin_sa_id', type=int, default=1,
help='the begin id of sa for source')
parser.add_argument('-e', '--end_sa_id', type=int, default=600,
help='the end id of sa for destination')
parser.add_argument('-c', '--rclone_config_file', type=str,
help='config file path of rclone')
parser.add_argument('-test', '--test_only', action="store_true",
help='for test: make rclone print some more information.')
parser.add_argument('-t', '--dry_run', action="store_true",
help='for test: make rclone dry-run.')
parser.add_argument('--disable_list_r', action="store_true",
help='for debug. do not use this.')
parser.add_argument('--crypt', action="store_true",
help='for test: crypt remote destination.')
parser.add_argument('--cache', action="store_true",
help="for test: cache the remote destination.")
args = parser.parse_args()
return args
def gen_rclone_cfg(args):
sa_files = glob.glob(os.path.join(args.service_account, '*.json'))
output_of_config_file = './rclone.conf'
if len(sa_files) == 0:
sys.exit('No json files found in ./{}'.format(args.service_account))
with open(output_of_config_file, 'w') as fp:
for i, filename in enumerate(sa_files):
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(dir_path, filename)
filename = filename.replace(os.sep, '/')
# For source
if args.source_id:
if len(args.source_id) == 33:
folder_or_team_drive_src = 'root_folder_id'
elif len(args.source_id) == 19:
folder_or_team_drive_src = 'team_drive'
else:
sys.exit('Wrong length of team_drive_id or publicly shared root_folder_id')
text_to_write = "[{}{:03d}]\n" \
"type = drive\n" \
"scope = drive\n" \
"service_account_file = {}\n" \
"{} = {}\n".format('src', i + 1, filename, folder_or_team_drive_src, args.source_id)
# use path id instead path name
if args.source_path_id:
# for team drive only
if len(args.source_id) == 19:
if len(args.source_path_id) == 33:
text_to_write += 'root_folder_id = {}\n'.format(args.source_path_id)
else:
sys.exit('Wrong length of source_path_id')
else:
sys.exit('For publicly shared folder please do not set -spi flag')
text_to_write += "\n"
try:
fp.write(text_to_write)
except:
sys.exit("failed to write {} to {}".format(args.source_id, output_of_config_file))
else:
pass
# For destination
if len(args.destination_id) == 33:
folder_or_team_drive_dst = 'root_folder_id'
elif len(args.destination_id) == 19:
folder_or_team_drive_dst = 'team_drive'
else:
sys.exit('Wrong length of team_drive_id or publicly shared root_folder_id')
try:
fp.write('[{}{:03d}]\n'
'type = drive\n'
'scope = drive\n'
'service_account_file = {}\n'
'{} = {}\n\n'.format('dst', i + 1, filename, folder_or_team_drive_dst, args.destination_id))
except:
sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))
# For crypt destination
if args.crypt:
remote_name = '{}{:03d}'.format('dst', i + 1)
try:
fp.write('[{}_crypt]\n'
'type = crypt\n'
'remote = {}:\n'
'filename_encryption = standard\n'
'password = <PASSWORD>'
'directory_name_encryption = true\n\n'.format(remote_name, remote_name))
except:
sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))
# For cache destination
if args.cache:
remote_name = '{}{:03d}'.format('dst', i + 1)
try:
fp.write('[{}_cache]\n'
'type = cache\n'
'remote = {}:\n'
'chunk_total_size = 1G\n\n'.format(remote_name, remote_name))
except:
sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))
return output_of_config_file, i
def print_during(time_start):
time_stop = time.time()
hours, rem = divmod((time_stop - time_start), 3600)
minutes, sec = divmod(rem, 60)
print("Elapsed Time: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), sec))
def check_rclone_program():
# promote if user has not install rclone
rclone_prog = 'rclone'
if is_windows():
rclone_prog += ".exe"
ret = distutils.spawn.find_executable(rclone_prog)
if ret is None:
sys.exit("Please install rclone firstly: https://rclone.org/downloads/")
return ret
def check_path(path):
try:
ret = subprocess.check_output('rclone --config {} --disable ListR size \"{}\"'.format('rclone.conf', path),
shell=True)
print('It is okay:\n{}'.format(ret.decode('utf-8').replace('\0', '')))
except subprocess.SubprocessError as error:
sys.exit(str(error))
def main():
signal(SIGINT, handler)
# if rclone is not installed, quit directly
ret = check_rclone_program()
print("rclone is detected: {}".format(ret))
args = parse_args()
id = args.begin_sa_id
end_id = args.end_sa_id
config_file = args.rclone_config_file
if config_file is None:
print('generating rclone config file.')
config_file, end_id = gen_rclone_cfg(args)
print('rclone config file generated.')
else:
return print('not supported yet.')
pass
# need parse labels from config files
time_start = time.time()
print("Start: {}".format(time.strftime("%H:%M:%S")))
cnt_acc_error = 0
while id <= end_id + 1:
if id == end_id + 1:
break
# id = 1
with io.open('current_sa.txt', 'w', encoding='utf-8') as fp:
fp.write(str(id) + '\n')
src_label = "src" + "{0:03d}".format(id) + ":"
dst_label = "dst" + "{0:03d}".format(id) + ":"
if args.crypt:
dst_label = "dst" + "{0:03d}_crypt".format(id) + ":"
if args.cache:
dst_label = "dst" + "{0:03d}_cache".format(id) + ":"
src_full_path = src_label + args.source_path
if args.source_id is None:
src_full_path = args.source_path
dst_full_path = dst_label + args.destination_path
if args.destination_id is None:
dst_full_path = args.destination_path
if args.test_only:
print('\nsrc full path\n', src_full_path)
print('\ndst full path\n', dst_full_path, '\n')
if args.check_path and id == args.begin_sa_id:
print("Please wait. Checking source path...")
check_path(src_full_path)
print("Please wait. Checking destination path...")
check_path(dst_full_path)
# =================cmd to run=================
rclone_cmd = "rclone --config {} copy ".format(config_file)
if args.dry_run:
rclone_cmd += "--dry-run "
# --fast-list is default adopted in the latest rclone
rclone_cmd += "--drive-server-side-across-configs --rc --rc-addr=\"localhost:{}\" -vv --ignore-existing ".format(args.port)
rclone_cmd += "--tpslimit {} --transfers {} --drive-chunk-size 32M ".format(TPSLIMIT, TRANSFERS)
if args.disable_list_r:
rclone_cmd += "--disable ListR "
rclone_cmd += "--drive-acknowledge-abuse --log-file={} \"{}\" \"{}\"".format(logfile, src_full_path,
dst_full_path)
if not is_windows():
rclone_cmd = rclone_cmd + " &"
else:
rclone_cmd = "start /b " + rclone_cmd
# =================cmd to run=================
print(rclone_cmd)
try:
subprocess.check_call(rclone_cmd, shell=True)
print(">> Let us go {} {}".format(dst_label, time.strftime("%H:%M:%S")))
time.sleep(10)
except subprocess.SubprocessError as error:
return print("error: " + str(error))
cnt_error = 0
cnt_dead_retry = 0
size_bytes_done_before = 0
cnt_acc_sucess = 0
already_start = False
try:
response = subprocess.check_output('rclone rc --rc-addr="localhost:{}" core/pid'.format(args.port), shell=True)
pid = json.loads(response.decode('utf-8').replace('\0', ''))['pid']
if args.test_only: print('\npid is: {}\n'.format(pid))
global PID
PID = int(pid)
except subprocess.SubprocessError as error:
pass
while True:
rc_cmd = 'rclone rc --rc-addr="localhost:{}" core/stats'.format(format(args.port))
try:
response = subprocess.check_output(rc_cmd, shell=True)
cnt_acc_sucess += 1
cnt_error = 0
# if there is a long time waiting, this will be easily satisfied, so check if it is started using
# already_started flag
if already_start and cnt_acc_sucess >= 9:
cnt_acc_error = 0
cnt_acc_sucess = 0
if args.test_only: print(
"total 9 times success. the cnt_acc_error is reset to {}\n".format(cnt_acc_error))
except subprocess.SubprocessError as error:
# continually ...
cnt_error = cnt_error + 1
cnt_acc_error = cnt_acc_error + 1
if cnt_error >= 3:
cnt_acc_sucess = 0
if args.test_only: print(
"total 3 times failure. the cnt_acc_sucess is reset to {}\n".format(cnt_acc_sucess))
print('No rclone task detected (possibly done for this '
'account). ({}/3)'.format(int(cnt_acc_error / cnt_error)))
# Regard continually exit as *all done*.
if cnt_acc_error >= 9:
print('All done (3/3).')
print_during(time_start)
return
break
continue
response_processed = response.decode('utf-8').replace('\0', '')
response_processed_json = json.loads(response_processed)
size_bytes_done = int(response_processed_json['bytes'])
checks_done = int(response_processed_json['checks'])
size_GB_done = int(size_bytes_done * 9.31322e-10)
speed_now = float(int(response_processed_json['speed']) * 9.31322e-10 * 1024)
# try:
# print(json.loads(response.decode('utf-8')))
# except:
# print("have some encoding problem to print info")
if already_start:
print("%s %dGB Done @ %fMB/s | checks: %d files" % (dst_label, size_GB_done, speed_now, checks_done), end="\r")
else:
print("%s reading source/destination | checks: %d files" % (dst_label, checks_done), end="\r")
# continually no ...
if size_bytes_done - size_bytes_done_before == 0:
if already_start:
cnt_dead_retry += 1
if args.test_only:
print('\nsize_bytes_done', size_bytes_done)
print('size_bytes_done_before', size_bytes_done_before)
print("No. No size increase after job started.")
else:
cnt_dead_retry = 0
if args.test_only: print("\nOk. I think the job has started")
already_start = True
size_bytes_done_before = size_bytes_done
# Stop by error (403, etc) info
if size_GB_done >= SIZE_GB_MAX or cnt_dead_retry >= CNT_DEAD_RETRY:
if is_windows():
# kill_cmd = 'taskkill /IM "rclone.exe" /F'
kill_cmd = 'taskkill /PID {} /F'.format(PID)
else:
kill_cmd = "kill -9 {}".format(PID)
print("\n" + " " * 20 + " {}".format(time.strftime("%H:%M:%S")))
try:
subprocess.check_call(kill_cmd, shell=True)
print('\n')
except:
if args.test_only: print("\nFailed to kill.")
pass
# =================Finish it=================
if cnt_dead_retry >= CNT_DEAD_RETRY:
try:
cnt_exit += 1
except:
cnt_exit = 1
if args.test_only: print(
"1 more time for long time waiting. the cnt_exit is added to {}\n".format(cnt_exit))
else:
# clear cnt if there is one time
cnt_exit = 0
if args.test_only: print("1 time sucess. the cnt_exit is reset to {}\n".format(cnt_exit))
# Regard continually exit as *all done*.
if cnt_exit >= CNT_SA_EXIT:
print_during(time_start)
# exit directly rather than switch to next account.
print('All Done.')
return
# =================Finish it=================
break
time.sleep(2)
id = id + 1
print_during(time_start)
# Entry point: run the copy job only when executed as a script, not on import.
if __name__ == "__main__":
    main()
# auto rclone
#
# Author Telegram https://t.me/CodyDoby
# Inbox <EMAIL>
#
# can copy from
# - [x] publicly shared folder to Team Drive
# - [x] Team Drive to Team Drive
# - [ ] publicly shared folder to publicly shared folder (with write privilege)
# - [ ] Team Drive to publicly shared folder
# `python3 .\rclone_sa_magic.py -s SourceID -d DestinationID -dp DestinationPathName -b 10`
#
# - [x] local to Team Drive
# - [ ] local to private folder
# - [ ] private folder to any (think service accounts cannot do anything about private folder)
#
from __future__ import print_function
import argparse
import glob
import json
import os, io
import platform
import subprocess
import sys
import time
import distutils.spawn
from signal import signal, SIGINT
# =================modify here=================
logfile = "log_rclone.txt" # log file: tail -f log_rclone.txt
PID = 0
# parameters for this script
SIZE_GB_MAX = 735 # if one account has already copied 735GB, switch to next account
CNT_DEAD_RETRY = 100 # if there is no files be copied for 100 times, switch to next account
CNT_SA_EXIT = 3 # if continually switch account for 3 times stop script
# change it when u know what are u doing
# paramters for rclone.
# If TPSLIMITxTRANSFERS is too big, will cause 404 user rate limit error,
# especially for tasks with a lot of small files
TPSLIMIT = 3
TRANSFERS = 3
# =================modify here=================
def is_windows():
    """Return True when the current platform is Windows.

    Call sites branch on this to choose between ``taskkill``/``kill`` and
    between ``start /b``/`` &`` backgrounding syntax.
    """
    system_name = platform.system()
    return system_name == 'Windows'
def handler(signal_received, frame):
    """SIGINT handler: best-effort kill of the backgrounded rclone, then exit.

    ``PID`` is the rclone process id recorded by ``main`` via the rc API; it
    stays 0 until the first rclone instance has reported its pid.
    """
    global PID
    kill_template = 'taskkill /PID {} /F' if is_windows() else "kill -9 {}"
    kill_cmd = kill_template.format(PID)
    try:
        print("\n" + " " * 20 + " {}".format(time.strftime("%H:%M:%S")))
        subprocess.check_call(kill_cmd, shell=True)
    except:
        # killing is best effort; never let a failure block the exit
        pass
    sys.exit(0)
def parse_args():
    """Build and parse the command-line options controlling the copy job.

    Returns:
        argparse.Namespace: parsed options; ``-d/--destination_id`` is the
        only required flag.
    """
    parser = argparse.ArgumentParser(description="Copy from source (local/publicly shared drive/Team Drive/) "
                                                 "to destination (publicly shared drive/Team Drive).")
    # Declarative option table: (flags, add_argument keyword arguments).
    option_specs = [
        (('-s', '--source_id'),
         dict(type=str, help='the id of source. Team Drive id or publicly shared folder id')),
        (('-d', '--destination_id'),
         dict(type=str, required=True,
              help='the id of destination. Team Drive id or publicly shared folder id')),
        (('-sp', '--source_path'),
         dict(type=str, default="", help='the folder path of source. In Google Drive or local.')),
        (('-dp', '--destination_path'),
         dict(type=str, default="", help='the folder path of destination. In Google Drive.')),
        # When the source path contains special symbols, callers can instead pass
        # the folder id (publicly shared folder or folder inside a team drive).
        (('-spi', '--source_path_id'),
         dict(type=str, default="",
              help='the folder path id (rather than name) of source. In Google Drive.')),
        (('-sa', '--service_account'),
         dict(type=str, default="accounts",
              help='the folder path of json files for service accounts.')),
        (('-cp', '--check_path'),
         dict(action="store_true", help='if check src/dst path or not.')),
        (('-p', '--port'),
         dict(type=int, default=5572,
              help='the port to run rclone rc. set it to different one if you want to run other instance.')),
        (('-b', '--begin_sa_id'),
         dict(type=int, default=1, help='the begin id of sa for source')),
        (('-e', '--end_sa_id'),
         dict(type=int, default=600, help='the end id of sa for destination')),
        (('-c', '--rclone_config_file'),
         dict(type=str, help='config file path of rclone')),
        (('-test', '--test_only'),
         dict(action="store_true", help='for test: make rclone print some more information.')),
        (('-t', '--dry_run'),
         dict(action="store_true", help='for test: make rclone dry-run.')),
        (('--disable_list_r',),
         dict(action="store_true", help='for debug. do not use this.')),
        (('--crypt',),
         dict(action="store_true", help='for test: crypt remote destination.')),
        (('--cache',),
         dict(action="store_true", help="for test: cache the remote destination.")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def gen_rclone_cfg(args):
    """Write ./rclone.conf with one remote pair per service-account key file.

    For every ``*.json`` key under ``args.service_account`` a ``srcNNN``
    section (only when ``args.source_id`` is given) and a ``dstNNN`` section
    are emitted; ``--crypt`` / ``--cache`` additionally emit ``dstNNN_crypt``
    / ``dstNNN_cache`` wrapper remotes.

    Args:
        args: parsed CLI namespace (see ``parse_args``).

    Returns:
        tuple: (path of the generated config file, zero-based index of the
        last service-account file written, i.e. account count minus one).

    The process exits via ``sys.exit`` on validation or write errors.
    """
    sa_files = glob.glob(os.path.join(args.service_account, '*.json'))
    output_of_config_file = './rclone.conf'
    if len(sa_files) == 0:
        sys.exit('No json files found in ./{}'.format(args.service_account))
    with open(output_of_config_file, 'w') as fp:
        for i, filename in enumerate(sa_files):
            dir_path = os.path.dirname(os.path.realpath(__file__))
            filename = os.path.join(dir_path, filename)
            # rclone expects forward slashes in config paths, even on Windows
            filename = filename.replace(os.sep, '/')

            # --- source remote (srcNNN) ---
            if args.source_id:
                # 33 characters -> shared-folder id, 19 characters -> Team Drive id
                if len(args.source_id) == 33:
                    folder_or_team_drive_src = 'root_folder_id'
                elif len(args.source_id) == 19:
                    folder_or_team_drive_src = 'team_drive'
                else:
                    sys.exit('Wrong length of team_drive_id or publicly shared root_folder_id')

                text_to_write = "[{}{:03d}]\n" \
                                "type = drive\n" \
                                "scope = drive\n" \
                                "service_account_file = {}\n" \
                                "{} = {}\n".format('src', i + 1, filename, folder_or_team_drive_src, args.source_id)

                # use a path id instead of a path name
                if args.source_path_id:
                    # only meaningful for team drives
                    if len(args.source_id) == 19:
                        if len(args.source_path_id) == 33:
                            text_to_write += 'root_folder_id = {}\n'.format(args.source_path_id)
                        else:
                            sys.exit('Wrong length of source_path_id')
                    else:
                        sys.exit('For publicly shared folder please do not set -spi flag')

                text_to_write += "\n"
                try:
                    fp.write(text_to_write)
                except OSError:
                    sys.exit("failed to write {} to {}".format(args.source_id, output_of_config_file))
            else:
                pass

            # --- destination remote (dstNNN) ---
            if len(args.destination_id) == 33:
                folder_or_team_drive_dst = 'root_folder_id'
            elif len(args.destination_id) == 19:
                folder_or_team_drive_dst = 'team_drive'
            else:
                sys.exit('Wrong length of team_drive_id or publicly shared root_folder_id')

            try:
                fp.write('[{}{:03d}]\n'
                         'type = drive\n'
                         'scope = drive\n'
                         'service_account_file = {}\n'
                         '{} = {}\n\n'.format('dst', i + 1, filename, folder_or_team_drive_dst, args.destination_id))
            except OSError:
                sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))

            # --- optional crypt wrapper remote ---
            if args.crypt:
                remote_name = '{}{:03d}'.format('dst', i + 1)
                try:
                    # BUG FIX: the password line previously lacked a trailing
                    # newline, fusing it with directory_name_encryption onto one
                    # line and producing an invalid rclone config section.
                    fp.write('[{}_crypt]\n'
                             'type = crypt\n'
                             'remote = {}:\n'
                             'filename_encryption = standard\n'
                             'password = <PASSWORD>\n'
                             'directory_name_encryption = true\n\n'.format(remote_name, remote_name))
                except OSError:
                    sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))

            # --- optional cache wrapper remote ---
            if args.cache:
                remote_name = '{}{:03d}'.format('dst', i + 1)
                try:
                    fp.write('[{}_cache]\n'
                             'type = cache\n'
                             'remote = {}:\n'
                             'chunk_total_size = 1G\n\n'.format(remote_name, remote_name))
                except OSError:
                    sys.exit("failed to write {} to {}".format(args.destination_id, output_of_config_file))

    return output_of_config_file, i
def print_during(time_start):
    """Print the wall-clock time elapsed since ``time_start`` (an epoch
    timestamp from ``time.time()``) formatted as HH:MM:SS.ss."""
    elapsed = time.time() - time_start
    full_hours = int(elapsed // 3600)
    remainder = elapsed - full_hours * 3600
    full_minutes = int(remainder // 60)
    seconds = remainder - full_minutes * 60
    print("Elapsed Time: {:0>2}:{:0>2}:{:05.2f}".format(full_hours, full_minutes, seconds))
def check_rclone_program():
    """Find the rclone executable on PATH and return its path.

    Aborts the whole program with an install hint when rclone is missing.
    """
    executable_name = 'rclone.exe' if is_windows() else 'rclone'
    location = distutils.spawn.find_executable(executable_name)
    if location is None:
        sys.exit("Please install rclone firstly: https://rclone.org/downloads/")
    return location
def check_path(path):
    """Probe *path* with ``rclone size`` and exit the program if unreachable."""
    size_cmd = 'rclone --config {} --disable ListR size \"{}\"'.format('rclone.conf', path)
    try:
        raw_output = subprocess.check_output(size_cmd, shell=True)
    except subprocess.SubprocessError as error:
        sys.exit(str(error))
    # strip NUL bytes that occasionally appear in captured console output
    decoded = raw_output.decode('utf-8').replace('\0', '')
    print('It is okay:\n{}'.format(decoded))
def main():
    """Drive the whole copy job.

    Rotates through service accounts (srcNNN/dstNNN remotes), launching one
    backgrounded ``rclone copy`` per account and polling its rc stats until
    either the per-account quota (SIZE_GB_MAX) is spent or the transfer
    stalls (CNT_DEAD_RETRY polls without progress), then switches account.
    Treats repeated immediate exits as "everything already copied".
    """
    signal(SIGINT, handler)

    # if rclone is not installed, quit directly
    ret = check_rclone_program()
    print("rclone is detected: {}".format(ret))
    args = parse_args()

    id = args.begin_sa_id
    end_id = args.end_sa_id

    config_file = args.rclone_config_file
    if config_file is None:
        print('generating rclone config file.')
        config_file, end_id = gen_rclone_cfg(args)
        print('rclone config file generated.')
    else:
        # user-supplied config files would require parsing remote labels; not implemented
        return print('not supported yet.')
        pass
        # need parse labels from config files

    time_start = time.time()
    print("Start: {}".format(time.strftime("%H:%M:%S")))

    cnt_acc_error = 0
    while id <= end_id + 1:

        if id == end_id + 1:
            break

        # id = 1
        # record the currently active service-account index for external monitoring
        with io.open('current_sa.txt', 'w', encoding='utf-8') as fp:
            fp.write(str(id) + '\n')

        # remote labels as written by gen_rclone_cfg, e.g. "src001:" / "dst001:"
        src_label = "src" + "{0:03d}".format(id) + ":"
        dst_label = "dst" + "{0:03d}".format(id) + ":"
        if args.crypt:
            dst_label = "dst" + "{0:03d}_crypt".format(id) + ":"
        if args.cache:
            dst_label = "dst" + "{0:03d}_cache".format(id) + ":"

        # without a source id the source is a plain local path (no remote prefix)
        src_full_path = src_label + args.source_path
        if args.source_id is None:
            src_full_path = args.source_path

        dst_full_path = dst_label + args.destination_path
        if args.destination_id is None:
            dst_full_path = args.destination_path

        if args.test_only:
            print('\nsrc full path\n', src_full_path)
            print('\ndst full path\n', dst_full_path, '\n')

        # verify both endpoints once, on the very first account
        if args.check_path and id == args.begin_sa_id:
            print("Please wait. Checking source path...")
            check_path(src_full_path)
            print("Please wait. Checking destination path...")
            check_path(dst_full_path)

        # =================cmd to run=================
        rclone_cmd = "rclone --config {} copy ".format(config_file)
        if args.dry_run:
            rclone_cmd += "--dry-run "
        # --fast-list is default adopted in the latest rclone
        rclone_cmd += "--drive-server-side-across-configs --rc --rc-addr=\"localhost:{}\" -vv --ignore-existing ".format(args.port)
        rclone_cmd += "--tpslimit {} --transfers {} --drive-chunk-size 32M ".format(TPSLIMIT, TRANSFERS)
        if args.disable_list_r:
            rclone_cmd += "--disable ListR "
        rclone_cmd += "--drive-acknowledge-abuse --log-file={} \"{}\" \"{}\"".format(logfile, src_full_path,
                                                                                    dst_full_path)
        # background the job so this script can keep polling the rc endpoint
        if not is_windows():
            rclone_cmd = rclone_cmd + " &"
        else:
            rclone_cmd = "start /b " + rclone_cmd
        # =================cmd to run=================
        print(rclone_cmd)

        try:
            subprocess.check_call(rclone_cmd, shell=True)
            print(">> Let us go {} {}".format(dst_label, time.strftime("%H:%M:%S")))
            # give rclone time to start its rc server before the first poll
            time.sleep(10)
        except subprocess.SubprocessError as error:
            return print("error: " + str(error))

        # per-account polling state
        cnt_error = 0
        cnt_dead_retry = 0
        size_bytes_done_before = 0
        cnt_acc_sucess = 0
        already_start = False
        try:
            # remember the rclone pid so handler()/the kill path can stop it
            response = subprocess.check_output('rclone rc --rc-addr="localhost:{}" core/pid'.format(args.port), shell=True)
            pid = json.loads(response.decode('utf-8').replace('\0', ''))['pid']
            if args.test_only: print('\npid is: {}\n'.format(pid))
            global PID
            PID = int(pid)
        except subprocess.SubprocessError as error:
            pass

        while True:
            # NOTE(review): format(args.port) is redundant here (str() would do); harmless
            rc_cmd = 'rclone rc --rc-addr="localhost:{}" core/stats'.format(format(args.port))
            try:
                response = subprocess.check_output(rc_cmd, shell=True)
                cnt_acc_sucess += 1
                cnt_error = 0
                # if there is a long time waiting, this will be easily satisfied, so check if it is started using
                # already_started flag
                if already_start and cnt_acc_sucess >= 9:
                    cnt_acc_error = 0
                    cnt_acc_sucess = 0
                    if args.test_only: print(
                        "total 9 times success. the cnt_acc_error is reset to {}\n".format(cnt_acc_error))
            except subprocess.SubprocessError as error:
                # rc endpoint unreachable -> rclone probably exited for this account
                cnt_error = cnt_error + 1
                cnt_acc_error = cnt_acc_error + 1
                if cnt_error >= 3:
                    cnt_acc_sucess = 0
                    if args.test_only: print(
                        "total 3 times failure. the cnt_acc_sucess is reset to {}\n".format(cnt_acc_sucess))
                    print('No rclone task detected (possibly done for this '
                          'account). ({}/3)'.format(int(cnt_acc_error / cnt_error)))
                    # Regard continually exit as *all done*.
                    if cnt_acc_error >= 9:
                        print('All done (3/3).')
                        print_during(time_start)
                        return
                    break
                continue

            response_processed = response.decode('utf-8').replace('\0', '')
            response_processed_json = json.loads(response_processed)

            size_bytes_done = int(response_processed_json['bytes'])
            checks_done = int(response_processed_json['checks'])
            # 9.31322e-10 == 1/2**30, i.e. bytes -> GiB
            size_GB_done = int(size_bytes_done * 9.31322e-10)
            speed_now = float(int(response_processed_json['speed']) * 9.31322e-10 * 1024)

            # try:
            #     print(json.loads(response.decode('utf-8')))
            # except:
            #     print("have some encoding problem to print info")

            if already_start:
                print("%s %dGB Done @ %fMB/s | checks: %d files" % (dst_label, size_GB_done, speed_now, checks_done), end="\r")
            else:
                print("%s reading source/destination | checks: %d files" % (dst_label, checks_done), end="\r")

            # no byte progress since the previous poll?
            if size_bytes_done - size_bytes_done_before == 0:
                if already_start:
                    cnt_dead_retry += 1
                    if args.test_only:
                        print('\nsize_bytes_done', size_bytes_done)
                        print('size_bytes_done_before', size_bytes_done_before)
                        print("No. No size increase after job started.")
            else:
                cnt_dead_retry = 0
                if args.test_only: print("\nOk. I think the job has started")
                already_start = True
            size_bytes_done_before = size_bytes_done

            # Stop by error (403, etc) info
            if size_GB_done >= SIZE_GB_MAX or cnt_dead_retry >= CNT_DEAD_RETRY:
                if is_windows():
                    # kill_cmd = 'taskkill /IM "rclone.exe" /F'
                    kill_cmd = 'taskkill /PID {} /F'.format(PID)
                else:
                    kill_cmd = "kill -9 {}".format(PID)
                print("\n" + " " * 20 + " {}".format(time.strftime("%H:%M:%S")))
                try:
                    subprocess.check_call(kill_cmd, shell=True)
                    print('\n')
                except:
                    if args.test_only: print("\nFailed to kill.")
                    pass

                # =================Finish it=================
                if cnt_dead_retry >= CNT_DEAD_RETRY:
                    # first reference raises (unbound local) -> initialise to 1
                    try:
                        cnt_exit += 1
                    except:
                        cnt_exit = 1
                    if args.test_only: print(
                        "1 more time for long time waiting. the cnt_exit is added to {}\n".format(cnt_exit))
                else:
                    # clear cnt if there is one time
                    cnt_exit = 0
                    if args.test_only: print("1 time sucess. the cnt_exit is reset to {}\n".format(cnt_exit))

                # Regard continually exit as *all done*.
                if cnt_exit >= CNT_SA_EXIT:
                    print_during(time_start)
                    # exit directly rather than switch to next account.
                    print('All Done.')
                    return
                # =================Finish it=================
                break

            time.sleep(2)

        id = id + 1

    print_during(time_start)
# Entry point: run the copy job only when executed as a script, not on import.
if __name__ == "__main__":
    main()
# Stop by error (403, etc) info # kill_cmd = 'taskkill /IM "rclone.exe" /F' # =================Finish it================= # clear cnt if there is one time # Regard continually exit as *all done*. # exit directly rather than switch to next account. # =================Finish it================= | 2.054266 | 2 |
main/CompuCellPythonTutorial/cellsort_engulfment_2D/Simulation/cellsort_engulfment_2D.py | JulianoGianlupi/nh-cc3d-4x-base-tool | 0 | 6617844 | <gh_stars>0
from cc3d import CompuCellSetup
from .cellsort_engulfment_2D_steppables import CellInitializer

# Register the steppable so it is invoked every Monte Carlo step
# (frequency=1), then start the CompuCell3D simulation main loop.
CompuCellSetup.register_steppable(steppable=CellInitializer(frequency=1))
CompuCellSetup.run()
| from cc3d import CompuCellSetup
from .cellsort_engulfment_2D_steppables import CellInitializer
CompuCellSetup.register_steppable(steppable=CellInitializer(frequency=1))
CompuCellSetup.run() | none | 1 | 1.452409 | 1 | |
web_crawler_ptt_gamesale.py | yin-xuanHuang/web_crawler_ptt_gamesale | 1 | 6617845 | # -*- coding: utf-8 -*-
# author: <EMAIL>
# version: 1.0
# github: https://github.com/yin-xuanHuang/web_crawler_ptt_gamesale
# license: MIT license
import requests
import sqlite3
import re
from bs4 import BeautifulSoup
#from time import sleep
# 文章內容處理
def catch_content_return_list(content_list, sellORwanted):
    """Parse a PTT Gamesale article body into a fixed 9-slot listing.

    Args:
        content_list: text nodes of the article page, as produced by
            ``soup.body.find_all(string=True)``.
        sellORwanted: sell/wanted marker (售/徵) extracted from the title;
            re-inferred from the body when it is neither.

    Returns:
        list: ``[sell_or_wanted, item_name, rating, language, price,
        trade_method, condition, area, note]`` with ``" "`` for missing
        fields, or ``[]`` when the body cannot be parsed.
    """
    # Locate the first text node containing the field marker '【' and
    # concatenate following nodes up to the signature line starting with '※'.
    find_all_body_list = content_list
    content_wanted_index = 0
    for body_string in find_all_body_list:
        if u'【' in body_string: break
        content_wanted_index = content_wanted_index + 1
    try:
        content_wanted = find_all_body_list[content_wanted_index]
    except:
        return []
    while True:
        try:
            if find_all_body_list[content_wanted_index+1][0] == u"※": break
        except:
            break
        content_wanted = content_wanted + find_all_body_list[content_wanted_index + 1]
        content_wanted_index = content_wanted_index +1

    # When the title marker is neither 售 (sell) nor 徵 (wanted),
    # infer it from the body text.
    if sellORwanted != u'售' and sellORwanted != u'徵':
        if u'徵' in content_wanted:
            sellORwanted = u'徵'
        else:
            sellORwanted = u'售'

    # Strip whitespace and decoration characters.
    # TODO: separate multiple games posted inside a single article.
    body_content = content_wanted
    body_content = body_content.replace("\n", "")
    body_content = body_content.replace(" ", "")
    body_content = body_content.replace(u'★', "")
    body_content = body_content.replace(u' ', "")

    # Remove template boilerplate that authors sometimes leave in place.
    # e.g. hint "can be used directly, no editing needed"
    if u"←可直接使用,無須編輯" in body_content:
        body_content = body_content.replace(u"←可直接使用,無須編輯", "")
    # e.g. the list of content-rating categories
    if u"(限制級、輔15級、輔12級、保護級、普遍級)" in body_content:
        body_content = body_content.replace(u"(限制級、輔15級、輔12級、保護級、普遍級)", "")
    # e.g. "rating hint" arrow
    if u"←分級提示" in body_content:
        body_content = body_content.replace(u"←分級提示", "")
    # e.g. "not following the rating rules is a violation"
    if u"(未照左方分級者違規)" in body_content:
        body_content = body_content.replace(u"(未照左方分級者違規)", "")
    # e.g. rating instructions for wanted posts / accessories
    if u"(徵求者、配件等無分級者可免填,販售國內未代理之遊戲,請直接填限制級即可。)" in body_content:
        body_content = body_content.replace(u"(徵求者、配件等無分級者可免填,販售國內未代理之遊戲,請直接填限制級即可。)", "")
    # e.g. language-version instructions
    if u"(遊戲有多語系版本必填,海外版請註明是哪個國家。)" in body_content:
        body_content = body_content.replace(u"(遊戲有多語系版本必填,海外版請註明是哪個國家。)", "")
    body_content = body_content.replace(u'。', "")
    # Drop the trailing two characters and append '【' as a sentinel so the
    # field-parsing loop below always finds a terminating bracket.
    body_content = body_content[:-2] + u'【'

    # Walk the cleaned body, which now looks like 【name】:value【name】:value...【,
    # collecting each field name/value pair into content_dic.
    try:
        the_colon = body_content.index(u':')
        the_brackets = body_content.index(u'【')
        next_brackets = body_content.index(u'【', the_brackets + 1)
        content_dic = dict()
        while True:
            dic_name = body_content[the_brackets + 1:the_colon - 1]
            content_dic[dic_name] = body_content[the_colon + 1:next_brackets]
            if u':' not in body_content[the_colon + 1:] or u'【' not in body_content[next_brackets + 1:]: break
            the_brackets = next_brackets
            next_brackets = body_content.index(u'【', next_brackets + 1)
            the_colon = body_content.index(u':', the_brackets)
    except:
        return []

    # Order the parsed fields into a fixed 9-slot list, matching each slot by
    # a characteristic character of its field name.
    # TODO : support a "交換" (exchange/trade) option
    content_list = list()
    i = 0
    while i < 9:
        if i == 0: # slot 0: sell or wanted
            content_list.append(sellORwanted)
            i = i + 1
        check = False
        for item in content_dic:
            if u"名" in item and i == 1: # slot 1: item name
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"分" in item and i == 2: # slot 2: content rating
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"語" in item and i == 3: # slot 3: language version
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"價" in item and i == 4: # slot 4: asking / selling price
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"易" in item and i == 5: # slot 5: trade method
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"保" in item and i == 6: # slot 6: item condition
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"地" in item and i == 7: # slot 7: area
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"附" in item and i == 8: # slot 8: note
                content_list.append(content_dic.get(item," "))
                check = True
                break
        if not check:
            content_list.append(" ")
        i = i + 1
    return content_list
# Main program starts here.

# Open (or create) the local SQLite database used to persist listings.
conn = sqlite3.connect('ptt_gamesale.sqlite')
cur = conn.cursor()
# TODO 適當資料型態
# set database
# author, ok
# date, ok
# gamestation, ok
# title_id, titleID ok
# sellORwanted 0 ok
# 物品名稱 1 item_name ok
# 遊戲分級 2 law_limited ok
# 語系版本 3 language ok
# 徵求價 or 售價 4 price ok
# 交易方式 5 howtotrade ok
# 保存狀況 6 status ok
# 地區 7 address ok
# 附註 8 message ok
cur.executescript('''
CREATE TABLE IF NOT EXISTS Content(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
titleID TEXT UNIQUE,
author_id INTEGER,
sellORwanted_id INTEGER,
gamestation_id INTEGER,
date TEXT,
item_name TEXT,
law_limited_id INTEGER,
language_id INTEGER,
price TEXT,
howtotrade TEXT,
status TEXT,
address TEXT,
message TEXT
);
CREATE TABLE IF NOT EXISTS Author (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS SellORwanted (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Gamestation (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Law_limited (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Language (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Fail_title (
title_id TEXT NOT NULL PRIMARY KEY UNIQUE
);
CREATE TABLE IF NOT EXISTS End_page (
pre_page TEXT NOT NULL PRIMARY KEY UNIQUE
);
''')
ptt_url = "https://www.ptt.cc/bbs/Gamesale/index.html"
#from_page = raw_input("輸入開始頁數(index3311.html),不需要就直接按Enter:")
#if from_page == "":
# res = requests.get(ptt_url)
#else:
# try:
# res = requests.get(ptt_url[:32] + from_page, "html.parser")
# except:
# print "輸入錯誤:", from_page
# exit()
#print res.encoding
res = requests.get(ptt_url)
soup = BeautifulSoup(res.text, "html.parser")
#print soup.prettify().encode('utf-8')
#title_list_per_page = list()
# 處理的文章條數
count = 0
# 處理的頁數
page = 0
# 問題文章數
fail_count = 0
# 發現重複處理的文章
#find_title_id = False
# 是否使用database 抓取上次寫入的頁數
is_read_database = False
# 遊戲主機白名單
gamestation_list = ["PC","XONE","X360",u"其他","WII","NDS","GBA","XBOX","PS","NGC","DC","DS"]
while True: # page loop
try:
page = page + 1
# page上的每一條文章 list row_list
row_list = soup.select("body div.title a")
row_list.reverse()
# 抓page上一頁的網址
pre_page = str(soup.find_all("a", text="‹ 上頁")[0].get('href'))
# 處理每條文章開始
for row in row_list: # title loop
# 文章的連結與其識別碼 str title_url title_id
title_id = row.get('href').encode('utf-8')
title_url = ptt_url[0:18] + title_id
title_id = title_url[32:-5]
# TODO 設定第二判斷(有爬完一次為第二次判斷,以免被覆蓋)
# 判斷文章識別碼是否已經在資料庫中
cur.execute('SELECT titleID FROM Content WHERE titleID =?',(title_id,))
r = cur.fetchone()
if r is not None :
#find_title_id = True
if not is_read_database:
cur.execute('SELECT pre_page FROM End_page')
try:
pre_page_database = cur.fetchone()[0]
pre_page = pre_page_database
is_read_database = True
except:
pass
#print title_id, "is already in database."
#print "title_url=", title_url
continue
# 擷取文章名
title_content = row.get_text()
# 徵求文或販賣文
try:
sellORwanted = re.search("\]\s*([\S+])\s*", title_content).group(1)
except:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# page上的遊戲主機 str gamestation
if '[' not in title_content or ']' not in title_content: continue
pre_bracket = title_content.index('[')
back_bracket = title_content.index(']')
gamestation = title_content[pre_bracket + 1: back_bracket].upper()
gamestation = gamestation.rstrip().lstrip()
# 文章的遊戲主機 是否在白名單中
in_gamestation_list = False
for station in gamestation_list:
if station in gamestation:
in_gamestation_list = True
if not in_gamestation_list:
print title_id, "skiped.(Not in white list)"
continue
# 爬進文章中 TODO:
# 網址會壞掉嗎?例如剛(被)砍的文章.ans:Yes(try and except?) 利用回應時間來解決?
res = requests.get(title_url)
soup = BeautifulSoup(res.text, "html.parser")
# 擷取文章資訊
title_head = soup.select(".article-meta-value")
try:
author = title_head[0].text
date = title_head[3].text
date = date[4:16] + date[19:]
except:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# 找出目標內容,並擷取
content_list = catch_content_return_list(soup.body.find_all(string=True), sellORwanted)
if content_list == []:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# print "---------------------------------------------------"
# print "author=", author
# print "date =", date
# print "GS =", gamestation
# print "ID =", title_id
# print "售或徵求 =", content_list[0]
# print "物品名稱 =", content_list[1]
# print "遊戲分級 =", content_list[2]
# print "語系版本 =", content_list[3]
# print "目標價錢 =", content_list[4]
# print "交易方式 =", content_list[5]
# print "保存狀況 =", content_list[6]
# print "所在地區 =", content_list[7]
# print "其他附註 =", content_list[8]
# print "---------------------------------------------------"
# database 存入
cur.execute('''INSERT OR IGNORE INTO Language (name)
VALUES ( ? )''', ( content_list[3], ) )
cur.execute('SELECT id FROM Language WHERE name = ? ', (content_list[3], ))
language_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Law_limited (name)
VALUES ( ? )''', ( content_list[2], ) )
cur.execute('SELECT id FROM Law_limited WHERE name = ? ', (content_list[2], ))
law_limited_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Gamestation (name)
VALUES ( ? )''', ( gamestation, ) )
cur.execute('SELECT id FROM Gamestation WHERE name = ? ', (gamestation, ))
gamestation_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO SellORwanted (name)
VALUES ( ? )''', ( content_list[0], ) )
cur.execute('SELECT id FROM SellORwanted WHERE name = ? ', (content_list[0], ))
sellORwanted_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Author (name)
VALUES ( ? )''', ( author, ) )
cur.execute('SELECT id FROM Author WHERE name = ? ', (author, ))
author_id = cur.fetchone()[0]
cur.execute('''
INSERT OR REPLACE INTO Content
( titleID, author_id, sellORwanted_id, gamestation_id, date, item_name,
law_limited_id, language_id, price, howtotrade, status, address, message)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )''',
( title_id, author_id, sellORwanted_id, gamestation_id, date, content_list[1],
law_limited_id, language_id, content_list[4], content_list[5], content_list[6], content_list[7], content_list[8] ) )
conn.commit()
print title_id, "success!"
is_read_database = False
count = count + 1
# 發現重複處理的文章後處置
#if find_title_id: break
# want be saved to database
pre_page_save = pre_page
# 爬進上一頁,直到第一頁
if pre_page == "None":
cur.execute('''DELETE FROM End_page''')
cur.execute('''INSERT OR REPLACE INTO End_page(pre_page)
VALUES ( ? )''', ("None", ) )
conn.commit()
print "pre_page=", pre_page
break
pre_page = ptt_url[:18] + pre_page
res = requests.get(pre_page)
soup = BeautifulSoup(res.text, "html.parser")
print "DO", pre_page
except:
print ""
print "Program interrupted!!!"
print "NOW TitleID:", title_id
print "NOW Page url:", pre_page_save
print "NOW titles:", count
cur.execute('''DELETE FROM End_page''')
cur.execute('''INSERT OR REPLACE INTO End_page(pre_page)
VALUES ( ? )''', ( pre_page_save, ) )
conn.commit()
print "Record pre_page to database."
exit()
print "Total pages:", page
print "Total titles:", count
print "Total fail pages", fail_count
print "Done!"
| # -*- coding: utf-8 -*-
# author: <EMAIL>
# version: 1.0
# github: https://github.com/yin-xuanHuang/web_crawler_ptt_gamesale
# license: MIT license
import requests
import sqlite3
import re
from bs4 import BeautifulSoup
#from time import sleep
# 文章內容處理
def catch_content_return_list(content_list, sellORwanted):
    """Parse a PTT Gamesale article body into a fixed 9-slot field list.

    content_list -- list of text nodes extracted from the article page body.
    sellORwanted -- marker character taken from the title (售 = sell, 徵 = wanted).

    Returns [sell-or-wanted, item name, rating, language, price, trade
    method, condition, area, note]; returns [] when the posting template
    cannot be located or parsed.
    """
    # Locate the template section (first line containing '【') and
    # concatenate following lines up to the signature line starting with '※'.
    find_all_body_list = content_list
    content_wanted_index = 0
    for body_string in find_all_body_list:
        if u'【' in body_string: break
        content_wanted_index = content_wanted_index + 1
    try:
        content_wanted = find_all_body_list[content_wanted_index]
    except:
        return []
    while True:
        try:
            if find_all_body_list[content_wanted_index+1][0] == u"※": break
        except:
            break
        content_wanted = content_wanted + find_all_body_list[content_wanted_index + 1]
        content_wanted_index = content_wanted_index +1
    # Title marker is neither sell (售) nor wanted (徵): infer it from the body.
    if sellORwanted != u'售' and sellORwanted != u'徵':
        if u'徵' in content_wanted:
            sellORwanted = u'徵'
        else:
            sellORwanted = u'售'
    # Strip noise characters.  TODO: handle separators between multiple
    # games listed in a single article.
    body_content = content_wanted
    body_content = body_content.replace("\n", "")
    body_content = body_content.replace(" ", "")
    body_content = body_content.replace(u'★', "")
    body_content = body_content.replace(u' ', "")
    # Remove boilerplate "example text" that posters often leave in place.
    # e.g. "← can be used directly, no editing needed"
    if u"←可直接使用,無須編輯" in body_content:
        body_content = body_content.replace(u"←可直接使用,無須編輯", "")
    # e.g. the list of content-rating levels
    if u"(限制級、輔15級、輔12級、保護級、普遍級)" in body_content:
        body_content = body_content.replace(u"(限制級、輔15級、輔12級、保護級、普遍級)", "")
    # e.g. "← rating hint"
    if u"←分級提示" in body_content:
        body_content = body_content.replace(u"←分級提示", "")
    # e.g. "(not following the rating on the left is a violation)"
    if u"(未照左方分級者違規)" in body_content:
        body_content = body_content.replace(u"(未照左方分級者違規)", "")
    # e.g. the long hint about when the rating field may be left blank
    if u"(徵求者、配件等無分級者可免填,販售國內未代理之遊戲,請直接填限制級即可。)" in body_content:
        body_content = body_content.replace(u"(徵求者、配件等無分級者可免填,販售國內未代理之遊戲,請直接填限制級即可。)", "")
    # e.g. the hint about the language-version field
    if u"(遊戲有多語系版本必填,海外版請註明是哪個國家。)" in body_content:
        body_content = body_content.replace(u"(遊戲有多語系版本必填,海外版請註明是哪個國家。)", "")
    body_content = body_content.replace(u'。', "")
    # Append a trailing '【' sentinel so the last field also has a closing marker.
    body_content = body_content[:-2] + u'【'
    # Walk the 【field】:value pairs and collect them into a dict.
    try:
        the_colon = body_content.index(u':')
        the_brackets = body_content.index(u'【')
        next_brackets = body_content.index(u'【', the_brackets + 1)
        content_dic = dict()
        while True:
            dic_name = body_content[the_brackets + 1:the_colon - 1]
            content_dic[dic_name] = body_content[the_colon + 1:next_brackets]
            if u':' not in body_content[the_colon + 1:] or u'【' not in body_content[next_brackets + 1:]: break
            the_brackets = next_brackets
            next_brackets = body_content.index(u'【', next_brackets + 1)
            the_colon = body_content.index(u':', the_brackets)
    except:
        return []
    # Order the parsed fields into their fixed slot positions.
    # Matching is by a single key character per field — TODO: support the
    # "交換" (exchange) option as well.
    content_list = list()
    i = 0
    while i < 9:
        if i == 0: # sell or wanted 0
            content_list.append(sellORwanted)
            i = i + 1
        check = False
        for item in content_dic:
            if u"名" in item and i == 1: # item name 1
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"分" in item and i == 2: # game rating 2
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"語" in item and i == 3: # language version 3
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"價" in item and i == 4: # asking or selling price 4
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"易" in item and i == 5: # trade method 5
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"保" in item and i == 6: # condition 6
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"地" in item and i == 7: # area 7
                content_list.append(content_dic.get(item," "))
                check = True
                break
            elif u"附" in item and i == 8: # note 8
                content_list.append(content_dic.get(item," "))
                check = True
                break
        if not check:
            # No matching field found for this slot; pad with a space.
            content_list.append(" ")
        i = i + 1
    return content_list
# 主程式開始
# connect database
conn = sqlite3.connect('ptt_gamesale.sqlite')
cur = conn.cursor()
# TODO 適當資料型態
# set database
# author, ok
# date, ok
# gamestation, ok
# title_id, titleID ok
# sellORwanted 0 ok
# 物品名稱 1 item_name ok
# 遊戲分級 2 law_limited ok
# 語系版本 3 language ok
# 徵求價 or 售價 4 price ok
# 交易方式 5 howtotrade ok
# 保存狀況 6 status ok
# 地區 7 address ok
# 附註 8 message ok
cur.executescript('''
CREATE TABLE IF NOT EXISTS Content(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
titleID TEXT UNIQUE,
author_id INTEGER,
sellORwanted_id INTEGER,
gamestation_id INTEGER,
date TEXT,
item_name TEXT,
law_limited_id INTEGER,
language_id INTEGER,
price TEXT,
howtotrade TEXT,
status TEXT,
address TEXT,
message TEXT
);
CREATE TABLE IF NOT EXISTS Author (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS SellORwanted (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Gamestation (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Law_limited (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Language (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS Fail_title (
title_id TEXT NOT NULL PRIMARY KEY UNIQUE
);
CREATE TABLE IF NOT EXISTS End_page (
pre_page TEXT NOT NULL PRIMARY KEY UNIQUE
);
''')
ptt_url = "https://www.ptt.cc/bbs/Gamesale/index.html"
#from_page = raw_input("輸入開始頁數(index3311.html),不需要就直接按Enter:")
#if from_page == "":
# res = requests.get(ptt_url)
#else:
# try:
# res = requests.get(ptt_url[:32] + from_page, "html.parser")
# except:
# print "輸入錯誤:", from_page
# exit()
#print res.encoding
res = requests.get(ptt_url)
soup = BeautifulSoup(res.text, "html.parser")
#print soup.prettify().encode('utf-8')
#title_list_per_page = list()
# 處理的文章條數
count = 0
# 處理的頁數
page = 0
# 問題文章數
fail_count = 0
# 發現重複處理的文章
#find_title_id = False
# 是否使用database 抓取上次寫入的頁數
is_read_database = False
# 遊戲主機白名單
gamestation_list = ["PC","XONE","X360",u"其他","WII","NDS","GBA","XBOX","PS","NGC","DC","DS"]
while True: # page loop
try:
page = page + 1
# page上的每一條文章 list row_list
row_list = soup.select("body div.title a")
row_list.reverse()
# 抓page上一頁的網址
pre_page = str(soup.find_all("a", text="‹ 上頁")[0].get('href'))
# 處理每條文章開始
for row in row_list: # title loop
# 文章的連結與其識別碼 str title_url title_id
title_id = row.get('href').encode('utf-8')
title_url = ptt_url[0:18] + title_id
title_id = title_url[32:-5]
# TODO 設定第二判斷(有爬完一次為第二次判斷,以免被覆蓋)
# 判斷文章識別碼是否已經在資料庫中
cur.execute('SELECT titleID FROM Content WHERE titleID =?',(title_id,))
r = cur.fetchone()
if r is not None :
#find_title_id = True
if not is_read_database:
cur.execute('SELECT pre_page FROM End_page')
try:
pre_page_database = cur.fetchone()[0]
pre_page = pre_page_database
is_read_database = True
except:
pass
#print title_id, "is already in database."
#print "title_url=", title_url
continue
# 擷取文章名
title_content = row.get_text()
# 徵求文或販賣文
try:
sellORwanted = re.search("\]\s*([\S+])\s*", title_content).group(1)
except:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# page上的遊戲主機 str gamestation
if '[' not in title_content or ']' not in title_content: continue
pre_bracket = title_content.index('[')
back_bracket = title_content.index(']')
gamestation = title_content[pre_bracket + 1: back_bracket].upper()
gamestation = gamestation.rstrip().lstrip()
# 文章的遊戲主機 是否在白名單中
in_gamestation_list = False
for station in gamestation_list:
if station in gamestation:
in_gamestation_list = True
if not in_gamestation_list:
print title_id, "skiped.(Not in white list)"
continue
# 爬進文章中 TODO:
# 網址會壞掉嗎?例如剛(被)砍的文章.ans:Yes(try and except?) 利用回應時間來解決?
res = requests.get(title_url)
soup = BeautifulSoup(res.text, "html.parser")
# 擷取文章資訊
title_head = soup.select(".article-meta-value")
try:
author = title_head[0].text
date = title_head[3].text
date = date[4:16] + date[19:]
except:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# 找出目標內容,並擷取
content_list = catch_content_return_list(soup.body.find_all(string=True), sellORwanted)
if content_list == []:
cur.execute('''INSERT OR IGNORE INTO Fail_title (title_id)
VALUES ( ? )''', ( title_id, ) )
print title_id, "skiped.(Fail)"
fail_count = fail_count + 1
continue
# print "---------------------------------------------------"
# print "author=", author
# print "date =", date
# print "GS =", gamestation
# print "ID =", title_id
# print "售或徵求 =", content_list[0]
# print "物品名稱 =", content_list[1]
# print "遊戲分級 =", content_list[2]
# print "語系版本 =", content_list[3]
# print "目標價錢 =", content_list[4]
# print "交易方式 =", content_list[5]
# print "保存狀況 =", content_list[6]
# print "所在地區 =", content_list[7]
# print "其他附註 =", content_list[8]
# print "---------------------------------------------------"
# database 存入
cur.execute('''INSERT OR IGNORE INTO Language (name)
VALUES ( ? )''', ( content_list[3], ) )
cur.execute('SELECT id FROM Language WHERE name = ? ', (content_list[3], ))
language_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Law_limited (name)
VALUES ( ? )''', ( content_list[2], ) )
cur.execute('SELECT id FROM Law_limited WHERE name = ? ', (content_list[2], ))
law_limited_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Gamestation (name)
VALUES ( ? )''', ( gamestation, ) )
cur.execute('SELECT id FROM Gamestation WHERE name = ? ', (gamestation, ))
gamestation_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO SellORwanted (name)
VALUES ( ? )''', ( content_list[0], ) )
cur.execute('SELECT id FROM SellORwanted WHERE name = ? ', (content_list[0], ))
sellORwanted_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Author (name)
VALUES ( ? )''', ( author, ) )
cur.execute('SELECT id FROM Author WHERE name = ? ', (author, ))
author_id = cur.fetchone()[0]
cur.execute('''
INSERT OR REPLACE INTO Content
( titleID, author_id, sellORwanted_id, gamestation_id, date, item_name,
law_limited_id, language_id, price, howtotrade, status, address, message)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )''',
( title_id, author_id, sellORwanted_id, gamestation_id, date, content_list[1],
law_limited_id, language_id, content_list[4], content_list[5], content_list[6], content_list[7], content_list[8] ) )
conn.commit()
print title_id, "success!"
is_read_database = False
count = count + 1
# 發現重複處理的文章後處置
#if find_title_id: break
# want be saved to database
pre_page_save = pre_page
# 爬進上一頁,直到第一頁
if pre_page == "None":
cur.execute('''DELETE FROM End_page''')
cur.execute('''INSERT OR REPLACE INTO End_page(pre_page)
VALUES ( ? )''', ("None", ) )
conn.commit()
print "pre_page=", pre_page
break
pre_page = ptt_url[:18] + pre_page
res = requests.get(pre_page)
soup = BeautifulSoup(res.text, "html.parser")
print "DO", pre_page
except:
print ""
print "Program interrupted!!!"
print "NOW TitleID:", title_id
print "NOW Page url:", pre_page_save
print "NOW titles:", count
cur.execute('''DELETE FROM End_page''')
cur.execute('''INSERT OR REPLACE INTO End_page(pre_page)
VALUES ( ? )''', ( pre_page_save, ) )
conn.commit()
print "Record pre_page to database."
exit()
print "Total pages:", page
print "Total titles:", count
print "Total fail pages", fail_count
print "Done!"
| zh | 0.364321 | # -*- coding: utf-8 -*- # author: <EMAIL> # version: 1.0 # github: https://github.com/yin-xuanHuang/web_crawler_ptt_gamesale # license: MIT license #from time import sleep # 文章內容處理 # 找出目標內容,並擷取 # 當標題非售非徵 # 消除特定字元 TODO: 多遊戲同一文章之間隔區隔問題 # 消除可能的"範例文字" # e.x.←可直接使用,無須編輯 # e.x.(限制級、輔15級、輔12級、保護級、普遍級) # e.x.←分級提示 # e.x.(未照左方分級者違規) # e.x.(徵求者、配件等無分級者可免填,販售國內未代理之遊戲,請直接填限制級即可。) # e.x.(遊戲有多語系版本必填,海外版請註明是哪個國家。) # 處理文章 # 排序內容,回傳有排序的序列 # TODO : "交換" 選項 # 售or徵求 0 # 物品名稱 1 # 遊戲分級 2 # 語系版本 3 # 徵求價 or 售價 4 # 交易方式 5 # 保存狀況 6 # 地區 7 # 附註 8 # 主程式開始 # connect database # TODO 適當資料型態 # set database # author, ok # date, ok # gamestation, ok # title_id, titleID ok # sellORwanted 0 ok # 物品名稱 1 item_name ok # 遊戲分級 2 law_limited ok # 語系版本 3 language ok # 徵求價 or 售價 4 price ok # 交易方式 5 howtotrade ok # 保存狀況 6 status ok # 地區 7 address ok # 附註 8 message ok CREATE TABLE IF NOT EXISTS Content( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, titleID TEXT UNIQUE, author_id INTEGER, sellORwanted_id INTEGER, gamestation_id INTEGER, date TEXT, item_name TEXT, law_limited_id INTEGER, language_id INTEGER, price TEXT, howtotrade TEXT, status TEXT, address TEXT, message TEXT ); CREATE TABLE IF NOT EXISTS Author ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE IF NOT EXISTS SellORwanted ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE IF NOT EXISTS Gamestation ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE IF NOT EXISTS Law_limited ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE IF NOT EXISTS Language ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ); CREATE TABLE IF NOT EXISTS Fail_title ( title_id TEXT NOT NULL PRIMARY KEY UNIQUE ); CREATE TABLE IF NOT EXISTS End_page ( pre_page TEXT NOT NULL PRIMARY KEY UNIQUE ); #from_page = raw_input("輸入開始頁數(index3311.html),不需要就直接按Enter:") #if from_page == "": # 
res = requests.get(ptt_url) #else: # try: # res = requests.get(ptt_url[:32] + from_page, "html.parser") # except: # print "輸入錯誤:", from_page # exit() #print res.encoding #print soup.prettify().encode('utf-8') #title_list_per_page = list() # 處理的文章條數 # 處理的頁數 # 問題文章數 # 發現重複處理的文章 #find_title_id = False # 是否使用database 抓取上次寫入的頁數 # 遊戲主機白名單 # page loop # page上的每一條文章 list row_list # 抓page上一頁的網址 # 處理每條文章開始 # title loop # 文章的連結與其識別碼 str title_url title_id # TODO 設定第二判斷(有爬完一次為第二次判斷,以免被覆蓋) # 判斷文章識別碼是否已經在資料庫中 #find_title_id = True #print title_id, "is already in database." #print "title_url=", title_url # 擷取文章名 # 徵求文或販賣文 INSERT OR IGNORE INTO Fail_title (title_id) VALUES ( ? ) # page上的遊戲主機 str gamestation # 文章的遊戲主機 是否在白名單中 # 爬進文章中 TODO: # 網址會壞掉嗎?例如剛(被)砍的文章.ans:Yes(try and except?) 利用回應時間來解決? # 擷取文章資訊 INSERT OR IGNORE INTO Fail_title (title_id) VALUES ( ? ) # 找出目標內容,並擷取 INSERT OR IGNORE INTO Fail_title (title_id) VALUES ( ? ) # print "---------------------------------------------------" # print "author=", author # print "date =", date # print "GS =", gamestation # print "ID =", title_id # print "售或徵求 =", content_list[0] # print "物品名稱 =", content_list[1] # print "遊戲分級 =", content_list[2] # print "語系版本 =", content_list[3] # print "目標價錢 =", content_list[4] # print "交易方式 =", content_list[5] # print "保存狀況 =", content_list[6] # print "所在地區 =", content_list[7] # print "其他附註 =", content_list[8] # print "---------------------------------------------------" # database 存入 INSERT OR IGNORE INTO Language (name) VALUES ( ? ) INSERT OR IGNORE INTO Law_limited (name) VALUES ( ? ) INSERT OR IGNORE INTO Gamestation (name) VALUES ( ? ) INSERT OR IGNORE INTO SellORwanted (name) VALUES ( ? ) INSERT OR IGNORE INTO Author (name) VALUES ( ? ) INSERT OR REPLACE INTO Content ( titleID, author_id, sellORwanted_id, gamestation_id, date, item_name, law_limited_id, language_id, price, howtotrade, status, address, message) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) # 發現重複處理的文章後處置 #if find_title_id: break # want be saved to database # 爬進上一頁,直到第一頁 DELETE FROM End_page INSERT OR REPLACE INTO End_page(pre_page) VALUES ( ? ) DELETE FROM End_page INSERT OR REPLACE INTO End_page(pre_page) VALUES ( ? ) | 2.556262 | 3 |
app/backend.py | stweil/fess-site-search | 22 | 6617846 | import os
import hashlib
import random
from flask import redirect, url_for, flash
from werkzeug.utils import secure_filename
from .app import app
from .generate_config import generate_config
from .webpack_manager import WebpackManager
def upload(form, file):
    """Store an uploaded stylesheet and build the search widget from it.

    Anything that is not a ``*.css`` upload sends the user back to the
    generator page.  Accepted files are saved under a randomized name and
    passed to webpack.  ``form`` is accepted for interface symmetry with
    :func:`wizard` but is not read here.
    """
    if file.filename == '':
        return redirect(url_for('generator'))
    if not (file and is_css(file.filename)):
        return redirect(url_for('generator'))
    stem = secure_filename(file.filename)[:-4]  # drop the ".css" suffix
    fname = '{}_{}'.format(stem, rand_hash())
    destination = os.path.join(app.config['UPLOAD_FOLDER'], fname + '.css')
    file.save(destination)
    print('Upload: {}.css'.format(fname))
    return run_webpack(fname)
def wizard(form):
    """Build (or reuse) a widget bundle derived from the wizard form.

    The bundle name is keyed on a hash of the form contents, so an
    identical form resolves to an already-built bundle.
    """
    fname = 'wizard_{}'.format(form2hash(form))
    if js_exists(fname):
        # Same form seen before: the bundle is already on disk.
        return redirect(url_for('demo', fname=fname))
    if generate_config(form, fname):
        return run_webpack(fname)
    return redirect(url_for('generator'))
def run_webpack(fname):
    """Invoke webpack for *fname*; redirect to the demo page on success."""
    succeeded = WebpackManager().run(app.config['UPLOAD_FOLDER'], app.instance_path, fname)
    if not succeeded:
        flash('Please try again')
        return redirect(url_for('generator'))
    return redirect(url_for('demo', fname=fname))
def rand_hash():
    """Return 10 lowercase hex chars derived from 256 random bits.

    Used only to make uploaded-file names unique; not a security token.
    """
    entropy = str(random.getrandbits(256)).encode('utf-8')
    return hashlib.sha256(entropy).hexdigest()[:10]
def form2hash(form):
    """Deterministic 10-char hex digest of the form's string representation."""
    digest = hashlib.sha256(str(form).encode())
    return digest.hexdigest()[:10]
def is_css(filename):
    """Return True iff *filename* has a ``.css`` extension (case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() == 'css'
def is_empty_form(form):
    """Return True when every field value in *form* is falsy (or form is empty)."""
    return not any(value for _, value in form.items())
def js_exists(fname):
    """Check whether the bundled ``fess-ss-<fname>.min.js`` was already built."""
    bundle = 'fess-ss-{}.min.js'.format(fname)
    return os.path.exists(os.path.join(app.config['DOWNLOAD_FOLDER'], bundle))
| import os
import hashlib
import random
from flask import redirect, url_for, flash
from werkzeug.utils import secure_filename
from .app import app
from .generate_config import generate_config
from .webpack_manager import WebpackManager
def upload(form, file):
if file.filename == '':
return redirect(url_for('generator'))
if file and is_css(file.filename):
base = secure_filename(file.filename)[:-4]
hash_str = rand_hash()
fname = '{}_{}'.format(base, hash_str)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], fname + '.css'))
print('Upload: {}.css'.format(fname))
return run_webpack(fname)
return redirect(url_for('generator'))
def wizard(form):
hash_str = form2hash(form)
fname = 'wizard_{}'.format(hash_str)
if js_exists(fname):
return redirect(url_for('demo', fname=fname))
elif generate_config(form, fname):
return run_webpack(fname)
else:
return redirect(url_for('generator'))
def run_webpack(fname):
wp_manager = WebpackManager()
if wp_manager.run(app.config['UPLOAD_FOLDER'], app.instance_path, fname):
return redirect(url_for('demo', fname=fname))
else:
flash('Please try again')
return redirect(url_for('generator'))
def rand_hash():
hashstr = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
return hashstr[:10]
def form2hash(form):
return hashlib.sha256(str(form).encode()).hexdigest()[:10]
def is_css(filename):
if '.' in filename:
ext = filename.rsplit('.', 1)[1].lower()
return ext == 'css'
return False
def is_empty_form(form):
for (k, v) in form.items():
if v:
return False
return True
def js_exists(fname):
jsfile = 'fess-ss-{}.min.js'.format(fname)
path = os.path.join(app.config['DOWNLOAD_FOLDER'], jsfile)
return os.path.exists(path)
| none | 1 | 2.353606 | 2 | |
src/impscan/config.py | lmmx/impscan | 0 | 6617847 | <gh_stars>0
__all__ = ["EnvConfig"]
class EnvConfig:
    """Mutable bag of named settings; remembers which names were assigned.

    Each keyword argument becomes an attribute, and ``settings`` holds the
    set of attribute names registered through :meth:`set_config`.
    """

    def __init__(self, **kwargs):
        self.settings = set()
        for name, value in kwargs.items():
            self.set_config(name, value)

    def set_config(self, setting, value):
        """Store *value* as attribute *setting* and record the name."""
        setattr(self, setting, value)
        self.settings.add(setting)
| __all__ = ["EnvConfig"]
class EnvConfig:
def __init__(self, **kwargs):
self.settings = set()
for k, v in kwargs.items():
self.set_config(k, v)
def set_config(self, setting, value):
setattr(self, setting, value)
self.settings.add(setting) | none | 1 | 2.793683 | 3 | |
oo/pessoa.py | MichelPinho/pythonbirds | 0 | 6617848 | <reponame>MichelPinho/pythonbirds<gh_stars>0
class Pessoa:
    """A person with a name (nome), an age (idade) and children (filhos)."""

    olhos = 2  # class-level attribute shared by every instance

    def __init__(self, *filhos, nome=None, idade=44):
        self.nome = nome
        self.filhos = list(filhos)
        self.idade = idade

    @staticmethod
    def metodo_estatico():
        # Plain function namespaced under the class; no cls/self access.
        return 44

    @classmethod
    def nome_e_atributos_de_classes(cls):
        # Operates on the class object, so subclasses report themselves.
        return '{} - olhos {}'.format(cls, cls.olhos)

    def cumprimentar(self):
        # Greets with the instance's identity (id()).
        return 'Olá {}'.format(id(self))
if __name__ == '__main__':
    # Demo: build a tiny family and exercise every kind of method.
    renzo = Pessoa(nome='Renzo')
    michel = Pessoa(renzo, nome='Michel')  # renzo becomes michel's child
    # An instance method called through the class needs the instance passed.
    print(Pessoa.cumprimentar(michel))
    print(michel.cumprimentar())
    print(michel.nome)
    print(michel.idade)
    for filho in michel.filhos:
        print(filho.nome)
    # Class attribute read through instances (both resolve to Pessoa.olhos).
    print(michel.olhos)
    print(renzo.olhos)
    # Static and class methods work from the class or from an instance.
    print(Pessoa.metodo_estatico(), michel.metodo_estatico())
    print(Pessoa.nome_e_atributos_de_classes(), michel.nome_e_atributos_de_classes())
| class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=44):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
@staticmethod
def metodo_estatico():
return 44
@classmethod
def nome_e_atributos_de_classes(cls):
return f'{cls} - olhos {cls.olhos}'
def cumprimentar(self):
return f'Olá {id(self)}'
if __name__ == '__main__':
renzo = Pessoa(nome='Renzo')
michel = Pessoa(renzo, nome='Michel')
print(Pessoa.cumprimentar(michel))
print(michel.cumprimentar())
print(michel.nome)
print(michel.idade)
for filho in michel.filhos:
print(filho.nome)
print(michel.olhos)
print(renzo.olhos)
print(Pessoa.metodo_estatico(), michel.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classes(), michel.nome_e_atributos_de_classes()) | none | 1 | 3.883169 | 4 | |
gen-1-2bytes-wif-v2.py | pbies/bitcoin-tools | 0 | 6617849 | <gh_stars>0
#!/usr/bin/env python3
import base58
import hashlib
import sys
def b58(hex):
	# Base58Check-encode raw bytes (adds the 4-byte checksum).
	# NOTE(review): parameter name shadows the builtin hex().
	return base58.b58encode_check(hex);
def sha256(hex):
	# Single SHA-256 digest of raw bytes.
	return hashlib.sha256(hex).digest();
def to_wif(data):
	# WIF encoding: Base58Check of 0x80 || SHA256(data), i.e. the input is
	# hashed once to form the 32-byte private key.
	return b58(bytes.fromhex('80'+sha256(data).hex()));
# Emit candidate WIF private keys, one per line, each followed by " 0".
# Coverage (same order as before): every 1-byte integer seed, then every
# 2-byte seed, first single-SHA256 hashed, then double-SHA256 hashed.
# Uses the to_wif() helper above instead of re-inlining its logic.
for width in (1, 2):
	for i in range(256 ** width):
		print(to_wif(i.to_bytes(width, 'big')).decode('utf-8') + ' 0');
for width in (1, 2):
	for i in range(256 ** width):
		# to_wif() hashes once more, so pre-hashing yields SHA256(SHA256(i)).
		print(to_wif(sha256(i.to_bytes(width, 'big'))).decode('utf-8') + ' 0');
| #!/usr/bin/env python3
import base58
import hashlib
import sys
def b58(hex):
return base58.b58encode_check(hex);
def sha256(hex):
return hashlib.sha256(hex).digest();
def to_wif(data):
return b58(bytes.fromhex('80'+sha256(data).hex()));
for i in range(256):
print(b58(bytes.fromhex('80'+sha256(i.to_bytes(1,'big')).hex())).decode('utf-8')+' 0');
for i in range(65536):
print(b58(bytes.fromhex('80'+sha256(i.to_bytes(2,'big')).hex())).decode('utf-8')+' 0');
for i in range(256):
print(b58(bytes.fromhex('80'+sha256(sha256(i.to_bytes(1,'big'))).hex())).decode('utf-8')+' 0');
for i in range(65536):
print(b58(bytes.fromhex('80'+sha256(sha256(i.to_bytes(2,'big'))).hex())).decode('utf-8')+' 0'); | fr | 0.221828 | #!/usr/bin/env python3 | 2.805157 | 3 |
mblogger/generate_data.py | mlatcl/fbp-vs-oop | 6 | 6617850 | <filename>mblogger/generate_data.py
import random
from essential_generators import DocumentGenerator
from mblogger.record_types import *
def generate_requests(n_follow_requests, n_unfollow_requests, user_ids, followers):
    """Generate random follow and unfollow requests for testing.

    n_follow_requests   -- number of follow requests to create.
    n_unfollow_requests -- maximum number of unfollow requests to create.
    user_ids            -- user ids to sample authors from.
    followers           -- follower records with .user_id and .followers.

    Returns a list of FollowRequest records (follows first, then unfollows).
    """
    requests = []
    for _ in range(n_follow_requests):
        # Pick two distinct users: the first follows the second.
        active, passive = random.sample(user_ids, 2)
        follow_request = FollowRequest(active_author=active, passive_author=passive, follow=True)
        requests.append(follow_request)
    for _ in range(n_unfollow_requests):
        followers_record = random.choice(followers)
        if len(followers_record.followers) == 0:
            # author has empty list of followers
            # no unfollow requests this time then
            # NOTE(review): this break abandons ALL remaining unfollow
            # requests, not just this draw — confirm that is intended.
            break
        active = followers_record.user_id
        passive = random.choice(list(followers_record.followers))
        unfollow_request = FollowRequest(active_author=active, passive_author=passive, follow=False)
        requests.append(unfollow_request)
    return requests
def generate_posts(n_new_posts, user_ids, post_id_offset):
    """Create *n_new_posts* random posts with ids sequential after *post_id_offset*.

    Each post gets a generated sentence, a randomly chosen author from
    *user_ids*, and the current timestamp.
    """
    gen = DocumentGenerator()
    new_posts = []
    for seq in range(1, n_new_posts + 1):
        # Draw sentence before author to keep the shared-RNG order stable.
        body = gen.sentence()
        author = random.choice(user_ids)
        new_posts.append(Post(post_id_offset + seq, author, body, timestamp=datetime.now()))
    return new_posts
| <filename>mblogger/generate_data.py
import random
from essential_generators import DocumentGenerator
from mblogger.record_types import *
def generate_requests(n_follow_requests, n_unfollow_requests, user_ids, followers):
requests = []
for _ in range(n_follow_requests):
active, passive = random.sample(user_ids, 2)
follow_request = FollowRequest(active_author=active, passive_author=passive, follow=True)
requests.append(follow_request)
for _ in range(n_unfollow_requests):
followers_record = random.choice(followers)
if len(followers_record.followers) == 0:
# author has empty list of followers
# no unfollow requests this time then
break
active = followers_record.user_id
passive = random.choice(list(followers_record.followers))
unfollow_request = FollowRequest(active_author=active, passive_author=passive, follow=False)
requests.append(unfollow_request)
return requests
def generate_posts(n_new_posts, user_ids, post_id_offset):
gen = DocumentGenerator()
new_posts = []
for _ in range(n_new_posts):
text = gen.sentence()
author_id = random.choice(user_ids)
post_id = post_id_offset + len(new_posts) + 1
post = Post(post_id, author_id, text, timestamp=datetime.now())
new_posts.append(post)
return new_posts
| en | 0.879743 | # author has empty list of followers # no unfollow requests this time then | 2.906633 | 3 |