max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tests/test_sub_bookmark.py | nicolasmartinelli/oomusic | 4 | 6614351 | <filename>tests/test_sub_bookmark.py
# -*- coding: utf-8 -*-
from . import test_sub_common
class TestOomusicSubBookmark(test_sub_common.TestOomusicSubCommon):
    # Tests for the Subsonic bookmark / play-queue REST endpoints. Each test
    # issues an authenticated request via ``self.url_open`` (credentials are
    # appended through ``self.cred``), then strips the first two and last two
    # lines of the XML answer (header plus response envelope) before
    # comparing the remaining payload.

    def test_00_getBookmarks(self):
        """
        Test getBookmarks method
        """
        res = self.url_open("/rest/getBookmarks.view" + self.cred).content.decode("utf-8")
        # Keep only the payload lines between the XML header and the
        # closing envelope, joined into a single string.
        res_clean = "".join(res.split("\n")[2:][:-2])
        # No bookmark has been created yet, so the element is empty.
        self.assertEqual(res_clean, " <bookmarks/>")
        self.cleanUp()

    def test_10_actionBookmark(self):
        """
        Test actionBookmark method
        """
        # create/delete bookmark and get/save play queue all answer with an
        # empty payload on success.
        res = self.url_open("/rest/createBookmark.view" + self.cred).content.decode("utf-8")
        res_clean = "".join(res.split("\n")[2:][:-2])
        self.assertEqual(res_clean, "")
        res = self.url_open("/rest/deleteBookmark.view" + self.cred).content.decode("utf-8")
        res_clean = "".join(res.split("\n")[2:][:-2])
        self.assertEqual(res_clean, "")
        res = self.url_open("/rest/getPlayQueue.view" + self.cred).content.decode("utf-8")
        res_clean = "".join(res.split("\n")[2:][:-2])
        self.assertEqual(res_clean, "")
        res = self.url_open("/rest/savePlayQueue.view" + self.cred).content.decode("utf-8")
        res_clean = "".join(res.split("\n")[2:][:-2])
        self.assertEqual(res_clean, "")
        self.cleanUp()
| <filename>tests/test_sub_bookmark.py
# -*- coding: utf-8 -*-
from . import test_sub_common
class TestOomusicSubBookmark(test_sub_common.TestOomusicSubCommon):
def test_00_getBookmarks(self):
"""
Test getBookmarks method
"""
res = self.url_open("/rest/getBookmarks.view" + self.cred).content.decode("utf-8")
res_clean = "".join(res.split("\n")[2:][:-2])
self.assertEqual(res_clean, " <bookmarks/>")
self.cleanUp()
def test_10_actionBookmark(self):
"""
Test actionBookmark method
"""
res = self.url_open("/rest/createBookmark.view" + self.cred).content.decode("utf-8")
res_clean = "".join(res.split("\n")[2:][:-2])
self.assertEqual(res_clean, "")
res = self.url_open("/rest/deleteBookmark.view" + self.cred).content.decode("utf-8")
res_clean = "".join(res.split("\n")[2:][:-2])
self.assertEqual(res_clean, "")
res = self.url_open("/rest/getPlayQueue.view" + self.cred).content.decode("utf-8")
res_clean = "".join(res.split("\n")[2:][:-2])
self.assertEqual(res_clean, "")
res = self.url_open("/rest/savePlayQueue.view" + self.cred).content.decode("utf-8")
res_clean = "".join(res.split("\n")[2:][:-2])
self.assertEqual(res_clean, "")
self.cleanUp()
| en | 0.523071 | # -*- coding: utf-8 -*- Test getBookmarks method Test actionBookmark method | 2.595669 | 3 |
tests/test_app.py | WoolenSweater/liteapi | 0 | 6614352 | <reponame>WoolenSweater/liteapi
import os
import pytest
import asyncio
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from starlette.testclient import TestClient
from starlette.exceptions import HTTPException
from starlette.middleware.trustedhost import TrustedHostMiddleware
from hius import Hius
from hius.routing import Router, route
from hius.handlers import StaticFiles
from hius.responses import JSONResponse, PlainTextResponse
app = Hius()
# Reject requests whose Host header is not "testserver"
# (exercised by test_middleware below).
app.add_middleware(TrustedHostMiddleware, allowed_hosts=['testserver'])

@app.exception_handler(500)
async def error_500(request, exc):
    # Custom handler keyed on status code.
    return JSONResponse({'detail': 'Server Error'}, status_code=500)

@app.exception_handler(405)
async def method_not_allowed(request, exc):
    return JSONResponse({'detail': 'Custom message'}, status_code=405)

@app.exception_handler(HTTPException)
async def http_exception(request, exc):
    # Handler keyed on exception class: mirror the exception's detail
    # and status code back to the client.
    return JSONResponse({'detail': exc.detail}, status_code=exc.status_code)

@app.on_startup()
async def startup(app):
    pass

@app.on_shutdown()
def shutdown(app):
    pass

@app.on_lifespan()
async def lifespan(app):
    # Generator-style lifespan hook: code before ``yield`` runs at startup,
    # code after it at shutdown (none here).
    yield

@app.route('/sync_func')
def sync_func_homepage(request):
    return PlainTextResponse('Hello, world! (SYNC)')

@app.route('/async_func', name='custom_name_for_async')
async def async_func_homepage(request):
    return PlainTextResponse('Hello, world! (ASYNC)')

@app.route('/class', methods=['GET', 'POST'])
class Homepage:
    # Class-based endpoint: HTTP verbs dispatch to same-named methods,
    # which may be either sync or async.
    def get(self, request):
        return PlainTextResponse('Hello, world! (SYNC, GET)')

    async def post(self, request):
        return PlainTextResponse('Hello, world! (ASYNC, POST)')

users = Router()

@users.route('/')
def all_users_page(request):
    return PlainTextResponse('Hello, everyone!')

@users.route('/{username}')
def user_page(request, username: str):
    # Path parameter injected by name/annotation.
    return PlainTextResponse(f'Hello, {username}!')

app.mount('/users', users)

@app.route('/400')
def bad_request(request, username: str):
    # Declares a required parameter callers never supply, forcing a
    # validation error (see test_400).
    pass

@app.route('/500')
def runtime_error(request):
    # Unhandled exception path (see test_500).
    raise RuntimeError()

@app.websocket('/ws')
async def websocket_endpoint(ws, name: str = None):
    # Query-string parameters are injected into websocket handlers too.
    await ws.accept()
    await ws.send_text(f'Hello, {name}!')
    await ws.close()
# ---
@pytest.fixture
def cli():
    # Test client that re-raises server-side exceptions in the test process.
    return TestClient(app)

@pytest.fixture
def cli_exc():
    # Test client that converts server-side exceptions into 500 responses
    # instead of raising them.
    return TestClient(app, raise_server_exceptions=False)
# ---
def test_baggage():
    # The app supports item assignment; values land in ``app.baggage``.
    app['variable'] = 'var'
    assert app.baggage == {'variable': 'var'}
    assert app['variable'] == 'var'

# ---

def test_url_path_for_func_name():
    # Routes without an explicit name are addressable by function name.
    assert app.url_path_for('sync_func_homepage') == '/sync_func'

def test_url_path_for_custom_name():
    # An explicit ``name=`` overrides the function name.
    assert app.url_path_for('custom_name_for_async') == '/async_func'

def test_url_path_for_mounted():
    # Reverse URL building resolves through mounted routers and fills
    # in path parameters.
    assert app.url_path_for('user_page', username='alice') == '/users/alice'

def test_sync_func_route(cli):
    response = cli.get('/sync_func')
    assert response.status_code == 200
    assert response.text == 'Hello, world! (SYNC)'
    # HEAD is served by the same route but returns an empty body.
    response = cli.head('/sync_func')
    assert response.status_code == 200
    assert response.text == ""

def test_async_func_route(cli):
    response = cli.get('/async_func')
    assert response.status_code == 200
    assert response.text == 'Hello, world! (ASYNC)'
    response = cli.head('/async_func')
    assert response.status_code == 200
    assert response.text == ''

def test_sync_class_get_route(cli):
    response = cli.get('/class')
    assert response.status_code == 200
    assert response.text == 'Hello, world! (SYNC, GET)'

def test_async_class_post_route(cli):
    response = cli.post('/class')
    assert response.status_code == 200
    assert response.text == 'Hello, world! (ASYNC, POST)'

# ---

def test_mounted_route(cli):
    response = cli.get('/users/')
    assert response.status_code == 200
    assert response.text == 'Hello, everyone!'

def test_mounted_route_path_params(cli):
    response = cli.get('/users/hius')
    assert response.status_code == 200
    assert response.text == 'Hello, hius!'

def test_websocket_route(cli):
    # The ``name`` query parameter is injected into the ws handler.
    with cli.websocket_connect('/ws?name=Alice') as session:
        assert session.receive_text() == "Hello, Alice!"

# --

def test_400(cli):
    # /400 declares a required ``username`` parameter that is never sent,
    # so validation fails with a pydantic-style error payload.
    response = cli.get('/400')
    assert response.status_code == 400
    assert response.json() == [{
        'loc': ['username'],
        'msg': 'field required',
        'type': 'value_error.missing'
    }]

def test_404(cli):
    response = cli.get('/404')
    assert response.status_code == 404
    assert response.json() == {'detail': 'Not Found'}

def test_405(cli):
    # Both function- and class-based endpoints go through the custom
    # 405 handler registered above.
    response = cli.post('/sync_func')
    assert response.status_code == 405
    assert response.json() == {'detail': 'Custom message'}
    response = cli.put('/class')
    assert response.status_code == 405
    assert response.json() == {'detail': 'Custom message'}

def test_500(cli_exc):
    response = cli_exc.get('/500')
    assert response.status_code == 500
    assert response.json() == {'detail': 'Server Error'}
# ---
def test_middleware():
    # TrustedHostMiddleware (registered on the module-level app) rejects
    # requests from hosts outside ``allowed_hosts``.
    cli = TestClient(app, base_url='http://incorrecthost')
    response = cli.get('/sync_func')
    assert response.status_code == 400
    assert response.text == 'Invalid host header'

def test_app_debug():
    # Local app shadows the module-level one.
    app = Hius(debug=True)

    @app.route('/')
    async def homepage(request):
        raise RuntimeError()

    cli = TestClient(app, raise_server_exceptions=False)
    response = cli.get('/')
    assert response.status_code == 500
    # In debug mode the traceback text is returned instead of the
    # generic 500 page.
    assert 'RuntimeError' in response.text
    assert app.debug

def test_app_mount(tmpdir):
    path = os.path.join(tmpdir, 'example.txt')
    with open(path, 'w') as file:
        file.write('<file content>')
    app = Hius()
    app.mount('/static', StaticFiles(directory=tmpdir))
    cli = TestClient(app)
    response = cli.get('/static/example.txt')
    assert response.status_code == 200
    assert response.text == '<file content>'
    # The static handler only serves read methods.
    response = cli.post('/static/example.txt')
    assert response.status_code == 405
    assert response.text == 'Method Not Allowed'

def test_app_add_route():
    # Programmatic registration, equivalent to the @app.route decorator.
    app = Hius()

    async def homepage(request):
        return PlainTextResponse('Hello, World!')

    app.add_route('/', homepage)
    cli = TestClient(app)
    response = cli.get('/')
    assert response.status_code == 200
    assert response.text == 'Hello, World!'

def test_app_add_routes():
    # Bulk registration via a list of ``route(...)`` entries.
    app = Hius()

    async def homepage(request):
        return PlainTextResponse('Hello, World!')

    app.add_routes([route('/', homepage)])
    cli = TestClient(app)
    response = cli.get('/')
    assert response.status_code == 200
    assert response.text == 'Hello, World!'

def test_app_add_websocket_route():
    app = Hius()

    async def websocket_endpoint(session):
        await session.accept()
        await session.send_text('Hello, world!')
        await session.close()

    app.add_websocket('/ws', websocket_endpoint)
    cli = TestClient(app)
    with cli.websocket_connect('/ws') as session:
        text = session.receive_text()
        assert text == 'Hello, world!'

# ---

class MultipartPost:
    # Endpoint that sleeps briefly before reading the multipart form so
    # concurrent requests overlap and exercise request isolation.
    async def post(self, req):
        await asyncio.sleep(0.1)
        form = await req.form()
        file = await form['file'].read()
        return PlainTextResponse(file)

def send(cli, data):
    # Upload ``data`` as a file and return the echoed response body.
    return cli.post('/', files={'file': ('test.txt', data)}).content.decode()

def test_multipart_form():
    # Fire 20 uploads in parallel threads; every response must echo the
    # payload of its own request (no cross-request state leakage).
    cli = TestClient(Hius(routes=[route('/', MultipartPost)]))
    data = sorted(f'data{n}' for n in range(20))
    with ThreadPoolExecutor() as pool:
        assert sorted(pool.map(partial(send, cli), data)) == data
| import os
import pytest
import asyncio
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from starlette.testclient import TestClient
from starlette.exceptions import HTTPException
from starlette.middleware.trustedhost import TrustedHostMiddleware
from hius import Hius
from hius.routing import Router, route
from hius.handlers import StaticFiles
from hius.responses import JSONResponse, PlainTextResponse
app = Hius()
app.add_middleware(TrustedHostMiddleware, allowed_hosts=['testserver'])
@app.exception_handler(500)
async def error_500(request, exc):
return JSONResponse({'detail': 'Server Error'}, status_code=500)
@app.exception_handler(405)
async def method_not_allowed(request, exc):
return JSONResponse({'detail': 'Custom message'}, status_code=405)
@app.exception_handler(HTTPException)
async def http_exception(request, exc):
return JSONResponse({'detail': exc.detail}, status_code=exc.status_code)
@app.on_startup()
async def startup(app):
pass
@app.on_shutdown()
def shutdown(app):
pass
@app.on_lifespan()
async def lifespan(app):
yield
@app.route('/sync_func')
def sync_func_homepage(request):
return PlainTextResponse('Hello, world! (SYNC)')
@app.route('/async_func', name='custom_name_for_async')
async def async_func_homepage(request):
return PlainTextResponse('Hello, world! (ASYNC)')
@app.route('/class', methods=['GET', 'POST'])
class Homepage:
def get(self, request):
return PlainTextResponse('Hello, world! (SYNC, GET)')
async def post(self, request):
return PlainTextResponse('Hello, world! (ASYNC, POST)')
users = Router()
@users.route('/')
def all_users_page(request):
return PlainTextResponse('Hello, everyone!')
@users.route('/{username}')
def user_page(request, username: str):
return PlainTextResponse(f'Hello, {username}!')
app.mount('/users', users)
@app.route('/400')
def bad_request(request, username: str):
pass
@app.route('/500')
def runtime_error(request):
raise RuntimeError()
@app.websocket('/ws')
async def websocket_endpoint(ws, name: str = None):
await ws.accept()
await ws.send_text(f'Hello, {name}!')
await ws.close()
# ---
@pytest.fixture
def cli():
return TestClient(app)
@pytest.fixture
def cli_exc():
return TestClient(app, raise_server_exceptions=False)
# ---
def test_baggage():
app['variable'] = 'var'
assert app.baggage == {'variable': 'var'}
assert app['variable'] == 'var'
# ---
def test_url_path_for_func_name():
assert app.url_path_for('sync_func_homepage') == '/sync_func'
def test_url_path_for_custom_name():
assert app.url_path_for('custom_name_for_async') == '/async_func'
def test_url_path_for_mounted():
assert app.url_path_for('user_page', username='alice') == '/users/alice'
def test_sync_func_route(cli):
response = cli.get('/sync_func')
assert response.status_code == 200
assert response.text == 'Hello, world! (SYNC)'
response = cli.head('/sync_func')
assert response.status_code == 200
assert response.text == ""
def test_async_func_route(cli):
response = cli.get('/async_func')
assert response.status_code == 200
assert response.text == 'Hello, world! (ASYNC)'
response = cli.head('/async_func')
assert response.status_code == 200
assert response.text == ''
def test_sync_class_get_route(cli):
response = cli.get('/class')
assert response.status_code == 200
assert response.text == 'Hello, world! (SYNC, GET)'
def test_async_class_post_route(cli):
response = cli.post('/class')
assert response.status_code == 200
assert response.text == 'Hello, world! (ASYNC, POST)'
# ---
def test_mounted_route(cli):
response = cli.get('/users/')
assert response.status_code == 200
assert response.text == 'Hello, everyone!'
def test_mounted_route_path_params(cli):
response = cli.get('/users/hius')
assert response.status_code == 200
assert response.text == 'Hello, hius!'
def test_websocket_route(cli):
with cli.websocket_connect('/ws?name=Alice') as session:
assert session.receive_text() == "Hello, Alice!"
# --
def test_400(cli):
response = cli.get('/400')
assert response.status_code == 400
assert response.json() == [{
'loc': ['username'],
'msg': 'field required',
'type': 'value_error.missing'
}]
def test_404(cli):
response = cli.get('/404')
assert response.status_code == 404
assert response.json() == {'detail': 'Not Found'}
def test_405(cli):
response = cli.post('/sync_func')
assert response.status_code == 405
assert response.json() == {'detail': 'Custom message'}
response = cli.put('/class')
assert response.status_code == 405
assert response.json() == {'detail': 'Custom message'}
def test_500(cli_exc):
response = cli_exc.get('/500')
assert response.status_code == 500
assert response.json() == {'detail': 'Server Error'}
# ---
def test_middleware():
cli = TestClient(app, base_url='http://incorrecthost')
response = cli.get('/sync_func')
assert response.status_code == 400
assert response.text == 'Invalid host header'
def test_app_debug():
app = Hius(debug=True)
@app.route('/')
async def homepage(request):
raise RuntimeError()
cli = TestClient(app, raise_server_exceptions=False)
response = cli.get('/')
assert response.status_code == 500
assert 'RuntimeError' in response.text
assert app.debug
def test_app_mount(tmpdir):
path = os.path.join(tmpdir, 'example.txt')
with open(path, 'w') as file:
file.write('<file content>')
app = Hius()
app.mount('/static', StaticFiles(directory=tmpdir))
cli = TestClient(app)
response = cli.get('/static/example.txt')
assert response.status_code == 200
assert response.text == '<file content>'
response = cli.post('/static/example.txt')
assert response.status_code == 405
assert response.text == 'Method Not Allowed'
def test_app_add_route():
app = Hius()
async def homepage(request):
return PlainTextResponse('Hello, World!')
app.add_route('/', homepage)
cli = TestClient(app)
response = cli.get('/')
assert response.status_code == 200
assert response.text == 'Hello, World!'
def test_app_add_routes():
app = Hius()
async def homepage(request):
return PlainTextResponse('Hello, World!')
app.add_routes([route('/', homepage)])
cli = TestClient(app)
response = cli.get('/')
assert response.status_code == 200
assert response.text == 'Hello, World!'
def test_app_add_websocket_route():
app = Hius()
async def websocket_endpoint(session):
await session.accept()
await session.send_text('Hello, world!')
await session.close()
app.add_websocket('/ws', websocket_endpoint)
cli = TestClient(app)
with cli.websocket_connect('/ws') as session:
text = session.receive_text()
assert text == 'Hello, world!'
# ---
class MultipartPost:
async def post(self, req):
await asyncio.sleep(0.1)
form = await req.form()
file = await form['file'].read()
return PlainTextResponse(file)
def send(cli, data):
return cli.post('/', files={'file': ('test.txt', data)}).content.decode()
def test_multipart_form():
cli = TestClient(Hius(routes=[route('/', MultipartPost)]))
data = sorted(f'data{n}' for n in range(20))
with ThreadPoolExecutor() as pool:
assert sorted(pool.map(partial(send, cli), data)) == data | en | 0.970589 | # --- # --- # --- # --- # -- # --- # --- | 2.090705 | 2 |
backend/sensors/Sensor.py | MarioBartolome/GII_0_17.02_SNSI | 1 | 6614353 | <filename>backend/sensors/Sensor.py<gh_stars>1-10
from Bluetin_Echo import Echo
class Sensor:
    """Ultrasonic (SR04-style) distance sensor mounted at a fixed angle.

    Wraps a ``Bluetin_Echo.Echo`` driver and exposes simple accessors for
    the wiring pins, the mounting angle and distance readings.
    """

    def __init__(self, trigger_pin, echo_pin, angle):
        # Remember the wiring and orientation, then bring up the driver.
        self._trigger_pin, self._echo_pin = trigger_pin, echo_pin
        self._angle = angle
        self._sr04 = Echo(trigger_pin, echo_pin)

    def getDistance(self, samples=1):
        """Measured distance in centimetres, averaged over *samples* reads."""
        return self._sr04.read('cm', samples)

    def getAngle(self):
        """Mounting angle of this sensor."""
        return self._angle

    def getTriggerPin(self):
        """GPIO pin used to trigger a measurement."""
        return self._trigger_pin

    def getEchoPin(self):
        """GPIO pin the echo pulse is read from."""
        return self._echo_pin

    def stop(self):
        """Shut down the underlying echo driver."""
        self._sr04.stop()
| <filename>backend/sensors/Sensor.py<gh_stars>1-10
from Bluetin_Echo import Echo
class Sensor:
def __init__(self, trigger_pin, echo_pin, angle):
self._trigger_pin = trigger_pin
self._echo_pin = echo_pin
self._angle = angle
self._sr04 = Echo(self._trigger_pin, self._echo_pin)
def getDistance(self, samples = 1):
return self._sr04.read('cm', samples)
def getAngle(self):
return self._angle
def getTriggerPin(self):
return self._trigger_pin
def getEchoPin(self):
return self._echo_pin
def stop(self):
self._sr04.stop()
| none | 1 | 2.728417 | 3 | |
tests/unit/plugins/openstack/scenarios/cinder/test_volume_types.py | TeamXgrid/xgrid-rally | 1 | 6614354 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.cinder import volume_types
from tests.unit import test
class fake_type(object):
    """Minimal stand-in for a Cinder volume-type object.

    Only the ``name`` attribute is needed by the scenarios under test.
    """
    name = "fake"
class CinderVolumeTypesTestCase(test.ScenarioTestCase):
    # Unit tests for the Cinder volume-type Rally scenarios: each test
    # replaces the scenario's atomic actions with mocks, runs the scenario,
    # and verifies that exactly the expected atomic calls were made with
    # the expected arguments.

    def _get_context(self):
        # Scenario context pre-populated with one fake volume type, the way
        # the ``volume_types`` context plugin would provide it.
        context = test.get_test_context()
        context.update({
            "volume_types": [{"id": "fake_id",
                              "name": "fake_name"}]})
        return context

    def test_create_and_get_volume_type(self):
        scenario = volume_types.CreateAndGetVolumeType(self.context)
        scenario._create_volume_type = mock.Mock()
        scenario._get_volume_type = mock.Mock()
        scenario.run(fakeargs="f")
        # Extra kwargs are forwarded to creation; the created type is then
        # fetched back by object.
        scenario._create_volume_type.assert_called_once_with(fakeargs="f")
        scenario._get_volume_type.assert_called_once_with(
            scenario._create_volume_type.return_value)

    def test_create_and_delete_volume_type(self):
        scenario = volume_types.CreateAndDeleteVolumeType(self.context)
        scenario._create_volume_type = mock.Mock()
        scenario._delete_volume_type = mock.Mock()
        scenario.run(fakeargs="fakeargs")
        scenario._create_volume_type.assert_called_once_with(
            fakeargs="fakeargs")
        # The type just created is the one deleted.
        scenario._delete_volume_type.assert_called_once_with(
            scenario._create_volume_type.return_value)

    def test_create_and_delete_encryption_type(self):
        # Uses the pre-seeded context, so the scenario picks the existing
        # "fake_id" volume type for the encryption type.
        scenario = volume_types.CreateAndDeleteEncryptionType(
            self._get_context())
        scenario._create_encryption_type = mock.Mock()
        scenario._delete_encryption_type = mock.Mock()
        scenario.run(create_specs="fakecreatespecs")
        scenario._create_encryption_type.assert_called_once_with(
            "fake_id", "fakecreatespecs")
        scenario._delete_encryption_type.assert_called_once_with(
            "fake_id")

    def test_create_volume_type_and_encryption_type(self):
        scenario = volume_types.CreateVolumeTypeAndEncryptionType(self.context)
        scenario._create_volume_type = mock.Mock()
        scenario._create_encryption_type = mock.Mock()
        scenario.run(specs="fakespecs", fakeargs="fakeargs")
        scenario._create_volume_type.assert_called_once_with(
            fakeargs="fakeargs")
        # The encryption type is attached to the freshly created type.
        scenario._create_encryption_type.assert_called_once_with(
            scenario._create_volume_type.return_value, "fakespecs")

    def test_create_and_list_encryption_type(self):
        scenario = volume_types.CreateAndListEncryptionType(self.context)
        scenario._create_volume_type = mock.Mock()
        scenario._create_encryption_type = mock.Mock()
        scenario._list_encryption_type = mock.Mock()
        scenario.run(specs="fakespecs", search_opts="fakeopts",
                     fakeargs="fakeargs")
        scenario._create_volume_type.assert_called_once_with(
            fakeargs="fakeargs")
        scenario._create_encryption_type.assert_called_once_with(
            scenario._create_volume_type.return_value, "fakespecs")
        # Listing is filtered with the supplied search options.
        scenario._list_encryption_type.assert_called_once_with(
            "fakeopts")

    def test_create_and_set_volume_type_keys(self):
        scenario = volume_types.CreateAndSetVolumeTypeKeys(self.context)
        volume_type = mock.MagicMock()
        volume_type_key = {"volume_backend_name": "LVM_iSCSI"}
        scenario._create_volume_type = mock.MagicMock()
        scenario._set_volume_type_keys = mock.MagicMock()
        scenario._create_volume_type.return_value = volume_type
        scenario.run(volume_type_key, fakeargs="fakeargs")
        scenario._create_volume_type.assert_called_once_with(
            fakeargs="fakeargs")
        # The extra-spec keys are applied to the created type.
        scenario._set_volume_type_keys.assert_called_once_with(volume_type,
                                                               volume_type_key)
| # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.cinder import volume_types
from tests.unit import test
class fake_type(object):
name = "fake"
class CinderVolumeTypesTestCase(test.ScenarioTestCase):
def _get_context(self):
context = test.get_test_context()
context.update({
"volume_types": [{"id": "fake_id",
"name": "fake_name"}]})
return context
def test_create_and_get_volume_type(self):
scenario = volume_types.CreateAndGetVolumeType(self.context)
scenario._create_volume_type = mock.Mock()
scenario._get_volume_type = mock.Mock()
scenario.run(fakeargs="f")
scenario._create_volume_type.assert_called_once_with(fakeargs="f")
scenario._get_volume_type.assert_called_once_with(
scenario._create_volume_type.return_value)
def test_create_and_delete_volume_type(self):
scenario = volume_types.CreateAndDeleteVolumeType(self.context)
scenario._create_volume_type = mock.Mock()
scenario._delete_volume_type = mock.Mock()
scenario.run(fakeargs="fakeargs")
scenario._create_volume_type.assert_called_once_with(
fakeargs="fakeargs")
scenario._delete_volume_type.assert_called_once_with(
scenario._create_volume_type.return_value)
def test_create_and_delete_encryption_type(self):
scenario = volume_types.CreateAndDeleteEncryptionType(
self._get_context())
scenario._create_encryption_type = mock.Mock()
scenario._delete_encryption_type = mock.Mock()
scenario.run(create_specs="fakecreatespecs")
scenario._create_encryption_type.assert_called_once_with(
"fake_id", "fakecreatespecs")
scenario._delete_encryption_type.assert_called_once_with(
"fake_id")
def test_create_volume_type_and_encryption_type(self):
scenario = volume_types.CreateVolumeTypeAndEncryptionType(self.context)
scenario._create_volume_type = mock.Mock()
scenario._create_encryption_type = mock.Mock()
scenario.run(specs="fakespecs", fakeargs="fakeargs")
scenario._create_volume_type.assert_called_once_with(
fakeargs="fakeargs")
scenario._create_encryption_type.assert_called_once_with(
scenario._create_volume_type.return_value, "fakespecs")
def test_create_and_list_encryption_type(self):
scenario = volume_types.CreateAndListEncryptionType(self.context)
scenario._create_volume_type = mock.Mock()
scenario._create_encryption_type = mock.Mock()
scenario._list_encryption_type = mock.Mock()
scenario.run(specs="fakespecs", search_opts="fakeopts",
fakeargs="fakeargs")
scenario._create_volume_type.assert_called_once_with(
fakeargs="fakeargs")
scenario._create_encryption_type.assert_called_once_with(
scenario._create_volume_type.return_value, "fakespecs")
scenario._list_encryption_type.assert_called_once_with(
"fakeopts")
def test_create_and_set_volume_type_keys(self):
scenario = volume_types.CreateAndSetVolumeTypeKeys(self.context)
volume_type = mock.MagicMock()
volume_type_key = {"volume_backend_name": "LVM_iSCSI"}
scenario._create_volume_type = mock.MagicMock()
scenario._set_volume_type_keys = mock.MagicMock()
scenario._create_volume_type.return_value = volume_type
scenario.run(volume_type_key, fakeargs="fakeargs")
scenario._create_volume_type.assert_called_once_with(
fakeargs="fakeargs")
scenario._set_volume_type_keys.assert_called_once_with(volume_type,
volume_type_key)
| en | 0.85856 | # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.88724 | 2 |
pretrained/__init__.py | SamujjwalSam/MatchingNetworks4XC | 0 | 6614355 | <filename>pretrained/__init__.py
from .TextEncoder import TextEncoder

# The import is kept only for its side effects; the name itself is not
# meant to be re-exported from this package, so it is removed from the
# module namespace right away.
del TextEncoder
| <filename>pretrained/__init__.py
from .TextEncoder import TextEncoder
# These are not required
del TextEncoder
| en | 0.957325 | # These are not required | 1.267832 | 1 |
QarnotCLI_Doc/CreateDoc/createDocMarkdown.py | qarnot/qarnot-cli | 1 | 6614356 | <gh_stars>1-10
import os
import sys
import subprocess
import collections
class CreateDoc:
    """Generate per-command documentation by running ``<cli> ... --help``
    for every verb/subverb and parsing the captured usage text."""

    def createCommandLine(self, commandLine, verb, subverb):
        """
        create the command line

        Joins the binary name, verb and subverb into one command line;
        empty parts leave no trailing whitespace thanks to strip().
        """
        return " ".join([commandLine, verb, subverb]).strip()

    def addHelpToLine(self, line):
        """
        add help to a line

        Appends the ``--help`` flag to an already-built command line.
        """
        return line + " --help"

    def createName(self, verb, subverb):
        """
        return an assemble of 2 capitalized names

        e.g. ("task", "list") -> "TaskList".
        """
        return verb.capitalize() + subverb.capitalize()

    def runCommandLine(self, cmdLine):
        """
        run a command line
        return it's stdout

        stderr is merged into stdout; raises CalledProcessError on a
        non-zero exit status (check=True).
        """
        process = subprocess.run(cmdLine.split(" "),
                                 check=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 universal_newlines=True)
        stdout = process.stdout
        return stdout

    def CommandList(self):
        """
        return commands for Windows and unix

        Index 0 is the Unix binary name, index 1 the Windows one.
        """
        commandList = ["qarnot", "qarnot.exe"]
        return commandList

    def parseUsage(self, usage):
        """
        get a man and split it to it's different values

        Layout assumed: line 1 = version, line 2 = copyright, then the
        usage block up to the first blank line, then the flag list.
        Returns (version, copyright, usage_text, flags_text).
        """
        newUsage = usage.splitlines()
        version = ""
        copyrightQarnot = ""
        UsageList = []
        FlagList = []
        # Simple state machine: 0=version, 1=copyright, 2=usage, 3=flags.
        step = 0
        for line in newUsage:
            if step == 0:
                version = line
                step += 1
            elif step == 1:
                copyrightQarnot = line
                step += 1
            elif step == 2:
                UsageList.append(line)
                # A blank line terminates the usage section.
                if line.strip() == "":
                    step += 1
            elif step == 3:
                FlagList.append(line)
        return version, copyrightQarnot, "\n".join(UsageList), "\n".join(FlagList)

    def CreateMan(self, name, commandUnix, commandDos, usage, helpLine):
        """
        assemble the man info extracted in a dictionary

        The inner OrderedDict keeps the sections in render order; the
        result is wrapped as {"name": <file name>, "value": <sections>}.
        """
        version, copyrightQarnot, usageList, flagList = self.parseUsage(usage)
        filename = name
        linesDict = collections.OrderedDict()
        linesDict["name"] = name
        linesDict["helpLine"] = helpLine
        linesDict["commandUnix"] = commandUnix
        linesDict["commandDos"] = commandDos
        linesDict["usageList"] = usageList
        linesDict["flagList"] = flagList
        linesDict["version"] = version
        linesDict["copyrightQarnot"] = copyrightQarnot
        return {"name": filename, "value": linesDict}

    def getHelpUsage(self, testCommand, verb, usageHelpList, newUsageList):
        """
        extract the command verbs from a "command help"

        Runs ``testCommand [verb] --help`` and harvests the sub-verbs
        listed in its usage: fills ``usageHelpList`` (CamelCase name ->
        [verb, help line]) and appends runnable verbs to ``newUsageList``
        (skipping "help" and "version"). Flag-like verbs are ignored.
        """
        if verb.startswith("-"):
            return
        lineCommand = self.createCommandLine(testCommand, verb, "")
        lineHelp = self.addHelpToLine(lineCommand)
        usage = self.runCommandLine(lineHelp)
        binaryName = "qarnot"
        for line in usage.splitlines():
            # A sub-verb line is indented but is neither a flag line nor
            # the binary-name usage line itself.
            if line.startswith(" ") and not line.startswith(" -") and not line.startswith(" " + binaryName):
                # Split on the column separator; drop empty fragments.
                sublines = [l for l in line.split(" ") if l]
                if len(sublines) == 2:
                    name = "".join([w.capitalize() for w in sublines[0].split(" ")])
                    usageHelpList[name] = [sublines[0].strip(), sublines[1].strip()]
                    verb = sublines[0].strip()
                    if verb != "help" and verb != "version":
                        newUsageList.append(sublines[0].strip())
                else:
                    # Line did not split into exactly (verb, help) columns.
                    print("Error in the usage verb parsing")
                    print(sublines)

    def GetManFormOneCommand(self, testCommand, commandUsage, key, elem, usageHelpList):
        """
        extract and split all the info of a man

        Runs ``testCommand key elem --help``, parses the output and
        appends the resulting man dictionary to ``commandUsage``.
        """
        commandList = self.CommandList()
        lineCommand = self.createCommandLine(testCommand, key, elem)
        lineHelp = self.addHelpToLine(lineCommand)
        usage = self.runCommandLine(lineHelp)
        name = self.createName(key, elem)
        # Display command lines for both platforms (Unix / Windows).
        lineCommandUnix = self.createCommandLine(commandList[0], key, elem)
        lineCommandDos = self.createCommandLine(commandList[1], key, elem)
        commandUsage.append(self.CreateMan(name, lineCommandUnix, lineCommandDos, usage, usageHelpList[name][1]))

    def CreateAndLaunchAllUsages(self, testCommand, printer):
        """
        Get the list of command to launch
        Launch all commands with a "--help"
        Split the verbs to the flags

        Discovers every verb (and sub-verb) of the CLI by recursively
        invoking ``--help``, builds a man page per command, and hands all
        results to ``printer.PrintInFile``.
        """
        newUsageList = []
        subverbList = []
        commandUsage = []
        usageHelpList = dict()
        SuvVerbusageHelpList = {}
        # Run the top-level help once to collect verb names and help lines.
        self.getHelpUsage(testCommand, "", SuvVerbusageHelpList, newUsageList)
        # Store the top-level command index.
        sinfo = {"name": "Commands", "value": SuvVerbusageHelpList}
        subverbList.append(sinfo)
        # Also merge them into the flat, full index.
        usageHelpList.update(SuvVerbusageHelpList.copy())
        # Launch every verb found at the top level.
        for key in newUsageList:
            subList = []
            # Fresh per-verb dict, same harvesting as above.
            SuvVerbusageHelpList = {}
            self.getHelpUsage(testCommand, key, SuvVerbusageHelpList, subList)
            usageHelpList.update(SuvVerbusageHelpList.copy())
            # Distinguish plain commands from commands with subcommands.
            if len(subList) == 0:
                # Plain command: parse its own usage directly.
                self.GetManFormOneCommand(testCommand, commandUsage, key, "", usageHelpList)
            else:
                # Keep the subcommand index for this verb.
                sinfo = {"name": key.capitalize(), "value": SuvVerbusageHelpList}
                subverbList.append(sinfo)
                # Launch every "verb subverb" combination found.
                for elem in subList:
                    # NOTE(review): entries appear to be "verb subverb"
                    # pairs — [1] keeps the subverb; confirm against
                    # getHelpUsage's output format.
                    elem = elem.split(" ")[1]
                    self.GetManFormOneCommand(testCommand, commandUsage, key, elem, usageHelpList)
        # Emit one file per command man page.
        for command in commandUsage:
            printer.PrintInFile(command["name"], command["value"])
        # Emit the per-verb subcommand lists (reversed so "Commands" last).
        subverbList.reverse()
        for subverb in subverbList:
            printer.PrintInFile(subverb["name"], {"subverbList": subverb})
        # Emit the global index of every discovered command.
        name = "ManIndex"
        indexDict = {"beginIndex": "", "IndexList": usageHelpList}
        printer.PrintInFile(name, indexDict)
class MarkdownFormat:
"""
class converting a dict to a string markDown format
for the printer
"""
def __init__(self, directory, extend):
"""
Get the directory and extend names to test the files
map all the functions to easly use them
every dict function return a string
"""
self.directory = directory
self.extend = extend
self.dictReturnValues = {}
self.dictReturnValues["name"] = self.CreateName
self.dictReturnValues["helpLine"] = self.CreateHelpLine
self.dictReturnValues["version"] = self.CreateVersion
self.dictReturnValues["copyrightQarnot"] = self.CreateCopyrightQarnot
self.dictReturnValues["commandUnix"] = self.CreateCommandUnix
self.dictReturnValues["commandDos"] = self.CreateCommandDos
self.dictReturnValues["usageList"] = self.CreateUsageList
self.dictReturnValues["flagList"] = self.CreateFlagList
self.dictReturnValues["beginIndex"] = self.CreateBeginIndex
self.dictReturnValues["IndexList"] = self.CreateIndexList
self.dictReturnValues["subverbList"] = self.CreateSubverbList
self.header = "# Qarnot CLI \n" + \
"> List of the commands\n" + \
"\n" + \
"\n"
# Man funections
def CreateName(self, key, value):
"""
Man name
"""
return "# {1}\n".format(key, value)
def CreateHelpLine(self, key, value):
"""
Man small explication line
"""
return "> {1} \n\n".format(key, value)
def CreateCommandUnix(self, key, value):
"""
Man unix command
"""
return "Unix \n```bash\n {1}\n```\n".format(key, value)
def CreateCommandDos(self, key, value):
"""
Man DOS command
"""
return "Windows \n```bash\n {1}\n```\n".format(key, value)
def CreateVersion(self, key, value):
"""
CLI Version
"""
return "*Version*: *{1}* \n".format(key, value)
def CreateCopyrightQarnot(self, key, value):
"""
CLI Copyright
"""
return "*Copyright*: *{1}* \n".format(key, value)
def CreateUsageList(self, key, value):
"""
Usage exemples
"""
return "***\n### {0} {1}\n***\n".format(value.split("\n")[0], " \n".join(["> * `" + line + "`" if line.startswith(" ") else "\n>" + line for line in value.strip().split("\n")[1:]]))
def CreateFlagList(self, key, value):
"""
Flag list
"""
return "### Flags: \n```bash\n {1}\n```\n".format(key, value)
# Table of man functions
def CreateBeginIndex(self, key, value):
return self.header + "### {1}\n| name | description |\n|:--:|:--|\n".format(key, value)
def CreateValuesIndex(self, key, value):
if key == "Help" or key == "Version":
return ""
if os.path.exists(os.path.join(self.directory, key + self.extend)):
return "|[{1}]({0}{3})|{2}|\n".format(key, value[0], value[1], self.extend)
print(os.path.join(self.directory, key + self.extend) + " not found")
return ""
# Command Tables
def CreateIndexList(self, key, valueDict):
ret = ""
for subKey in valueDict:
ret += self.CreateValuesIndex(subKey, valueDict[subKey])
return ret
# Sub command Tables
def CreateSubverbList(self, name, value):
# print(value)
retString = self.CreateBeginIndex("", value["name"])
for key in value["value"]:
retString += self.CreateValuesIndex(key, value["value"][key])
return retString
def CreateFormatedString(self, key, value):
"""
Start function
Launch the dictionary
or exit
"""
if key in self.dictReturnValues:
return self.dictReturnValues[key](key, value)
else:
print("\n\nvalue not found : " + key)
print(value)
exit(1)
class XMLFormat:
"""
class converting dict to string format
for the printer
"""
def CreateFormatedString(self, key, value):
return "<{}>\n{}\n</{}>\n".format(key, value, key)
class PrintDoc:
"""
Class printing in the document
"""
def FormatValue(self, key, value):
return self.format.CreateFormatedString(key, value)
def __init__(self, name, extention, form):
self.extention = extention
self.format = form
self.dirName = name
self.CreateDir(name)
def CreateDir(self, dirName):
"""
create the directory
"""
try:
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
def WriteInFile(self, dirName, fileName, linesDict):
filePath = os.path.join(dirName, fileName)
with open(filePath, 'w') as file:
for key in linesDict:
value = linesDict[key]
file.write(self.FormatValue(key, value))
def PrintInFile(self, fileBaseName, linesDict):
self.WriteInFile(self.dirName, fileBaseName + self.extention, linesDict)
def getCommandPath():
"""
GEt the first elem in the command path
Or send the usage
"""
if len(sys.argv) > 1:
return sys.argv[1]
else:
print("Usage : python3 createDoc.py [binary-path]")
print("Example: python3 createDoc.py /usr/bin/qarnot")
print(" python3 createDoc.py ../Realize/qarnot")
exit(0)
def main():
"""
Launch the main
"""
testCommand = getCommandPath()
directory = "manMarkDown"
file_extend = ".md"
form = MarkdownFormat(directory, file_extend)
printer = PrintDoc(directory, file_extend, form)
create = CreateDoc()
print("start of Cli Doc extraction")
create.CreateAndLaunchAllUsages(testCommand, printer)
print("end of Cli Doc extraction")
if __name__ == "__main__":
# execute only if run as a script
main()
| import os
import sys
import subprocess
import collections
class CreateDoc:
def createCommandLine(self, commandLine, verb, subverb):
"""
create the command line
"""
return " ".join([commandLine, verb, subverb]).strip()
def addHelpToLine(self, line):
"""
add help to a line
"""
return line + " --help"
def createName(self, verb, subverb):
"""
return an assemble of 2 capitalized names
"""
return verb.capitalize() + subverb.capitalize()
def runCommandLine(self, cmdLine):
"""
run a command line
return it's stdout
"""
process = subprocess.run(cmdLine.split(" "),
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
stdout = process.stdout
return stdout
def CommandList(self):
"""
return commands for Windows and unix
"""
commandList = ["qarnot", "qarnot.exe"]
return commandList
def parseUsage(self, usage):
"""
get a man and split it to it's different values
"""
newUsage = usage.splitlines()
version = ""
copyrightQarnot = ""
UsageList = []
FlagList = []
step = 0
for line in newUsage:
if step == 0:
version = line
step += 1
elif step == 1:
copyrightQarnot = line
step += 1
elif step == 2:
UsageList.append(line)
if line.strip() == "":
step += 1
elif step == 3:
FlagList.append(line)
return version, copyrightQarnot, "\n".join(UsageList), "\n".join(FlagList)
def CreateMan(self, name, commandUnix, commandDos, usage, helpLine):
"""
assemble the man info extracted in a dictionary
"""
version, copyrightQarnot, usageList, flagList = self.parseUsage(usage)
filename = name
linesDict = collections.OrderedDict()
linesDict["name"] = name
linesDict["helpLine"] = helpLine
linesDict["commandUnix"] = commandUnix
linesDict["commandDos"] = commandDos
linesDict["usageList"] = usageList
linesDict["flagList"] = flagList
linesDict["version"] = version
linesDict["copyrightQarnot"] = copyrightQarnot
return {"name": filename, "value": linesDict}
def getHelpUsage(self, testCommand, verb, usageHelpList, newUsageList):
"""
extract the command verbs from a "command help"
"""
if verb.startswith("-"):
return
lineCommand = self.createCommandLine(testCommand, verb, "")
lineHelp = self.addHelpToLine(lineCommand)
usage = self.runCommandLine(lineHelp)
binaryName = "qarnot"
for line in usage.splitlines():
if line.startswith(" ") and not line.startswith(" -") and not line.startswith(" " + binaryName):
sublines = [l for l in line.split(" ") if l]
if len(sublines) == 2:
name = "".join([w.capitalize() for w in sublines[0].split(" ")])
usageHelpList[name] = [sublines[0].strip(), sublines[1].strip()]
verb = sublines[0].strip()
if verb != "help" and verb != "version":
newUsageList.append(sublines[0].strip())
else:
print("Error in the usage verb parsing")
print(sublines)
def GetManFormOneCommand(self, testCommand, commandUsage, key, elem, usageHelpList):
"""
extract and split all the info of a man
"""
commandList = self.CommandList()
lineCommand = self.createCommandLine(testCommand, key, elem)
lineHelp = self.addHelpToLine(lineCommand)
usage = self.runCommandLine(lineHelp)
name = self.createName(key, elem)
lineCommandUnix = self.createCommandLine(commandList[0], key, elem)
lineCommandDos = self.createCommandLine(commandList[1], key, elem)
commandUsage.append(self.CreateMan(name, lineCommandUnix, lineCommandDos, usage, usageHelpList[name][1]))
def CreateAndLaunchAllUsages(self, testCommand, printer):
"""
Get the list of command to launch
Launch all commands with a "--help"
Split the verbs to the flags
"""
newUsageList = []
subverbList = []
commandUsage = []
usageHelpList = dict()
SuvVerbusageHelpList = {}
# get the help extract the commands names to launch them and extract the help line of the verbs
self.getHelpUsage(testCommand, "", SuvVerbusageHelpList, newUsageList)
# stock the Command
sinfo = {"name": "Commands", "value": SuvVerbusageHelpList}
subverbList.append(sinfo)
# stock again the command for the full list)
usageHelpList.update(SuvVerbusageHelpList.copy())
# launch all the command find
for key in newUsageList:
subList = []
# idem
SuvVerbusageHelpList = {}
self.getHelpUsage(testCommand, key, SuvVerbusageHelpList, subList)
usageHelpList.update(SuvVerbusageHelpList.copy())
# diff the basic command from the commands with subcommands
if len(subList) == 0:
# basic command is parse for it usage
self.GetManFormOneCommand(testCommand, commandUsage, key, "", usageHelpList)
else:
# get the subcommand names
sinfo = {"name": key.capitalize(), "value": SuvVerbusageHelpList}
subverbList.append(sinfo)
# launch all the command + subcommand find
for elem in subList:
elem = elem.split(" ")[1]
self.GetManFormOneCommand(testCommand, commandUsage, key, elem, usageHelpList)
# Print the commands
for command in commandUsage:
printer.PrintInFile(command["name"], command["value"])
# print the sub list
subverbList.reverse()
for subverb in subverbList:
printer.PrintInFile(subverb["name"], {"subverbList": subverb})
# print the big list
name = "ManIndex"
indexDict = {"beginIndex": "", "IndexList": usageHelpList}
printer.PrintInFile(name, indexDict)
class MarkdownFormat:
"""
class converting a dict to a string markDown format
for the printer
"""
def __init__(self, directory, extend):
"""
Get the directory and extend names to test the files
map all the functions to easly use them
every dict function return a string
"""
self.directory = directory
self.extend = extend
self.dictReturnValues = {}
self.dictReturnValues["name"] = self.CreateName
self.dictReturnValues["helpLine"] = self.CreateHelpLine
self.dictReturnValues["version"] = self.CreateVersion
self.dictReturnValues["copyrightQarnot"] = self.CreateCopyrightQarnot
self.dictReturnValues["commandUnix"] = self.CreateCommandUnix
self.dictReturnValues["commandDos"] = self.CreateCommandDos
self.dictReturnValues["usageList"] = self.CreateUsageList
self.dictReturnValues["flagList"] = self.CreateFlagList
self.dictReturnValues["beginIndex"] = self.CreateBeginIndex
self.dictReturnValues["IndexList"] = self.CreateIndexList
self.dictReturnValues["subverbList"] = self.CreateSubverbList
self.header = "# Qarnot CLI \n" + \
"> List of the commands\n" + \
"\n" + \
"\n"
# Man funections
def CreateName(self, key, value):
"""
Man name
"""
return "# {1}\n".format(key, value)
def CreateHelpLine(self, key, value):
"""
Man small explication line
"""
return "> {1} \n\n".format(key, value)
def CreateCommandUnix(self, key, value):
"""
Man unix command
"""
return "Unix \n```bash\n {1}\n```\n".format(key, value)
def CreateCommandDos(self, key, value):
"""
Man DOS command
"""
return "Windows \n```bash\n {1}\n```\n".format(key, value)
def CreateVersion(self, key, value):
"""
CLI Version
"""
return "*Version*: *{1}* \n".format(key, value)
def CreateCopyrightQarnot(self, key, value):
"""
CLI Copyright
"""
return "*Copyright*: *{1}* \n".format(key, value)
def CreateUsageList(self, key, value):
"""
Usage exemples
"""
return "***\n### {0} {1}\n***\n".format(value.split("\n")[0], " \n".join(["> * `" + line + "`" if line.startswith(" ") else "\n>" + line for line in value.strip().split("\n")[1:]]))
def CreateFlagList(self, key, value):
"""
Flag list
"""
return "### Flags: \n```bash\n {1}\n```\n".format(key, value)
# Table of man functions
def CreateBeginIndex(self, key, value):
return self.header + "### {1}\n| name | description |\n|:--:|:--|\n".format(key, value)
def CreateValuesIndex(self, key, value):
if key == "Help" or key == "Version":
return ""
if os.path.exists(os.path.join(self.directory, key + self.extend)):
return "|[{1}]({0}{3})|{2}|\n".format(key, value[0], value[1], self.extend)
print(os.path.join(self.directory, key + self.extend) + " not found")
return ""
# Command Tables
def CreateIndexList(self, key, valueDict):
ret = ""
for subKey in valueDict:
ret += self.CreateValuesIndex(subKey, valueDict[subKey])
return ret
# Sub command Tables
def CreateSubverbList(self, name, value):
# print(value)
retString = self.CreateBeginIndex("", value["name"])
for key in value["value"]:
retString += self.CreateValuesIndex(key, value["value"][key])
return retString
def CreateFormatedString(self, key, value):
"""
Start function
Launch the dictionary
or exit
"""
if key in self.dictReturnValues:
return self.dictReturnValues[key](key, value)
else:
print("\n\nvalue not found : " + key)
print(value)
exit(1)
class XMLFormat:
"""
class converting dict to string format
for the printer
"""
def CreateFormatedString(self, key, value):
return "<{}>\n{}\n</{}>\n".format(key, value, key)
class PrintDoc:
"""
Class printing in the document
"""
def FormatValue(self, key, value):
return self.format.CreateFormatedString(key, value)
def __init__(self, name, extention, form):
self.extention = extention
self.format = form
self.dirName = name
self.CreateDir(name)
def CreateDir(self, dirName):
"""
create the directory
"""
try:
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
def WriteInFile(self, dirName, fileName, linesDict):
filePath = os.path.join(dirName, fileName)
with open(filePath, 'w') as file:
for key in linesDict:
value = linesDict[key]
file.write(self.FormatValue(key, value))
def PrintInFile(self, fileBaseName, linesDict):
self.WriteInFile(self.dirName, fileBaseName + self.extention, linesDict)
def getCommandPath():
"""
GEt the first elem in the command path
Or send the usage
"""
if len(sys.argv) > 1:
return sys.argv[1]
else:
print("Usage : python3 createDoc.py [binary-path]")
print("Example: python3 createDoc.py /usr/bin/qarnot")
print(" python3 createDoc.py ../Realize/qarnot")
exit(0)
def main():
"""
Launch the main
"""
testCommand = getCommandPath()
directory = "manMarkDown"
file_extend = ".md"
form = MarkdownFormat(directory, file_extend)
printer = PrintDoc(directory, file_extend, form)
create = CreateDoc()
print("start of Cli Doc extraction")
create.CreateAndLaunchAllUsages(testCommand, printer)
print("end of Cli Doc extraction")
if __name__ == "__main__":
# execute only if run as a script
main() | en | 0.722569 | create the command line add help to a line return an assemble of 2 capitalized names run a command line return it's stdout return commands for Windows and unix get a man and split it to it's different values assemble the man info extracted in a dictionary extract the command verbs from a "command help" extract and split all the info of a man Get the list of command to launch Launch all commands with a "--help" Split the verbs to the flags # get the help extract the commands names to launch them and extract the help line of the verbs # stock the Command # stock again the command for the full list) # launch all the command find # idem # diff the basic command from the commands with subcommands # basic command is parse for it usage # get the subcommand names # launch all the command + subcommand find # Print the commands # print the sub list # print the big list class converting a dict to a string markDown format for the printer Get the directory and extend names to test the files map all the functions to easly use them every dict function return a string # Man funections Man name Man small explication line Man unix command Man DOS command CLI Version CLI Copyright Usage exemples ### {0} {1}\n***\n".format(value.split("\n")[0], " \n".join(["> * `" + line + "`" if line.startswith(" ") else "\n>" + line for line in value.strip().split("\n")[1:]])) Flag list ## Flags: \n```bash\n {1}\n```\n".format(key, value) # Table of man functions ## {1}\n| name | description |\n|:--:|:--|\n".format(key, value) # Command Tables # Sub command Tables # print(value) Start function Launch the dictionary or exit class converting dict to string format for the printer Class printing in the document create the directory GEt the first elem in the command path Or send the usage Launch the main # execute only if run as a script | 3.032801 | 3 |
PyBall/models/draft/person.py | a-hacker/PyBall | 0 | 6614357 | <reponame>a-hacker/PyBall
from PyBall.models import BaseModel
from PyBall.models.pitch_hand import PitchHand
from PyBall.models.bat_side import BatSide
from PyBall.models.primary_position import PrimaryPostion
class Person(BaseModel):
_fields = {
'id': {'default_value': None, 'field_type': str},
'fullName': {'default_value': None, 'field_type': str},
'link': {'default_value': None, 'field_type': str},
'firstName': {'default_value': None, 'field_type': str},
'lastName': {'default_value': None, 'field_type': str},
'birthDate': {'default_value': None, 'field_type': str},
'birthCountry': {'default_value': None, 'field_type': str},
'primaryPosition': {'default_value': {}, 'field_type': PrimaryPostion},
'batSide': {'default_value': {}, 'field_type': BatSide},
'pitchHand': {'default_value': {}, 'field_type': PitchHand},
'nameSlug': {'default_value': None, 'field_type': str},
}
| from PyBall.models import BaseModel
from PyBall.models.pitch_hand import PitchHand
from PyBall.models.bat_side import BatSide
from PyBall.models.primary_position import PrimaryPostion
class Person(BaseModel):
_fields = {
'id': {'default_value': None, 'field_type': str},
'fullName': {'default_value': None, 'field_type': str},
'link': {'default_value': None, 'field_type': str},
'firstName': {'default_value': None, 'field_type': str},
'lastName': {'default_value': None, 'field_type': str},
'birthDate': {'default_value': None, 'field_type': str},
'birthCountry': {'default_value': None, 'field_type': str},
'primaryPosition': {'default_value': {}, 'field_type': PrimaryPostion},
'batSide': {'default_value': {}, 'field_type': BatSide},
'pitchHand': {'default_value': {}, 'field_type': PitchHand},
'nameSlug': {'default_value': None, 'field_type': str},
} | none | 1 | 2.479476 | 2 | |
webview/js/npo.py | hemid32/pywebview | 3,093 | 6614358 | <reponame>hemid32/pywebview
src = """
/*! Native Promise Only
v0.8.1 (c) <NAME>
MIT License: http://getify.mit-license.org
*/
(function UMD(name,context,definition){
// special form of UMD for polyfilling across evironments
context[name] = context[name] || definition();
if (typeof module != "undefined" && module.exports) { module.exports = context[name]; }
else if (typeof define == "function" && define.amd) { define(function $AMD$(){ return context[name]; }); }
})("Promise",typeof global != "undefined" ? global : this,function DEF(){
/*jshint validthis:true */
"use strict";
var builtInProp, cycle, scheduling_queue,
ToString = Object.prototype.toString,
timer = (typeof setImmediate != "undefined") ?
function timer(fn) { return setImmediate(fn); } :
setTimeout
;
// dammit, IE8.
try {
Object.defineProperty({},"x",{});
builtInProp = function builtInProp(obj,name,val,config) {
return Object.defineProperty(obj,name,{
value: val,
writable: true,
configurable: config !== false
});
};
}
catch (err) {
builtInProp = function builtInProp(obj,name,val) {
obj[name] = val;
return obj;
};
}
// Note: using a queue instead of array for efficiency
scheduling_queue = (function Queue() {
var first, last, item;
function Item(fn,self) {
this.fn = fn;
this.self = self;
this.next = void 0;
}
return {
add: function add(fn,self) {
item = new Item(fn,self);
if (last) {
last.next = item;
}
else {
first = item;
}
last = item;
item = void 0;
},
drain: function drain() {
var f = first;
first = last = cycle = void 0;
while (f) {
f.fn.call(f.self);
f = f.next;
}
}
};
})();
function schedule(fn,self) {
scheduling_queue.add(fn,self);
if (!cycle) {
cycle = timer(scheduling_queue.drain);
}
}
// promise duck typing
function isThenable(o) {
var _then, o_type = typeof o;
if (o != null &&
(
o_type == "object" || o_type == "function"
)
) {
_then = o.then;
}
return typeof _then == "function" ? _then : false;
}
function notify() {
for (var i=0; i<this.chain.length; i++) {
notifyIsolated(
this,
(this.state === 1) ? this.chain[i].success : this.chain[i].failure,
this.chain[i]
);
}
this.chain.length = 0;
}
// NOTE: This is a separate function to isolate
// the `try..catch` so that other code can be
// optimized better
function notifyIsolated(self,cb,chain) {
var ret, _then;
try {
if (cb === false) {
chain.reject(self.msg);
}
else {
if (cb === true) {
ret = self.msg;
}
else {
ret = cb.call(void 0,self.msg);
}
if (ret === chain.promise) {
chain.reject(TypeError("Promise-chain cycle"));
}
else if (_then = isThenable(ret)) {
_then.call(ret,chain.resolve,chain.reject);
}
else {
chain.resolve(ret);
}
}
}
catch (err) {
chain.reject(err);
}
}
function resolve(msg) {
var _then, self = this;
// already triggered?
if (self.triggered) { return; }
self.triggered = true;
// unwrap
if (self.def) {
self = self.def;
}
try {
if (_then = isThenable(msg)) {
schedule(function(){
var def_wrapper = new MakeDefWrapper(self);
try {
_then.call(msg,
function $resolve$(){ resolve.apply(def_wrapper,arguments); },
function $reject$(){ reject.apply(def_wrapper,arguments); }
);
}
catch (err) {
reject.call(def_wrapper,err);
}
})
}
else {
self.msg = msg;
self.state = 1;
if (self.chain.length > 0) {
schedule(notify,self);
}
}
}
catch (err) {
reject.call(new MakeDefWrapper(self),err);
}
}
function reject(msg) {
var self = this;
// already triggered?
if (self.triggered) { return; }
self.triggered = true;
// unwrap
if (self.def) {
self = self.def;
}
self.msg = msg;
self.state = 2;
if (self.chain.length > 0) {
schedule(notify,self);
}
}
function iteratePromises(Constructor,arr,resolver,rejecter) {
for (var idx=0; idx<arr.length; idx++) {
(function IIFE(idx){
Constructor.resolve(arr[idx])
.then(
function $resolver$(msg){
resolver(idx,msg);
},
rejecter
);
})(idx);
}
}
function MakeDefWrapper(self) {
this.def = self;
this.triggered = false;
}
function MakeDef(self) {
this.promise = self;
this.state = 0;
this.triggered = false;
this.chain = [];
this.msg = void 0;
}
function Promise(executor) {
if (typeof executor != "function") {
throw TypeError("Not a function");
}
if (this.__NPO__ !== 0) {
throw TypeError("Not a promise");
}
// instance shadowing the inherited "brand"
// to signal an already "initialized" promise
this.__NPO__ = 1;
var def = new MakeDef(this);
this["then"] = function then(success,failure) {
var o = {
success: typeof success == "function" ? success : true,
failure: typeof failure == "function" ? failure : false
};
// Note: `then(..)` itself can be borrowed to be used against
// a different promise constructor for making the chained promise,
// by substituting a different `this` binding.
o.promise = new this.constructor(function extractChain(resolve,reject) {
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
o.resolve = resolve;
o.reject = reject;
});
def.chain.push(o);
if (def.state !== 0) {
schedule(notify,def);
}
return o.promise;
};
this["catch"] = function $catch$(failure) {
return this.then(void 0,failure);
};
try {
executor.call(
void 0,
function publicResolve(msg){
resolve.call(def,msg);
},
function publicReject(msg) {
reject.call(def,msg);
}
);
}
catch (err) {
reject.call(def,err);
}
}
var PromisePrototype = builtInProp({},"constructor",Promise,
/*configurable=*/false
);
// Note: Android 4 cannot use `Object.defineProperty(..)` here
Promise.prototype = PromisePrototype;
// built-in "brand" to signal an "uninitialized" promise
builtInProp(PromisePrototype,"__NPO__",0,
/*configurable=*/false
);
builtInProp(Promise,"resolve",function Promise$resolve(msg) {
var Constructor = this;
// spec mandated checks
// note: best "isPromise" check that's practical for now
if (msg && typeof msg == "object" && msg.__NPO__ === 1) {
return msg;
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
resolve(msg);
});
});
builtInProp(Promise,"reject",function Promise$reject(msg) {
return new this(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
reject(msg);
});
});
builtInProp(Promise,"all",function Promise$all(arr) {
var Constructor = this;
// spec mandated checks
if (ToString.call(arr) != "[object Array]") {
return Constructor.reject(TypeError("Not an array"));
}
if (arr.length === 0) {
return Constructor.resolve([]);
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
var len = arr.length, msgs = Array(len), count = 0;
iteratePromises(Constructor,arr,function resolver(idx,msg) {
msgs[idx] = msg;
if (++count === len) {
resolve(msgs);
}
},reject);
});
});
builtInProp(Promise,"race",function Promise$race(arr) {
var Constructor = this;
// spec mandated checks
if (ToString.call(arr) != "[object Array]") {
return Constructor.reject(TypeError("Not an array"));
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
iteratePromises(Constructor,arr,function resolver(idx,msg){
resolve(msg);
},reject);
});
});
return Promise;
});
""" | src = """
/*! Native Promise Only
v0.8.1 (c) <NAME>
MIT License: http://getify.mit-license.org
*/
(function UMD(name,context,definition){
// special form of UMD for polyfilling across evironments
context[name] = context[name] || definition();
if (typeof module != "undefined" && module.exports) { module.exports = context[name]; }
else if (typeof define == "function" && define.amd) { define(function $AMD$(){ return context[name]; }); }
})("Promise",typeof global != "undefined" ? global : this,function DEF(){
/*jshint validthis:true */
"use strict";
var builtInProp, cycle, scheduling_queue,
ToString = Object.prototype.toString,
timer = (typeof setImmediate != "undefined") ?
function timer(fn) { return setImmediate(fn); } :
setTimeout
;
// dammit, IE8.
try {
Object.defineProperty({},"x",{});
builtInProp = function builtInProp(obj,name,val,config) {
return Object.defineProperty(obj,name,{
value: val,
writable: true,
configurable: config !== false
});
};
}
catch (err) {
builtInProp = function builtInProp(obj,name,val) {
obj[name] = val;
return obj;
};
}
// Note: using a queue instead of array for efficiency
scheduling_queue = (function Queue() {
var first, last, item;
function Item(fn,self) {
this.fn = fn;
this.self = self;
this.next = void 0;
}
return {
add: function add(fn,self) {
item = new Item(fn,self);
if (last) {
last.next = item;
}
else {
first = item;
}
last = item;
item = void 0;
},
drain: function drain() {
var f = first;
first = last = cycle = void 0;
while (f) {
f.fn.call(f.self);
f = f.next;
}
}
};
})();
function schedule(fn,self) {
scheduling_queue.add(fn,self);
if (!cycle) {
cycle = timer(scheduling_queue.drain);
}
}
// promise duck typing
function isThenable(o) {
var _then, o_type = typeof o;
if (o != null &&
(
o_type == "object" || o_type == "function"
)
) {
_then = o.then;
}
return typeof _then == "function" ? _then : false;
}
function notify() {
for (var i=0; i<this.chain.length; i++) {
notifyIsolated(
this,
(this.state === 1) ? this.chain[i].success : this.chain[i].failure,
this.chain[i]
);
}
this.chain.length = 0;
}
// NOTE: This is a separate function to isolate
// the `try..catch` so that other code can be
// optimized better
function notifyIsolated(self,cb,chain) {
var ret, _then;
try {
if (cb === false) {
chain.reject(self.msg);
}
else {
if (cb === true) {
ret = self.msg;
}
else {
ret = cb.call(void 0,self.msg);
}
if (ret === chain.promise) {
chain.reject(TypeError("Promise-chain cycle"));
}
else if (_then = isThenable(ret)) {
_then.call(ret,chain.resolve,chain.reject);
}
else {
chain.resolve(ret);
}
}
}
catch (err) {
chain.reject(err);
}
}
function resolve(msg) {
var _then, self = this;
// already triggered?
if (self.triggered) { return; }
self.triggered = true;
// unwrap
if (self.def) {
self = self.def;
}
try {
if (_then = isThenable(msg)) {
schedule(function(){
var def_wrapper = new MakeDefWrapper(self);
try {
_then.call(msg,
function $resolve$(){ resolve.apply(def_wrapper,arguments); },
function $reject$(){ reject.apply(def_wrapper,arguments); }
);
}
catch (err) {
reject.call(def_wrapper,err);
}
})
}
else {
self.msg = msg;
self.state = 1;
if (self.chain.length > 0) {
schedule(notify,self);
}
}
}
catch (err) {
reject.call(new MakeDefWrapper(self),err);
}
}
function reject(msg) {
var self = this;
// already triggered?
if (self.triggered) { return; }
self.triggered = true;
// unwrap
if (self.def) {
self = self.def;
}
self.msg = msg;
self.state = 2;
if (self.chain.length > 0) {
schedule(notify,self);
}
}
function iteratePromises(Constructor,arr,resolver,rejecter) {
for (var idx=0; idx<arr.length; idx++) {
(function IIFE(idx){
Constructor.resolve(arr[idx])
.then(
function $resolver$(msg){
resolver(idx,msg);
},
rejecter
);
})(idx);
}
}
function MakeDefWrapper(self) {
this.def = self;
this.triggered = false;
}
function MakeDef(self) {
this.promise = self;
this.state = 0;
this.triggered = false;
this.chain = [];
this.msg = void 0;
}
function Promise(executor) {
if (typeof executor != "function") {
throw TypeError("Not a function");
}
if (this.__NPO__ !== 0) {
throw TypeError("Not a promise");
}
// instance shadowing the inherited "brand"
// to signal an already "initialized" promise
this.__NPO__ = 1;
var def = new MakeDef(this);
this["then"] = function then(success,failure) {
var o = {
success: typeof success == "function" ? success : true,
failure: typeof failure == "function" ? failure : false
};
// Note: `then(..)` itself can be borrowed to be used against
// a different promise constructor for making the chained promise,
// by substituting a different `this` binding.
o.promise = new this.constructor(function extractChain(resolve,reject) {
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
o.resolve = resolve;
o.reject = reject;
});
def.chain.push(o);
if (def.state !== 0) {
schedule(notify,def);
}
return o.promise;
};
this["catch"] = function $catch$(failure) {
return this.then(void 0,failure);
};
try {
executor.call(
void 0,
function publicResolve(msg){
resolve.call(def,msg);
},
function publicReject(msg) {
reject.call(def,msg);
}
);
}
catch (err) {
reject.call(def,err);
}
}
// --- Prototype wiring -------------------------------------------------
// Create the prototype object with a `constructor` back-reference via the
// feature-detected `builtInProp` helper (falls back to plain assignment
// on engines without a working Object.defineProperty, e.g. IE8).
var PromisePrototype = builtInProp({},"constructor",Promise,
/*configurable=*/false
);
// Note: Android 4 cannot use `Object.defineProperty(..)` here
Promise.prototype = PromisePrototype;
// built-in "brand" to signal an "uninitialized" promise
builtInProp(PromisePrototype,"__NPO__",0,
/*configurable=*/false
);
// Promise.resolve(..): returns `msg` untouched when it is already one of
// this library's promises (brand check), otherwise adopts it via a new
// promise of the (possibly subclassed) `this` constructor.
builtInProp(Promise,"resolve",function Promise$resolve(msg) {
var Constructor = this;
// spec mandated checks
// note: best "isPromise" check that's practical for now
if (msg && typeof msg == "object" && msg.__NPO__ === 1) {
return msg;
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
resolve(msg);
});
});
// Promise.reject(..): a promise rejected with `msg` (no thenable adoption).
builtInProp(Promise,"reject",function Promise$reject(msg) {
return new this(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
reject(msg);
});
});
// Promise.all(..): resolves with an array of results once every entry
// resolves; rejects on the first rejection. An empty array resolves to [].
builtInProp(Promise,"all",function Promise$all(arr) {
var Constructor = this;
// spec mandated checks
if (ToString.call(arr) != "[object Array]") {
return Constructor.reject(TypeError("Not an array"));
}
if (arr.length === 0) {
return Constructor.resolve([]);
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
var len = arr.length, msgs = Array(len), count = 0;
iteratePromises(Constructor,arr,function resolver(idx,msg) {
msgs[idx] = msg;
if (++count === len) {
resolve(msgs);
}
},reject);
});
});
// Promise.race(..): settles with whichever entry settles first.
builtInProp(Promise,"race",function Promise$race(arr) {
var Constructor = this;
// spec mandated checks
if (ToString.call(arr) != "[object Array]") {
return Constructor.reject(TypeError("Not an array"));
}
return new Constructor(function executor(resolve,reject){
if (typeof resolve != "function" || typeof reject != "function") {
throw TypeError("Not a function");
}
iteratePromises(Constructor,arr,function resolver(idx,msg){
resolve(msg);
},reject);
});
});
return Promise;
});
""" | en | 0.274619 | /*! Native Promise Only v0.8.1 (c) <NAME> MIT License: http://getify.mit-license.org */ (function UMD(name,context,definition){ // special form of UMD for polyfilling across evironments context[name] = context[name] || definition(); if (typeof module != "undefined" && module.exports) { module.exports = context[name]; } else if (typeof define == "function" && define.amd) { define(function $AMD$(){ return context[name]; }); } })("Promise",typeof global != "undefined" ? global : this,function DEF(){ /*jshint validthis:true */ "use strict"; var builtInProp, cycle, scheduling_queue, ToString = Object.prototype.toString, timer = (typeof setImmediate != "undefined") ? function timer(fn) { return setImmediate(fn); } : setTimeout ; // dammit, IE8. try { Object.defineProperty({},"x",{}); builtInProp = function builtInProp(obj,name,val,config) { return Object.defineProperty(obj,name,{ value: val, writable: true, configurable: config !== false }); }; } catch (err) { builtInProp = function builtInProp(obj,name,val) { obj[name] = val; return obj; }; } // Note: using a queue instead of array for efficiency scheduling_queue = (function Queue() { var first, last, item; function Item(fn,self) { this.fn = fn; this.self = self; this.next = void 0; } return { add: function add(fn,self) { item = new Item(fn,self); if (last) { last.next = item; } else { first = item; } last = item; item = void 0; }, drain: function drain() { var f = first; first = last = cycle = void 0; while (f) { f.fn.call(f.self); f = f.next; } } }; })(); function schedule(fn,self) { scheduling_queue.add(fn,self); if (!cycle) { cycle = timer(scheduling_queue.drain); } } // promise duck typing function isThenable(o) { var _then, o_type = typeof o; if (o != null && ( o_type == "object" || o_type == "function" ) ) { _then = o.then; } return typeof _then == "function" ? _then : false; } function notify() { for (var i=0; i<this.chain.length; i++) { notifyIsolated( this, (this.state === 1) ? 
this.chain[i].success : this.chain[i].failure, this.chain[i] ); } this.chain.length = 0; } // NOTE: This is a separate function to isolate // the `try..catch` so that other code can be // optimized better function notifyIsolated(self,cb,chain) { var ret, _then; try { if (cb === false) { chain.reject(self.msg); } else { if (cb === true) { ret = self.msg; } else { ret = cb.call(void 0,self.msg); } if (ret === chain.promise) { chain.reject(TypeError("Promise-chain cycle")); } else if (_then = isThenable(ret)) { _then.call(ret,chain.resolve,chain.reject); } else { chain.resolve(ret); } } } catch (err) { chain.reject(err); } } function resolve(msg) { var _then, self = this; // already triggered? if (self.triggered) { return; } self.triggered = true; // unwrap if (self.def) { self = self.def; } try { if (_then = isThenable(msg)) { schedule(function(){ var def_wrapper = new MakeDefWrapper(self); try { _then.call(msg, function $resolve$(){ resolve.apply(def_wrapper,arguments); }, function $reject$(){ reject.apply(def_wrapper,arguments); } ); } catch (err) { reject.call(def_wrapper,err); } }) } else { self.msg = msg; self.state = 1; if (self.chain.length > 0) { schedule(notify,self); } } } catch (err) { reject.call(new MakeDefWrapper(self),err); } } function reject(msg) { var self = this; // already triggered? 
if (self.triggered) { return; } self.triggered = true; // unwrap if (self.def) { self = self.def; } self.msg = msg; self.state = 2; if (self.chain.length > 0) { schedule(notify,self); } } function iteratePromises(Constructor,arr,resolver,rejecter) { for (var idx=0; idx<arr.length; idx++) { (function IIFE(idx){ Constructor.resolve(arr[idx]) .then( function $resolver$(msg){ resolver(idx,msg); }, rejecter ); })(idx); } } function MakeDefWrapper(self) { this.def = self; this.triggered = false; } function MakeDef(self) { this.promise = self; this.state = 0; this.triggered = false; this.chain = []; this.msg = void 0; } function Promise(executor) { if (typeof executor != "function") { throw TypeError("Not a function"); } if (this.__NPO__ !== 0) { throw TypeError("Not a promise"); } // instance shadowing the inherited "brand" // to signal an already "initialized" promise this.__NPO__ = 1; var def = new MakeDef(this); this["then"] = function then(success,failure) { var o = { success: typeof success == "function" ? success : true, failure: typeof failure == "function" ? failure : false }; // Note: `then(..)` itself can be borrowed to be used against // a different promise constructor for making the chained promise, // by substituting a different `this` binding. 
o.promise = new this.constructor(function extractChain(resolve,reject) { if (typeof resolve != "function" || typeof reject != "function") { throw TypeError("Not a function"); } o.resolve = resolve; o.reject = reject; }); def.chain.push(o); if (def.state !== 0) { schedule(notify,def); } return o.promise; }; this["catch"] = function $catch$(failure) { return this.then(void 0,failure); }; try { executor.call( void 0, function publicResolve(msg){ resolve.call(def,msg); }, function publicReject(msg) { reject.call(def,msg); } ); } catch (err) { reject.call(def,err); } } var PromisePrototype = builtInProp({},"constructor",Promise, /*configurable=*/false ); // Note: Android 4 cannot use `Object.defineProperty(..)` here Promise.prototype = PromisePrototype; // built-in "brand" to signal an "uninitialized" promise builtInProp(PromisePrototype,"__NPO__",0, /*configurable=*/false ); builtInProp(Promise,"resolve",function Promise$resolve(msg) { var Constructor = this; // spec mandated checks // note: best "isPromise" check that's practical for now if (msg && typeof msg == "object" && msg.__NPO__ === 1) { return msg; } return new Constructor(function executor(resolve,reject){ if (typeof resolve != "function" || typeof reject != "function") { throw TypeError("Not a function"); } resolve(msg); }); }); builtInProp(Promise,"reject",function Promise$reject(msg) { return new this(function executor(resolve,reject){ if (typeof resolve != "function" || typeof reject != "function") { throw TypeError("Not a function"); } reject(msg); }); }); builtInProp(Promise,"all",function Promise$all(arr) { var Constructor = this; // spec mandated checks if (ToString.call(arr) != "[object Array]") { return Constructor.reject(TypeError("Not an array")); } if (arr.length === 0) { return Constructor.resolve([]); } return new Constructor(function executor(resolve,reject){ if (typeof resolve != "function" || typeof reject != "function") { throw TypeError("Not a function"); } var len = arr.length, msgs = 
Array(len), count = 0; iteratePromises(Constructor,arr,function resolver(idx,msg) { msgs[idx] = msg; if (++count === len) { resolve(msgs); } },reject); }); }); builtInProp(Promise,"race",function Promise$race(arr) { var Constructor = this; // spec mandated checks if (ToString.call(arr) != "[object Array]") { return Constructor.reject(TypeError("Not an array")); } return new Constructor(function executor(resolve,reject){ if (typeof resolve != "function" || typeof reject != "function") { throw TypeError("Not a function"); } iteratePromises(Constructor,arr,function resolver(idx,msg){ resolve(msg); },reject); }); }); return Promise; }); | 2.05857 | 2 |
chatapp/models.py | rpurohit/Dj-Chat | 0 | 6614359 | #chatapp/models.py
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Message(models.Model):
    """A single chat message posted by a user in a named room."""

    # The posting user; deleting the user cascades to their messages.
    author = models.ForeignKey(User,
                               related_name='author_messages',
                               on_delete=models.CASCADE)
    content = models.TextField()    # message body
    roomname = models.TextField()   # free-form room identifier
    timestamp = models.DateTimeField(auto_now_add=True)  # set once on creation

    def __str__(self):
        return self.author.username

    def previous_messages(self, roomname):
        """Return all messages for *roomname*, newest first.

        Note: the query does not depend on ``self``; it returns the whole
        history of the given room.
        """
        # The trailing ``.all()`` was redundant: the queryset is already
        # fully specified after ``filter(...).order_by(...)``.
        return Message.objects.filter(roomname=roomname).order_by('-timestamp')
| #chatapp/models.py
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Message(models.Model):
author = models.ForeignKey(User,
related_name='author_messages',
on_delete=models.CASCADE)
content = models.TextField()
roomname = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.author.username
def previous_messages(self, roomname):
return Message.objects.filter(roomname=roomname).order_by('-timestamp').all()
| en | 0.188668 | #chatapp/models.py | 2.279433 | 2 |
uScopeBackend/registers_manager.py | uscope-platform/uscope_server | 0 | 6614360 | <gh_stars>0
# Copyright 2021 University of Nottingham Ningbo China
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app, Blueprint, jsonify, request
from flask_restful import Api, Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
from . import role_required
############################################################
# IMPLEMENTATION #
############################################################
registers_manager_bp = Blueprint('regusters_manager', __name__, url_prefix='/registers')
api = Api(registers_manager_bp)
class RegisterValue(Resource):
    """REST resource for writing register values on a peripheral."""

    @jwt_required()
    @role_required("operator")
    def get(self):
        # Reading a single register value is not implemented yet.
        pass

    @jwt_required()
    @role_required("operator")
    def post(self, peripheral):
        """Write the registers in the request payload to *peripheral*."""
        body = request.get_json(force=True)
        requester = get_jwt_identity()
        current_app.register_mgr.set_register_value(peripheral, body['payload'], requester)
        return '200'
class RegisterDescriptions(Resource):
    """REST resource exposing the register map of one peripheral."""

    @jwt_required()
    @role_required("operator")
    def get(self, peripheral):
        """Return the register descriptions (with current values) for *peripheral*."""
        requester = get_jwt_identity()
        descriptions = current_app.register_mgr.get_registers_descriptions(peripheral, requester)
        return jsonify(descriptions)

    @jwt_required()
    @role_required("operator")
    def post(self):
        # Updating register descriptions is not supported yet.
        pass
class PeripheralsSpecs(Resource):
    """REST resource listing the specifications of every known peripheral."""

    @jwt_required()
    @role_required("operator")
    def get(self):
        peripherals = current_app.register_mgr.get_all_peripherals()
        return jsonify(peripherals)
class PeripheralsDigest(Resource):
    """REST resource returning a digest (hash) of the peripherals list."""

    @jwt_required()
    @role_required("operator")
    def get(self):
        digest = current_app.register_mgr.get_peripherals_digest()
        return digest
class RegistersBulkWrite(Resource):
    """REST resource performing several register writes in one request."""

    @jwt_required()
    @role_required("operator")
    def post(self):
        body = request.get_json(force=True)
        current_app.register_mgr.bulk_write(body['payload'])
        return '200'
api.add_resource(RegisterValue, '/<string:peripheral>/value')
api.add_resource(RegisterDescriptions, '/<string:peripheral>/descriptions')
api.add_resource(PeripheralsSpecs, '/all_peripheral/descriptions')
api.add_resource(RegistersBulkWrite, '/bulk_write')
api.add_resource(PeripheralsDigest, '/digest')
############################################################
# IMPLEMENTATION #
############################################################
class RegistersManager:
    """Business logic for reading and writing peripheral registers.

    Bridges the REST layer to the hardware ``interface`` (register
    read/write, possibly through a proxy peripheral) and the ``store``
    (peripheral specifications and per-user settings).
    """

    def __init__(self, interface, store):
        # interface: object exposing read_register / write_register /
        #            write_proxied_register
        # store: database facade with .Elements (specs) and .Settings
        self.interface = interface
        self.data_store = store.Elements
        self.settings_store = store.Settings

    def get_all_peripherals(self):
        """Returns all the peripherals present in the database

        Returns:
            List:list of peripherals in the database
        """
        return self.data_store.get_peripherals_dict()

    def get_peripherals_digest(self):
        """Returns a hash of the jsonified peripherals list

        Returns:
            str:Digest of the peripherals present in the database
        """
        return self.data_store.get_peripherals_hash()

    def get_registers_descriptions(self, peripheral_name, username):
        """Returns the specification for the registers of the specified peripheral

        Parameters:
            peripheral_name: name of the peripheral whose registers need to be returned
            username: username of the request issuer
        Returns:
            List:list of registers in the peripheral
        """
        app = self.settings_store.get_per_user_value('chosen_application', username)
        found = False
        # Substring match against the user's chosen application; if several
        # peripherals match, the last one wins (parameters/base_address are
        # overwritten on each hit).
        for peripheral in app['peripherals']:
            if peripheral_name in peripheral['peripheral_id']:
                found = True
                parameters = self.data_store.get_peripheral(peripheral['spec_id'])
                base_address = int(peripheral['base_address'], 0)
        if not found:
            raise ValueError("The component register file was not found")
        registers_values = {}
        for i in parameters['registers']:
            # Only readable ('R'/'r' direction) registers of non-proxied
            # peripherals are read from hardware; everything else reports 0.
            if ('R' in i['direction'] or 'r' in i['direction']) and not current_app.app_mgr.peripheral_is_proxied(
                    peripheral_name, username):
                address = base_address + int(i['offset'], 0)
                # NOTE(review): both branches currently perform the same
                # single read; the 'words' format presumably needs a
                # multi-word read -- confirm intended behavior.
                if i['register_format'] == 'words':
                    registers_values[i['register_name']] = self.interface.read_register(address)
                else:
                    registers_values[i['register_name']] = self.interface.read_register(address)
            else:
                registers_values[i['register_name']] = 0
        return {'peripheral_name': parameters['peripheral_name'], 'registers': registers_values}

    def get_register_value(self, peripheral_name, register_name):
        # Not implemented yet.
        pass

    def set_register_value(self, peripheral, register, username):
        """Writes to a specified register

        Parameters:
            peripheral: name of the peripheral whose register is written
            register: dictionary containing the register name and value
            username: username of the requester
        """
        base_address = int(current_app.app_mgr.get_peripheral_base_address(peripheral, username), 0)
        # Proxied peripherals are not on the CPU bus and must be reached
        # through a proxy peripheral; direct ones are written straight away.
        if current_app.app_mgr.peripheral_is_proxied(peripheral, username):
            proxy_addr = int(current_app.app_mgr.get_peripheral_proxy_address(peripheral, username), 0)
            self.__set_proxied_register_value(register, base_address, proxy_addr)
        else:
            self.__set_direct_register_value(register, base_address)

    def bulk_write(self, registers):
        """ Perform a bulk register write operations

        Parameters:
            registers: List of dictionaries containing the details for a single register write
        """
        for i in registers:
            self.interface.write_register(i['address'], i['value'])

    # TODO: REFACTOR THESE METHODS AWAY, PUSHING THIS LOGIC TO THE CLIENT
    def __set_direct_register_value(self, register, base_address):
        """Writes to a register that is directly accessible through the CPU bus itself

        Parameters:
            register: dictionary containing the details of the register write to perform
            base_address: base address of the peripheral to write to
        """
        periph = register['peripheral']
        peripheral_registers = self.data_store.get_peripheral(periph)['registers']
        # Look the register up by either its ID or its human-readable name.
        for i in peripheral_registers:
            if i['ID'] == register['name'] or i['register_name'] == register['name']:
                address = base_address + int(i['offset'], 0)
                value = register['value']
                print(f'DIRECT WRITE: writen: {value} to register at address: {hex(address)}')
                self.interface.write_register(address, value)

    def __set_proxied_register_value(self, register, base_address, proxy_addr):
        """Writes to a register that is not directly connected to the bus but needs to be spoken with through a proxy peripheral

        Parameters:
            register: dictionary containing the details of the register write to perform
            base_address: base address of the peripheral to write to
            proxy_addr: base address of the proxy peripheral
        """
        periph = register['peripheral']
        peripheral_registers = self.data_store.get_peripheral(periph)['registers']
        for i in peripheral_registers:
            if i['ID'] == register['name'] or i['register_name'] == register['name']:
                address = base_address + int(i['offset'], 0)
                value = register['value']
                print(f'PROXY WRITE: writen: {value} to register at address: {hex(address)} through proxy at address: {hex(proxy_addr)}')
                self.interface.write_proxied_register(proxy_addr, address, value)

    def __split_dword(self, val):
        """Splits a single 32 bit register value to two 16 bit field values

        Parameters:
            val: register value to be split
        Returns:
            Tuple: couple of field values (low word, high word)
        """
        w1 = int(val & 0xffff)
        w2 = int((val >> 16) & 0xffff)
        return w1, w2
| # Copyright 2021 University of Nottingham Ningbo China
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app, Blueprint, jsonify, request
from flask_restful import Api, Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
from . import role_required
############################################################
# IMPLEMENTATION #
############################################################
registers_manager_bp = Blueprint('regusters_manager', __name__, url_prefix='/registers')
api = Api(registers_manager_bp)
class RegisterValue(Resource):
@jwt_required()
@role_required("operator")
def get(self):
pass
@jwt_required()
@role_required("operator")
def post(self, peripheral):
registers_to_write = request.get_json(force=True)
user = get_jwt_identity()
current_app.register_mgr.set_register_value(peripheral, registers_to_write['payload'], user)
return '200'
class RegisterDescriptions(Resource):
@jwt_required()
@role_required("operator")
def get(self, peripheral):
user = get_jwt_identity()
return jsonify(current_app.register_mgr.get_registers_descriptions(peripheral,user))
@jwt_required()
@role_required("operator")
def post(self):
pass
class PeripheralsSpecs(Resource):
@jwt_required()
@role_required("operator")
def get(self):
return jsonify(current_app.register_mgr.get_all_peripherals())
class PeripheralsDigest(Resource):
@jwt_required()
@role_required("operator")
def get(self):
return current_app.register_mgr.get_peripherals_digest()
class RegistersBulkWrite(Resource):
@jwt_required()
@role_required("operator")
def post(self):
registers_to_write = request.get_json(force=True)
current_app.register_mgr.bulk_write(registers_to_write['payload'])
return '200'
api.add_resource(RegisterValue, '/<string:peripheral>/value')
api.add_resource(RegisterDescriptions, '/<string:peripheral>/descriptions')
api.add_resource(PeripheralsSpecs, '/all_peripheral/descriptions')
api.add_resource(RegistersBulkWrite, '/bulk_write')
api.add_resource(PeripheralsDigest, '/digest')
############################################################
# IMPLEMENTATION #
############################################################
class RegistersManager:
def __init__(self, interface, store):
self.interface = interface
self.data_store = store.Elements
self.settings_store = store.Settings
def get_all_peripherals(self):
"""Returns all the peripherals present in the database
Returns:
List:list of peripherals in the database
"""
return self.data_store.get_peripherals_dict()
def get_peripherals_digest(self):
"""Returns an hash of the jsonified peripherals list
Returns:
str:Digest of the peripherals present in the database
"""
return self.data_store.get_peripherals_hash()
def get_registers_descriptions(self, peripheral_name, username):
"""Returns the specification for the registers of the specified peripheral
Parameters:
peripheral_name: name of the peripheral whose registers need to be returned
username: username of the request issuer
Returns:
List:list of registers in the peripheral
"""
app = self.settings_store.get_per_user_value('chosen_application', username)
found = False
for peripheral in app['peripherals']:
if peripheral_name in peripheral['peripheral_id']:
found = True
parameters = self.data_store.get_peripheral(peripheral['spec_id'])
base_address = int(peripheral['base_address'], 0)
if not found:
raise ValueError("The component register file was not found")
registers_values = {}
for i in parameters['registers']:
if ('R' in i['direction'] or 'r' in i['direction']) and not current_app.app_mgr.peripheral_is_proxied(
peripheral_name, username):
address = base_address + int(i['offset'], 0)
if i['register_format'] == 'words':
registers_values[i['register_name']] = self.interface.read_register(address)
else:
registers_values[i['register_name']] = self.interface.read_register(address)
else:
registers_values[i['register_name']] = 0
return {'peripheral_name': parameters['peripheral_name'], 'registers': registers_values}
def get_register_value(self, peripheral_name, register_name):
pass
def set_register_value(self, peripheral, register, username):
"""Writes to a specifier register
Parameters:
peripheral: name of the peripheral whose registers need to be returned
register: dictionary containing the register name and value
username: username of the requester
"""
base_address = int(current_app.app_mgr.get_peripheral_base_address(peripheral, username), 0)
if current_app.app_mgr.peripheral_is_proxied(peripheral, username):
proxy_addr = int(current_app.app_mgr.get_peripheral_proxy_address(peripheral, username), 0)
self.__set_proxied_register_value(register, base_address, proxy_addr)
else:
self.__set_direct_register_value(register, base_address)
def bulk_write(self, registers):
""" Perform a bulk register write operations
Parameters:
registers: List of dictionaries containing the details for a single register write
"""
for i in registers:
self.interface.write_register(i['address'], i['value'])
# TODO: REFACTOR THESE METHODS AWAY, PUSHING THIS LOGIC TO THE CLIENT
def __set_direct_register_value(self, register, base_address):
"""Writes to a register that is directly accessible through the CPU bus itself
Parameters:
register: dictionary containing the details of the register write to perform
base_address: base address of the peripheral to write to
"""
periph = register['peripheral']
peripheral_registers = self.data_store.get_peripheral(periph)['registers']
for i in peripheral_registers:
if i['ID'] == register['name'] or i['register_name'] == register['name']:
address = base_address + int(i['offset'], 0)
value = register['value']
print(f'DIRECT WRITE: writen: {value} to register at address: {hex(address)}')
self.interface.write_register(address, value)
def __set_proxied_register_value(self, register, base_address, proxy_addr):
"""Writes to a register that is not directly connected to the bus but needs to be spoken with through a proxy peripheral
Parameters:
register: dictionary containing the details of the register write to perform
base_address: base address of the peripheral to write to
proxy_addr: base address of the proxy peripheral
"""
periph = register['peripheral']
peripheral_registers = self.data_store.get_peripheral(periph)['registers']
for i in peripheral_registers:
if i['ID'] == register['name'] or i['register_name'] == register['name']:
address = base_address + int(i['offset'], 0)
value = register['value']
print(f'PROXY WRITE: writen: {value} to register at address: {hex(address)} through proxy at address: {hex(proxy_addr)}')
self.interface.write_proxied_register(proxy_addr, address, value)
def __split_dword(self, val):
"""Splits a single 32 bit register value to two 16 bit field values
Parameters:
val: register value to be split
Returns:
Tuple: couple of field values
"""
w1 = int(val & 0xffff)
w2 = int((val >> 16) & 0xffff)
return w1, w2 | en | 0.70999 | # Copyright 2021 University of Nottingham Ningbo China # Author: <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################ # IMPLEMENTATION # ############################################################ ############################################################ # IMPLEMENTATION # ############################################################ Returns all the peripherals present in the database Returns: List:list of peripherals in the database Returns an hash of the jsonified peripherals list Returns: str:Digest of the peripherals present in the database Returns the specification for the registers of the specified peripheral Parameters: peripheral_name: name of the peripheral whose registers need to be returned username: username of the request issuer Returns: List:list of registers in the peripheral Writes to a specifier register Parameters: peripheral: name of the peripheral whose registers need to be returned register: dictionary containing the register name and value username: username of the requester Perform a bulk register write operations Parameters: registers: List of dictionaries containing the details for a single register write # TODO: REFACTOR THESE METHODS AWAY, PUSHING THIS LOGIC TO THE CLIENT Writes to a register that is directly accessible through the CPU bus itself Parameters: register: dictionary containing the details of the register write to perform base_address: 
base address of the peripheral to write to Writes to a register that is not directly connected to the bus but needs to be spoken with through a proxy peripheral Parameters: register: dictionary containing the details of the register write to perform base_address: base address of the peripheral to write to proxy_addr: base address of the proxy peripheral Splits a single 32 bit register value to two 16 bit field values Parameters: val: register value to be split Returns: Tuple: couple of field values | 2.272477 | 2 |
modules/odenremote.py | GeorgeIoak/Oden | 0 | 6614361 | #! /usr/bin/env python3
#coding: utf8
# TODO Change this to a class (?)
#import lirc
#import time
import os
import sys
import spidev
from smbus2 import SMBus
#import config
import threading
from evdev import InputDevice, categorize, ecodes
import selectors
from threading import Thread, Event
import asyncio
from queue import Queue
from time import*
from ConfigurationFiles.config import* # File locations for saving states
btnVolUp = 'KEY_VOLUMEUP' #2075 #"vol-up" # 0x1B
btnVolDwn = 'KEY_VOLUMEDOWN' #2076 #"vol-dwn" # 0x1C
btnSrcUp = 'KEY_NEXT' #2071 #"next" # 0x17
btnSrcDwn = 'KEY_PREVIOUS' #2072 #"prev" # 0x18
analogInputs = []
digitalInputs = []
volTable = [2, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 76,
80, 84, 88, 92, 94, 96, 98, 100, 102, 104, 106, 108,
110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152,
154, 156, 158, 160, 162, 163, 164, 165, 166, 167, 168,
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210]
curInput = 0 # What Source Input are we currently at
remCode = '' # Current remote code with toggle bit masked off
curVol = 0
old_vol = dbVol = 0
volStep = 1
volMax = len(volTable) - 1 # PGA2320 range is 0-255 but we'll use a 0-100 lookup table
i2c_port_num = 1
pcf_address = 0x3B # temp address was 0x38 from 0x3B PCF8574A: A0=H, A1=H, A2=L
tyr = [0x20, 0x27] # 2 PCF8574 devices define a Tyr
oden = [0x22, 0x23, 0x24]
digitalBoard = [0x21, 0x39] # PCF8574(A) address set to 001, either or address, not both
phonoBoard = [0x25]
isTyr = False
isOden = False
isDigital = False
isPhono = False
SPI_PORT = 1 # PGA2320 is at /dev/spidev1.0
SPI_DEVICE = 0
# Connect to the I2C Bus
try:
i2cBus = SMBus(i2c_port_num)
except:
print("I2C bus problem")
# Open SPI bus instance for PGA2320
try:
pga2320 = spidev.SpiDev()
pga2320.open(SPI_PORT, SPI_DEVICE)
pga2320.max_speed_hz = 1000000 # PGA2320 max SPI Speed is 6.25MHz
except:
print("Could not connect to SPI1 bus")
global events # Testing global to see if it will pass back to oden.py
selector = selectors.DefaultSelector()
try:
IRsignal = InputDevice('/dev/input/by-path/platform-ir-receiver@12-event')
Rotarysignal = InputDevice('/dev/input/by-path/platform-rotary@17-event')
# This works because InputDevice has a `fileno()` method.
selector.register(IRsignal, selectors.EVENT_READ)
selector.register(Rotarysignal, selectors.EVENT_READ)
events = Queue()
except (FileNotFoundError, PermissionError)as error:
print("Something wrong with IR or Rotary Encoder", error)
# RAM Drive setup on /var/ram
# TODO: need to use RAM Drive until shutting down
# Write volume to file
def save_vol(curVol):
    """Persist the current volume table index to the state file ``vol``.

    Parameters:
        curVol: volume table index to save
    """
    # BUG FIX: the original wrote the undefined name ``CurVol`` (capital C),
    # which raised NameError on every call; write the actual parameter.
    with open(vol, 'w') as f:
        f.write(str(curVol))
# Get volume from file
def get_vol():
    """Read the saved volume table index back from the state file ``vol``."""
    with open(vol, 'r') as handle:
        saved = handle.read()
    return int(saved)
# PCF8574 Pin States (one routine per selectable analog source):
# NOTE(review): the bit patterns below assume the PCF8574 expander drives
# the relay selects directly -- confirm polarity against the board schematic.
# BAL 1= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=H 0
# BAL 2= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=L 1
# LINE 1 D0=X,D1=L,D2=L,D3=L,D4=H,D5=L,D6=L,D7=X 2
# LINE 2 D0=X,D1=L,D2=L,D3=H,D4=L,D5=L,D6=L,D7=X 3
# LINE 3 D0=X,D1=L,D2=H,D3=L,D4=L,D5=L,D6=L,D7=X 4
# LINE 4 D0=X,D1=H,D2=L,D3=L,D4=L,D5=L,D6=L,D7=X 5
# LINE 5 (TAPE)(LOOP) D0=X,D1=L,D2=L,D3=L,D4=L,D5=H,D6=L,D7=X 6
# DIG/PHONO D0=L,D1=L,D2=L,D3=L,D4=L,D5=H,D6=H,D7=H 7
def bal1(): #0xC1 / 0b1100 0001
    i2cBus.write_byte(pcf_address, 0b11000001)
def bal2(): #0x41 / 0b0100 0001
    i2cBus.write_byte(pcf_address, 0b01000001)
def line1(): #0x11 / 0b0001 0001
    i2cBus.write_byte(pcf_address, 0b00010001)
def line2(): #0x09 / 0b0000 1001
    i2cBus.write_byte(pcf_address, 0b00001001)
def line3(): #0x05 / 0b0000 0101
    i2cBus.write_byte(pcf_address, 0b00000101)
def line4(): #0x03 / 0b0000 0011
    i2cBus.write_byte(pcf_address, 0b00000011)
def line5(): #0x21 / 0b0010 0001
    i2cBus.write_byte(pcf_address, 0b00100001)
def digital(): #0xC0 / 0b1100 0000
    i2cBus.write_byte(pcf_address, 0b11000000)
# Dispatch table: source index (0-7) -> relay-select routine above.
switcherDigital = {
    0: bal1,
    1: bal2,
    2: line1,
    3: line2,
    4: line3,
    5: line4,
    6: line5,
    7: digital
}
def setAnalogInput(theInput):
    """Switch the analog input relays to the selected source.

    Parameters:
        theInput: integer index into ``switcherDigital`` (0-7)

    Raises:
        ValueError: if ``theInput`` is not a known source index.
    """
    # BUG FIX: the original used the string "whoops" as the dict default and
    # then called it, producing an obscure ``TypeError: 'str' object is not
    # callable`` for unknown indexes. Fail with an explicit error instead.
    func = switcherDigital.get(theInput)
    if func is None:
        raise ValueError(f"Unknown analog input index: {theInput!r}")
    return func()
def listenRemote():
    """Event loop translating IR remote keys and rotary-encoder steps into
    volume (PGA2320 over SPI) and source-selection (PCF8574 over I2C) changes.

    Runs until an exception occurs; intended to be driven from a thread.
    Raw events are also queued on ``events`` for consumers in oden.py.
    """
    global curVol, curInput  # Needs to be global so values can be passed back to oden.py
    try:
        while True:
            for key, mask in selector.select():
                device = key.fileobj
                for event in device.read():
                    if event.type == ecodes.EV_KEY:
                        events.put(event)
                        data = categorize(event)
                        remCode = data.keycode
                        if data.keystate >= 1:  # Only on key down event, 2 is held down
                            if (remCode == btnVolUp) or (remCode == btnVolDwn):
                                # BUG FIX: clamp with >= / <= so the index can
                                # never step past the ends of volTable. The
                                # original used > / <, which allowed
                                # volTable[volMax + 1] (IndexError) on volume
                                # up and volTable[-1] (jump to max volume!)
                                # on volume down.
                                if (curVol >= volMax) and (remCode == btnVolUp):
                                    curVol = volMax
                                elif (remCode == btnVolUp):
                                    curVol += volStep
                                if (remCode == btnVolDwn) and (curVol <= 0):
                                    curVol = 0
                                elif (remCode == btnVolDwn):
                                    curVol -= volStep
                                print("Current volume is: ", curVol)
                                dbVol = volTable[curVol]
                                pga2320.writebytes([dbVol, dbVol, dbVol, dbVol])  # 1 PGA2320/channel so 4 writes
                            if (remCode == btnSrcUp) or (remCode == btnSrcDwn):
                                # NOTE(review): ``numInputs`` and ``theInputs``
                                # are expected to come from the wildcard
                                # config import -- confirm they are defined.
                                if curInput == numInputs and remCode == btnSrcUp:
                                    curInput = 0  # wrap around past the last source
                                else:
                                    if remCode == btnSrcUp:
                                        print("SOURCE + was pressed")
                                        curInput += 1
                                    else:
                                        print("SOURCE - was pressed")
                                        if curInput == 0:
                                            curInput = numInputs  # wrap below the first source
                                        else:
                                            curInput -= 1
                                setAnalogInput(curInput)
                                print("Current Input is: ", theInputs[curInput])
                    if event.type == ecodes.EV_REL:
                        # Rotary encoder: relative steps, clamped to the table.
                        events.put(event)
                        curVol += event.value
                        if curVol < 0:
                            curVol = 0
                        elif curVol > volMax:
                            curVol = volMax
                        dbVol = volTable[curVol]
                        pga2320.writebytes([dbVol, dbVol, dbVol, dbVol])  # 1 PGA2320/channel so 4 writes
                        print("Rotary changed the volume to", curVol)
    except Exception as error:
        print("Had an IR exception", error)
def cleanup():
    """Release the SPI, I2C and IR input-device handles on shutdown."""
    for handle in (pga2320, i2cBus, IRsignal):
        handle.close()
    print("I just finished cleaning up!")
| #! /usr/bin/env python3
#coding: utf8
# TODO Change this to a class (?)
#import lirc
#import time
import os
import sys
import spidev
from smbus2 import SMBus
#import config
import threading
from evdev import InputDevice, categorize, ecodes
import selectors
from threading import Thread, Event
import asyncio
from queue import Queue
from time import*
from ConfigurationFiles.config import* # File locations for saving states
btnVolUp = 'KEY_VOLUMEUP' #2075 #"vol-up" # 0x1B
btnVolDwn = 'KEY_VOLUMEDOWN' #2076 #"vol-dwn" # 0x1C
btnSrcUp = 'KEY_NEXT' #2071 #"next" # 0x17
btnSrcDwn = 'KEY_PREVIOUS' #2072 #"prev" # 0x18
analogInputs = []
digitalInputs = []
volTable = [2, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 76,
80, 84, 88, 92, 94, 96, 98, 100, 102, 104, 106, 108,
110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152,
154, 156, 158, 160, 162, 163, 164, 165, 166, 167, 168,
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210]
curInput = 0 # What Source Input are we currently at
remCode = '' # Current remote code with toggle bit masked off
curVol = 0
old_vol = dbVol = 0
volStep = 1
volMax = len(volTable) - 1 # PGA2320 range is 0-255 but we'll use a 0-100 lookup table
i2c_port_num = 1
pcf_address = 0x3B # temp address was 0x38 from 0x3B PCF8574A: A0=H, A1=H, A2=L
tyr = [0x20, 0x27] # 2 PCF8574 devices define a Tyr
oden = [0x22, 0x23, 0x24]
digitalBoard = [0x21, 0x39] # PCF8574(A) address set to 001, either or address, not both
phonoBoard = [0x25]
isTyr = False
isOden = False
isDigital = False
isPhono = False
SPI_PORT = 1 # PGA2320 is at /dev/spidev1.0
SPI_DEVICE = 0
# Connect to the I2C Bus
try:
i2cBus = SMBus(i2c_port_num)
except:
print("I2C bus problem")
# Open SPI bus instance for PGA2320
try:
pga2320 = spidev.SpiDev()
pga2320.open(SPI_PORT, SPI_DEVICE)
pga2320.max_speed_hz = 1000000 # PGA2320 max SPI Speed is 6.25MHz
except:
print("Could not connect to SPI1 bus")
global events # Testing global to see if it will pass back to oden.py
selector = selectors.DefaultSelector()
try:
IRsignal = InputDevice('/dev/input/by-path/platform-ir-receiver@12-event')
Rotarysignal = InputDevice('/dev/input/by-path/platform-rotary@17-event')
# This works because InputDevice has a `fileno()` method.
selector.register(IRsignal, selectors.EVENT_READ)
selector.register(Rotarysignal, selectors.EVENT_READ)
events = Queue()
except (FileNotFoundError, PermissionError)as error:
print("Something wrong with IR or Rotary Encoder", error)
# RAM Drive setup on /var/ram
# TODO: need to use RAM Drive until shutting down
# Write volume to file
def save_vol(curVol, path=None):
    """Persist the current volume index to the state file.

    curVol: integer volume index to save.
    path: optional file path; defaults to ``vol`` -- presumably a path
        supplied by the ConfigurationFiles.config star-import (TODO confirm).

    Bug fix: the original wrote the undefined name ``CurVol`` (wrong case),
    so every call raised NameError and the volume was never saved.
    """
    if path is None:
        path = vol  # from the config star-import
    with open(path, 'w') as f:
        f.write(str(curVol))
# Get volume from file
def get_vol():
    """Read the saved volume index back from the state file ``vol``."""
    with open(vol, 'r') as handle:
        return int(handle.read())
# PCF8574 Pin States:
# BAL 1= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=H 0
# BAL 2= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=L 1
# LINE 1 D0=X,D1=L,D2=L,D3=L,D4=H,D5=L,D6=L,D7=X 2
# LINE 2 D0=X,D1=L,D2=L,D3=H,D4=L,D5=L,D6=L,D7=X 3
# LINE 3 D0=X,D1=L,D2=H,D3=L,D4=L,D5=L,D6=L,D7=X 4
# LINE 4 D0=X,D1=H,D2=L,D3=L,D4=L,D5=L,D6=L,D7=X 5
# LINE 5 (TAPE)(LOOP) D0=X,D1=L,D2=L,D3=L,D4=L,D5=H,D6=L,D7=X 6
# DIG/PHONO D0=L,D1=L,D2=L,D3=L,D4=L,D5=H,D6=H,D7=H 7
def bal1():
    """Select BALANCED 1: drive the relay PCF8574 with 0xC1."""
    i2cBus.write_byte(pcf_address, 0xC1)
def bal2():
    """Select BALANCED 2: drive the relay PCF8574 with 0x41."""
    i2cBus.write_byte(pcf_address, 0x41)
def line1():
    """Select LINE 1: drive the relay PCF8574 with 0x11."""
    i2cBus.write_byte(pcf_address, 0x11)
def line2():
    """Select LINE 2: drive the relay PCF8574 with 0x09."""
    i2cBus.write_byte(pcf_address, 0x09)
def line3(): #0x05 / 0b0000 0101
i2cBus.write_byte(pcf_address, 0b00000101)
def line4(): #0x03 / 0b0000 0011
i2cBus.write_byte(pcf_address, 0b00000011)
def line5(): #0x21 / 0b0010 0001
i2cBus.write_byte(pcf_address, 0b00100001)
def digital(): #0xC0 / 0b1100 0000
i2cBus.write_byte(pcf_address, 0b11000000)
switcherDigital = {
0: bal1,
1: bal2,
2: line1,
3: line2,
4: line3,
5: line4,
6: line5,
7: digital
}
def setAnalogInput(theInput):
    """Switch the input relays to the selected source.

    theInput: integer index 0-7 (a key of switcherDigital).
    Returns None (the relay routines return nothing).

    Bug fix: the original used ``switcherDigital.get(theInput, "whoops")``
    and then called the result, so an unknown index raised
    ``TypeError: 'str' object is not callable`` instead of being reported.
    """
    func = switcherDigital.get(theInput)
    if func is None:
        print("whoops: unknown input", theInput)
        return None
    return func()
def listenRemote():
    """Event loop: react to IR remote keys and rotary-encoder turns.

    Runs forever (intended for a background thread).  Updates the module
    globals curVol/curInput, drives the PGA2320 volume chips and the input
    relays, and forwards raw events through the ``events`` queue.
    Relies on volStep/volMax/volTable defined above and on numInputs /
    theInputs -- presumably from the config star-import; TODO confirm.
    """
    global curVol, curInput  # shared with oden.py
    try:
        while True:
            for key, _mask in selector.select():
                device = key.fileobj
                for event in device.read():
                    if event.type == ecodes.EV_KEY:
                        events.put(event)
                        data = categorize(event)
                        remCode = data.keycode
                        if data.keystate >= 1:  # key-down (1) or auto-repeat (2)
                            if remCode in (btnVolUp, btnVolDwn):
                                # Bug fix: clamp WITHIN [0, volMax].  The
                                # original could step one past either end and
                                # then index outside volTable (IndexError at
                                # the top; volTable[-1] == max volume at the
                                # bottom).
                                if remCode == btnVolUp:
                                    curVol = min(curVol + volStep, volMax)
                                else:
                                    curVol = max(curVol - volStep, 0)
                                print("Current volume is: ", curVol)
                                dbVol = volTable[curVol]
                                pga2320.writebytes([dbVol, dbVol, dbVol, dbVol])  # 1 PGA2320/channel so 4 writes
                            if remCode in (btnSrcUp, btnSrcDwn):
                                if curInput == numInputs and remCode == btnSrcUp:
                                    curInput = 0  # wrap upward past the last input
                                else:
                                    if remCode == btnSrcUp:
                                        print("SOURCE + was pressed")
                                        curInput += 1
                                    else:
                                        print("SOURCE - was pressed")
                                        if curInput == 0:
                                            curInput = numInputs  # wrap downward
                                        else:
                                            curInput -= 1
                                setAnalogInput(curInput)
                                print("Current Input is: ", theInputs[curInput])
                    if event.type == ecodes.EV_REL:
                        # Rotary encoder: relative steps, clamped to range.
                        events.put(event)
                        curVol += event.value
                        if curVol < 0:
                            curVol = 0
                        elif curVol > volMax:
                            curVol = volMax
                        dbVol = volTable[curVol]
                        pga2320.writebytes([dbVol, dbVol, dbVol, dbVol])  # 1 PGA2320/channel so 4 writes
                        print("Rotary changed the volume to", curVol)
    except Exception as error:
        print("Had an IR exception", error)
def cleanup():
pga2320.close()
i2cBus.close()
IRsignal.close()
print("I just finished cleaning up!")
return
| en | 0.559731 | #! /usr/bin/env python3 #coding: utf8 # TODO Change this to a class (?) #import lirc #import time #import config # File locations for saving states #2075 #"vol-up" # 0x1B #2076 #"vol-dwn" # 0x1C #2071 #"next" # 0x17 #2072 #"prev" # 0x18 # What Source Input are we currently at # Current remote code with toggle bit masked off # PGA2320 range is 0-255 but we'll use a 0-100 lookup table # temp address was 0x38 from 0x3B PCF8574A: A0=H, A1=H, A2=L # 2 PCF8574 devices define a Tyr # PCF8574(A) address set to 001, either or address, not both # PGA2320 is at /dev/spidev1.0 # Connect to the I2C Bus # Open SPI bus instance for PGA2320 # PGA2320 max SPI Speed is 6.25MHz # Testing global to see if it will pass back to oden.py # This works because InputDevice has a `fileno()` method. # RAM Drive setup on /var/ram # TODO: need to use RAM Drive until shutting down # Write volume to file #f = open('/home/volumio/bladelius/var/vol', 'w') # Get volume from file #f = open('/home/volumio/bladelius/var/vol', 'r') # PCF8574 Pin States: # BAL 1= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=H 0 # BAL 2= D0=H,D1=X,D2=X,D3=X,D4=X,D5=L,D6=H,D7=L 1 # LINE 1 D0=X,D1=L,D2=L,D3=L,D4=H,D5=L,D6=L,D7=X 2 # LINE 2 D0=X,D1=L,D2=L,D3=H,D4=L,D5=L,D6=L,D7=X 3 # LINE 3 D0=X,D1=L,D2=H,D3=L,D4=L,D5=L,D6=L,D7=X 4 # LINE 4 D0=X,D1=H,D2=L,D3=L,D4=L,D5=L,D6=L,D7=X 5 # LINE 5 (TAPE)(LOOP) D0=X,D1=L,D2=L,D3=L,D4=L,D5=H,D6=L,D7=X 6 # DIG/PHONO D0=L,D1=L,D2=L,D3=L,D4=L,D5=H,D6=H,D7=H 7 #0xC1 / 0b1100 0001 #0x41 / 0b0100 0001 #0x11 / 0b0001 0001 #0x09 / 0b0000 1001 #0x05 / 0b0000 0101 #0x03 / 0b0000 0011 #0x21 / 0b0010 0001 #0xC0 / 0b1100 0000 # Needs to be global so values can be passed back to oden.py # Only on key down event, 2 is held down # 1 PGA2320/channel so 4 writes # 1 PGA2320/channel so 4 writes | 1.84783 | 2 |
frameworks/bridge/declarative_frontend/state_mgmt/build.py | openharmony-gitee-mirror/ace_ace_engine | 0 | 6614362 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import shutil
def main(argv):
    """Build the state-mgmt bundle with npm.

    argv[1]: project directory to build in.
    argv[2]: path checked for existing dependencies; ``npm install`` runs
        when it is missing.
    argv[3]: optional "--release" flag (passed from BUILD.gn) selecting the
        release build script.
    """
    release = len(argv) > 3 and argv[3] == "--release"
    os.chdir(os.path.abspath(argv[1]))
    if not os.path.exists(os.path.abspath(argv[2])):
        subprocess.call(["npm", "install"])
    subprocess.call(["npm", "run", "build_release" if release else "build"])
# Script entry point: the full argv (including the program name) is passed
# through, so main()'s indices line up with the CLI arguments 1..3.
if __name__ == '__main__':
    main(sys.argv)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import shutil
def main(argv):
    """Build the state-mgmt bundle with npm.

    argv[1]: project directory to build in.
    argv[2]: path checked for existing dependencies; ``npm install`` runs
        when it is missing.
    argv[3]: optional "--release" flag (passed from BUILD.gn) selecting the
        release build script.
    """
    release = len(argv) > 3 and argv[3] == "--release"
    os.chdir(os.path.abspath(argv[1]))
    if not os.path.exists(os.path.abspath(argv[2])):
        subprocess.call(["npm", "install"])
    subprocess.call(["npm", "run", "build_release" if release else "build"])
if __name__ == '__main__':
main(sys.argv) | en | 0.826395 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2021 Huawei Device Co., Ltd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #Check if build is release (flag provided from BUILD.gn) | 2.391718 | 2 |
extensions/opros.py | Manazius/blacksmith-bot | 3 | 6614363 | # BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# opros_plugin.py
# Coded by: WitcherGeralt (<EMAIL>)
# http://witcher-team.ucoz.ru/
#-extmanager-extVer:0.5-#
# Module-level poll state (reset by opros_stopped()).
OPROS_USERS = {}  # jid -> {'vote': bool, 'mind': bool, 'text': opinion or None}
OPROS_CHAT = []   # conferences already given the public announcement
OTOPIC = {}       # poll body: 'title' plus item-number -> {'col': votes, 'text': item}
MODERIN = []      # jids of moderators who have been shown the poll
STARTER = None    # jid of whoever started the poll (gets the results)
OPROS_GTS = {'title': False, 'ops': 0, 'tryes': 0, 'work': False}  # title set / item count / rounds run / running flag
def opros_stopped():
    """Reset all module-level poll state after a poll ends."""
    state = globals()
    state['STARTER'] = None
    state['OPROS_CHAT'] = []
    state['OPROS_USERS'] = {}
    state['OTOPIC'] = {}
    state['MODERIN'] = []
    state['OPROS_GTS'] = {'title': False, 'ops': 0, 'tryes': 0, 'work': False}
def opros_results():
    """Build the results text: per-item vote counts plus free-text opinions."""
    parts = []
    for key in OTOPIC:
        if key == 'title':
            continue
        entry = OTOPIC[key]
        parts.append(u'За пункт "' + entry['text'])
        parts.append(u'" проголосовало %s юзеров\n' % str(entry['col']))
    opinions = []
    for jid in OPROS_USERS:
        text = OPROS_USERS[jid]['text']
        if text:
            opinions.append(str(len(opinions) + 1) + '. ' + jid + ': ' + text + '\n')
    if opinions:
        result = (u'\n### Мнения юзеров (высказалось %d юзеров):\n' % len(opinions)) + u''.join(opinions)
    else:
        result = u'\n### Мнений не было высказано'
    return u''.join(parts) + result
def opros_exe(starter):
    # One "round" of the running poll: announce it publicly in conferences
    # seen for the first time, privately message moderators (level >= 15)
    # who have not yet been shown it, then reschedule itself.  After 36
    # rounds the results are sent to the starter and all state is reset.
    # NOTE(review): msg/GROUPCHATS/user_level/handler_jid/composeTimer are
    # presumably injected by the host bot's plugin loader -- confirm.
    if OPROS_GTS['work']:
        if OPROS_GTS['tryes'] >= 36:
            # Poll expired: deliver final results to the starter.
            msg(STARTER, (u'Опрос прошел в %s кругов, Итоги:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
            opros_stopped()
        else:
            OPROS_GTS['tryes'] += 1
            # Build the poll text: title followed by the numbered items.
            topic = OTOPIC['title']+'\n'
            items = OTOPIC.keys()
            items.sort()  # NOTE(review): Python-2 idiom -- dict.keys() has no sort() on Py3
            for item in items:
                if item != 'title':
                    topic += item+'. '+OTOPIC[item]['text']+'\n'
            for conf in GROUPCHATS.keys():
                if conf not in OPROS_CHAT:
                    # First round in this conference: one public announcement.
                    OPROS_CHAT.append(conf)
                    msg(conf, u'ВНИМАНИЕ!! Глобальный опрос модераторов (By %s):\n\n%s\nДля ответа напишите: "вариант" <№ варианта> или же выскажитесь словами "вариант*" <высказывание> (можно написать и то и то)' % (starter, topic))
                else:
                    # Later rounds: ping present moderators not yet contacted.
                    for user in GROUPCHATS[conf]:
                        conf_user = conf+'/'+user
                        if GROUPCHATS[conf][user]['ishere'] and user_level(conf_user, conf) >= 15:
                            jid = handler_jid(conf_user)
                            if not jid in MODERIN:
                                MODERIN.append(jid)
                                OPROS_USERS[jid] = {'vote': False, 'mind': False, 'text': None}
                                msg(conf_user, u'ВНИМАНИЕ!! Глобальный опрос модераторов (By %s):\n\n%s\nДля ответа напишите: "вариант" <№ варианта> или же выскажитесь словами "вариант*" <высказывание> (можно написать и то и то)' % (starter, topic))
            try:
                # Re-run this round in 20 minutes (1200 s).  func_name is a
                # Python-2-only attribute; bare except hides scheduling errors.
                composeTimer(1200, opros_exe, opros_exe.func_name, (starter,)).start()
            except:
                pass
def handler_opros(type, source, body):
    # Poll control command (access level 100): "старт"/"start" launches the
    # poll, "стоп"/"stop" ends it and mails the results; with no body it
    # reports the standings so far without stopping.
    if body:
        body = body.lower()
        if OPROS_GTS['ops'] <= 1 or not OPROS_GTS['title']:
            # A poll needs a title and at least two items before starting.
            reply(type, source, u'Сначала дополни опрос (заголовок обязателен и минимум 2 пункта)')
        elif body in [u'старт', 'start']:
            if not OPROS_GTS['work']:
                OPROS_GTS['work'] = True
                # Remember who started the poll so results go back to them.
                globals()['STARTER'] = handler_jid(source[0])
                opros_exe(source[2])
                reply(type, source, u'Опрос стартовал')
            else:
                reply(type, source, u'Опрос уже был запущен')
        elif body in [u'стоп', 'stop']:
            if OPROS_GTS['work']:
                if type == 'public':
                    reply(type, source, u'Опрос остановлен! Результат ищи в привате.')
                else:
                    reply(type, source, u'Опрос остановлен!')
                # NOTE(review): relies on `time` being available in the plugin
                # namespace -- there is no import in this file; confirm the
                # host bot injects it when executing plugins.
                time.sleep(2)
                msg(source[0], (u'Опрос прошел в %s кругов, Итоги:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
                opros_stopped()
            else:
                reply(type, source, u'А он и не был запущен')
        else:
            reply(type, source, u'инвалид синтакс')
    else:
        # No arguments: show intermediate results, in private when asked
        # from a conference.
        if OPROS_GTS['work']:
            if type == 'public':
                reply(type, source, u'смотри в приват')
            reply('private', source, (u'Прошло %s кругов опроса, Итоги на данный момент:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
        else:
            reply(type, source, u'сейчас не идёт опроса')
def handler_opros_base(type, source, body):
    """Edit the poll body: set the title or add/replace a numbered item.

    With no body, shows the current poll draft.  Fixes applied:
    - ``OTOPIC.has_key('title')`` (Python-2-only) -> ``'title' in OTOPIC``;
    - ``OTOPIC.keys(); .sort()`` (Python-2-only) -> ``sorted(...)``;
    - re-adding an existing item number left ``repl`` unbound and raised
      NameError; it now replaces the item's text (matching how the title
      can be overwritten) without double-counting it in OPROS_GTS['ops'].
    """
    if body:
        if not OPROS_GTS['work']:
            args = body.split()
            if len(args) >= 2:
                number = args[0].strip()
                text = body[(body.find(' ') + 1):].strip()
                if number.lower() in [u'заголовок', u'титл']:
                    OTOPIC['title'] = text
                    OPROS_GTS['title'] = True
                    repl = u'Тайтл установлен'
                elif check_number(number):
                    if number not in OTOPIC:
                        OPROS_GTS['ops'] += 1  # count each item number once
                    OTOPIC[number] = {'col': 0, 'text': text}
                    repl = u'пункт опроса добавлен'
                else:
                    repl = u'помоему это не число'
            else:
                repl = u'инвалид синтакс'
        else:
            repl = u'Во время активного вопроса нельзя добавлять пункты'
    else:
        # No arguments: dump the current draft (title + sorted items).
        repl = u'Боди опроса:\n'
        if 'title' in OTOPIC:
            repl += OTOPIC['title']+'\n'
        for num in sorted(OTOPIC.keys()):
            if num != 'title':
                repl += num+'. '+OTOPIC[num]['text']+'\n'
    reply(type, source, repl)
def handler_opros_otvet(type, source, body):
    """Register a moderator's vote for one poll item (one vote per jid)."""
    if not OPROS_GTS['work']:
        reply(type, source, u'Сейчас нет опроса')
        return
    jid = handler_jid(source[0])
    voter = OPROS_USERS.get(jid)
    if voter is None:
        # First contact from this moderator: create their record.
        MODERIN.append(jid)
        voter = {'vote': False, 'mind': False, 'text': None}
        OPROS_USERS[jid] = voter
    if voter['vote']:
        repl = u'Ты уже выбирал пункт'
    elif not body:
        repl = u'за что голосуеш то?'
    elif body in OTOPIC and body != 'title':
        voter['vote'] = True
        OTOPIC[body]['col'] += 1
        repl = u'ваш голос учтён'
    else:
        repl = u'нет такого пункта'
    reply(type, source, repl)
def handler_opros_mind(type, source, body):
    """Record a moderator's free-text opinion (once per jid, max 256 chars)."""
    if not OPROS_GTS['work']:
        reply(type, source, u'Сейчас нет опроса')
        return
    jid = handler_jid(source[0])
    entry = OPROS_USERS.get(jid)
    if entry is None:
        # First contact from this moderator: create their record.
        MODERIN.append(jid)
        entry = {'vote': False, 'mind': False, 'text': None}
        OPROS_USERS[jid] = entry
    if entry['mind']:
        repl = u'Ты уже высказал мнение'
    elif not body:
        repl = u'Ну и что же ты думаеш по этому вопросу?'
    elif len(body) <= 256:
        OPROS_USERS[jid] = {'vote': entry['vote'], 'mind': True, 'text': body}
        repl = u'ваш голос учтён'
    else:
        repl = u'слишком много текста (256 знаков предел)'
    reply(type, source, repl)
# Register the chat-command handlers with the host bot: access level 100
# for managing the poll, level 15 for voting/opinions.
# NOTE(review): all four are registered under the same command name
# "opros" -- presumably the framework dispatches by handler order or
# signature; confirm the duplicate names are intentional.
command_handler(handler_opros, 100, "opros")
command_handler(handler_opros_base, 100, "opros")
command_handler(handler_opros_otvet, 15, "opros")
command_handler(handler_opros_mind, 15, "opros")
| # BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# opros_plugin.py
# Coded by: WitcherGeralt (<EMAIL>)
# http://witcher-team.ucoz.ru/
#-extmanager-extVer:0.5-#
OPROS_USERS = {}
OPROS_CHAT = []
OTOPIC = {}
MODERIN = []
STARTER = None
OPROS_GTS = {'title': False, 'ops': 0, 'tryes': 0, 'work': False}
def opros_stopped():
globals()['STARTER'] = None
globals()['OPROS_CHAT'] = []
globals()['OPROS_USERS'] = {}
globals()['OTOPIC'] = {}
globals()['MODERIN'] = []
globals()['OPROS_GTS'] = {'title': False, 'ops': 0, 'tryes': 0, 'work': False}
def opros_results():
items, ovet, col = '', '', 0
for item in OTOPIC:
if item != 'title':
items += u'За пункт "'+OTOPIC[item]['text']
items += u'" проголосовало %s юзеров\n' % str(OTOPIC[item]['col'])
for usr in OPROS_USERS:
if OPROS_USERS[usr]['text']:
col = col + 1
ovet += str(col)+'. '+usr+': '+OPROS_USERS[usr]['text']+'\n'
if col != 0:
result = (u'\n### Мнения юзеров (высказалось %d юзеров):\n' % col)+ovet
else:
result = u'\n### Мнений не было высказано'
return items+result
def opros_exe(starter):
if OPROS_GTS['work']:
if OPROS_GTS['tryes'] >= 36:
msg(STARTER, (u'Опрос прошел в %s кругов, Итоги:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
opros_stopped()
else:
OPROS_GTS['tryes'] += 1
topic = OTOPIC['title']+'\n'
items = OTOPIC.keys()
items.sort()
for item in items:
if item != 'title':
topic += item+'. '+OTOPIC[item]['text']+'\n'
for conf in GROUPCHATS.keys():
if conf not in OPROS_CHAT:
OPROS_CHAT.append(conf)
msg(conf, u'ВНИМАНИЕ!! Глобальный опрос модераторов (By %s):\n\n%s\nДля ответа напишите: "вариант" <№ варианта> или же выскажитесь словами "вариант*" <высказывание> (можно написать и то и то)' % (starter, topic))
else:
for user in GROUPCHATS[conf]:
conf_user = conf+'/'+user
if GROUPCHATS[conf][user]['ishere'] and user_level(conf_user, conf) >= 15:
jid = handler_jid(conf_user)
if not jid in MODERIN:
MODERIN.append(jid)
OPROS_USERS[jid] = {'vote': False, 'mind': False, 'text': None}
msg(conf_user, u'ВНИМАНИЕ!! Глобальный опрос модераторов (By %s):\n\n%s\nДля ответа напишите: "вариант" <№ варианта> или же выскажитесь словами "вариант*" <высказывание> (можно написать и то и то)' % (starter, topic))
try:
composeTimer(1200, opros_exe, opros_exe.func_name, (starter,)).start()
except:
pass
def handler_opros(type, source, body):
if body:
body = body.lower()
if OPROS_GTS['ops'] <= 1 or not OPROS_GTS['title']:
reply(type, source, u'Сначала дополни опрос (заголовок обязателен и минимум 2 пункта)')
elif body in [u'старт', 'start']:
if not OPROS_GTS['work']:
OPROS_GTS['work'] = True
globals()['STARTER'] = handler_jid(source[0])
opros_exe(source[2])
reply(type, source, u'Опрос стартовал')
else:
reply(type, source, u'Опрос уже был запущен')
elif body in [u'стоп', 'stop']:
if OPROS_GTS['work']:
if type == 'public':
reply(type, source, u'Опрос остановлен! Результат ищи в привате.')
else:
reply(type, source, u'Опрос остановлен!')
time.sleep(2)
msg(source[0], (u'Опрос прошел в %s кругов, Итоги:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
opros_stopped()
else:
reply(type, source, u'А он и не был запущен')
else:
reply(type, source, u'инвалид синтакс')
else:
if OPROS_GTS['work']:
if type == 'public':
reply(type, source, u'смотри в приват')
reply('private', source, (u'Прошло %s кругов опроса, Итоги на данный момент:\n\n### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results())
else:
reply(type, source, u'сейчас не идёт опроса')
def handler_opros_base(type, source, body):
    """Edit the poll body: set the title or add/replace a numbered item.

    With no body, shows the current poll draft.  Fixes applied:
    - ``OTOPIC.has_key('title')`` (Python-2-only) -> ``'title' in OTOPIC``;
    - ``OTOPIC.keys(); .sort()`` (Python-2-only) -> ``sorted(...)``;
    - re-adding an existing item number left ``repl`` unbound and raised
      NameError; it now replaces the item's text (matching how the title
      can be overwritten) without double-counting it in OPROS_GTS['ops'].
    """
    if body:
        if not OPROS_GTS['work']:
            args = body.split()
            if len(args) >= 2:
                number = args[0].strip()
                text = body[(body.find(' ') + 1):].strip()
                if number.lower() in [u'заголовок', u'титл']:
                    OTOPIC['title'] = text
                    OPROS_GTS['title'] = True
                    repl = u'Тайтл установлен'
                elif check_number(number):
                    if number not in OTOPIC:
                        OPROS_GTS['ops'] += 1  # count each item number once
                    OTOPIC[number] = {'col': 0, 'text': text}
                    repl = u'пункт опроса добавлен'
                else:
                    repl = u'помоему это не число'
            else:
                repl = u'инвалид синтакс'
        else:
            repl = u'Во время активного вопроса нельзя добавлять пункты'
    else:
        # No arguments: dump the current draft (title + sorted items).
        repl = u'Боди опроса:\n'
        if 'title' in OTOPIC:
            repl += OTOPIC['title']+'\n'
        for num in sorted(OTOPIC.keys()):
            if num != 'title':
                repl += num+'. '+OTOPIC[num]['text']+'\n'
    reply(type, source, repl)
def handler_opros_otvet(type, source, body):
if OPROS_GTS['work']:
jid = handler_jid(source[0])
if jid not in OPROS_USERS:
MODERIN.append(jid)
OPROS_USERS[jid] = {'vote': False, 'mind': False, 'text': None}
if OPROS_USERS[jid]['vote']:
repl = u'Ты уже выбирал пункт'
elif body:
if body in OTOPIC and body != 'title':
OPROS_USERS[jid]['vote'] = True
OTOPIC[body]['col'] += 1
repl = u'ваш голос учтён'
else:
repl = u'нет такого пункта'
else:
repl = u'за что голосуеш то?'
else:
repl = u'Сейчас нет опроса'
reply(type, source, repl)
def handler_opros_mind(type, source, body):
if OPROS_GTS['work']:
jid = handler_jid(source[0])
if jid not in OPROS_USERS:
MODERIN.append(jid)
OPROS_USERS[jid] = {'vote': False, 'mind': False, 'text': None}
if OPROS_USERS[jid]['mind']:
repl = u'Ты уже высказал мнение'
elif body:
if len(body) <= 256:
OPROS_USERS[jid] = {'vote': OPROS_USERS[jid]['vote'], 'mind': True, 'text': body}
repl = u'ваш голос учтён'
else:
repl = u'слишком много текста (256 знаков предел)'
else:
repl = u'Ну и что же ты думаеш по этому вопросу?'
else:
repl = u'Сейчас нет опроса'
reply(type, source, repl)
command_handler(handler_opros, 100, "opros")
command_handler(handler_opros_base, 100, "opros")
command_handler(handler_opros_otvet, 15, "opros")
command_handler(handler_opros_mind, 15, "opros")
| ru | 0.640638 | # BS mark.1-55 # /* coding: utf-8 */ # BlackSmith plugin # opros_plugin.py # Coded by: WitcherGeralt (<EMAIL>) # http://witcher-team.ucoz.ru/ #-extmanager-extVer:0.5-# ### Мнения юзеров (высказалось %d юзеров):\n' % col)+ovet ### Мнений не было высказано' ### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results()) ### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results()) ### С опросом ознакомились %s модеров\n' % (str(OPROS_GTS['tryes']), str(len(MODERIN))))+opros_results()) | 2.014819 | 2 |
generate_lerp_video.py | samsniderheld/SpriteGan2 | 0 | 6614364 | import argparse
import os
import tensorflow as tf
from Model.generator import *
from Model.ops import *
import cv2
import shutil
import numpy as np
from tqdm import tqdm
def parse_args(argv=None):
    """Parse CLI options for lerp-video generation.

    argv: optional list of argument strings; defaults to sys.argv[1:].
        Accepting a list is backward compatible (no-arg calls behave as
        before) and makes the parser testable.
    Returns the argparse.Namespace with step/num_lerps/noise_dim/img_dim.
    """
    parser = argparse.ArgumentParser(description="create a lerp video")
    parser.add_argument('--step', type=int, default=99000,
                        help='The step to load the model from ')
    parser.add_argument('--num_lerps', type=int, default=10,
                        help='How many lerps to do')
    parser.add_argument('--noise_dim', type=int, default=512,
                        help='The size of the latent vector')
    parser.add_argument('--img_dim', type=int, default=256,
                        help='The dimension of the image')
    return parser.parse_args(argv)
# ---- script body: render a chained latent-interpolation image sequence ----
# Recreate the output directory from scratch.  (The original duplicated the
# makedirs call in both branches and set an unused ``frame`` variable.)
if os.path.exists("Results/GeneratedImages"):
    shutil.rmtree("Results/GeneratedImages")
os.makedirs('Results/GeneratedImages')

args = parse_args()
idx = 0
num_lerps = args.num_lerps
noise_dim = args.noise_dim
img_dim = args.img_dim
step = args.step

# Load the trained generator weights for the requested training step.
generator = make_style_gan_generator(img_dim, noise_dim)
generator.load_weights("SavedModels/generator_weights_at_step_{}.h5".format(step))

# Two endpoints per segment: a latent vector and a noise image each.
noise_vector_1 = noise(1, noise_dim)
noise_image_1 = noise_image(1, img_dim)
noise_vector_2 = noise(1, noise_dim)
noise_image_2 = noise_image(1, img_dim)

linX = list(np.linspace(0, 1, 50))  # 50 interpolation steps per segment

for i in tqdm(range(0, num_lerps)):
    for x in linX:
        # Linearly interpolate both the latent vector and the noise image.
        lerped_vector = noise_vector_1 * (1 - x) + noise_vector_2 * (x)
        noise_vector_list = [lerped_vector] * int(log2(img_dim) - 1)
        lerped_noise_image = noise_image_1 * (1 - x) + noise_image_2 * (x)
        image = generator.predict(noise_vector_list + [lerped_noise_image], batch_size=1)
        # Upscale with nearest-neighbour to keep the sprite's hard pixels.
        resizedImage = cv2.resize(image[0] * 255., dsize=(512, 512), interpolation=cv2.INTER_NEAREST)
        cv2.imwrite('Results/GeneratedImages/image{}.png'.format('%04d' % idx),
                    cv2.cvtColor(resizedImage, cv2.COLOR_RGB2BGR))
        idx += 1
    # Chain segments: the end of this lerp becomes the start of the next.
    noise_vector_1 = noise_vector_2
    noise_image_1 = noise_image_2
    noise_vector_2 = noise(1, noise_dim)
    noise_image_2 = noise_image(1, img_dim)
| import argparse
import os
import tensorflow as tf
from Model.generator import *
from Model.ops import *
import cv2
import shutil
import numpy as np
from tqdm import tqdm
def parse_args(argv=None):
    """Parse CLI options for lerp-video generation.

    argv: optional list of argument strings; defaults to sys.argv[1:].
        Accepting a list is backward compatible (no-arg calls behave as
        before) and makes the parser testable.
    Returns the argparse.Namespace with step/num_lerps/noise_dim/img_dim.
    """
    parser = argparse.ArgumentParser(description="create a lerp video")
    parser.add_argument('--step', type=int, default=99000,
                        help='The step to load the model from ')
    parser.add_argument('--num_lerps', type=int, default=10,
                        help='How many lerps to do')
    parser.add_argument('--noise_dim', type=int, default=512,
                        help='The size of the latent vector')
    parser.add_argument('--img_dim', type=int, default=256,
                        help='The dimension of the image')
    return parser.parse_args(argv)
if os.path.exists("Results/GeneratedImages"):
shutil.rmtree("Results/GeneratedImages")
os.makedirs('Results/GeneratedImages')
else:
os.makedirs('Results/GeneratedImages')
args = parse_args()
idx=0
num_lerps = args.num_lerps
noise_dim = args.noise_dim
img_dim = args.img_dim
step = args.step
generator = make_style_gan_generator(img_dim, noise_dim)
generator.load_weights("SavedModels/generator_weights_at_step_{}.h5".format(step))
noise_vector_1 = noise(1,noise_dim)
noise_image_1 = noise_image(1,img_dim)
noise_vector_2 = noise(1,noise_dim)
noise_image_2 = noise_image(1,img_dim)
linX = list(np.linspace(0, 1, 50))
for i in tqdm(range(0,num_lerps)):
for x in linX:
frame = None
#use a linear interpolater
lerped_vector = noise_vector_1 * (1-x) + noise_vector_2 * (x)
noise_vector_list = [lerped_vector] * int(log2(img_dim) - 1)
lerped_noise_image = noise_image_1 * (1-x) + noise_image_2 * (x)
image = generator.predict(noise_vector_list + [lerped_noise_image], batch_size = 1)
resizedImage = cv2.resize(image[0]*255., dsize=(512, 512), interpolation=cv2.INTER_NEAREST)
cv2.imwrite('Results/GeneratedImages/image{}.png'.format('%04d'%idx), cv2.cvtColor(resizedImage, cv2.COLOR_RGB2BGR))
idx+=1
noise_vector_1 = noise_vector_2
noise_image_1 = noise_image_2
noise_vector_2 = noise(1,noise_dim)
noise_image_2 = noise_image(1,img_dim)
| it | 0.223716 | #use a linear interpolater | 2.530354 | 3 |
gan/train.py | houssamzenati/Generative-Adverserial-Network-MNIST-CIPHAR10-Tensorflow | 8 | 6614365 | # Train DCGAN model on CIFAR-10 data or other specified data
# Originally written by <NAME>
import tensorflow as tf
import logging
import importlib
from data import utilities
logger = logging.getLogger("gan.train")
# Create CIFAR-10 input
BATCH_SIZE = 32
def train(dataset, except_class_to_ignore, stop=300000):
    '''
    Trains the DCGAN on the whole dataset except the class/digit considered
    anomalous.  (The original docstring said "autoencoder" and omitted
    ``stop``.)

    Args:
        dataset (str): name of the dataset, mnist or cifar10
        except_class_to_ignore (int): int in range 0 to 10, the class/digit
            on which the neural net is not trained
        stop (int): number of training batches to run before stopping
    '''
    dcgan = importlib.import_module('gan.dcgan_{}'.format(dataset))
    data = importlib.import_module('data.{}'.format(dataset))
    # Bug fix: Logger.warn is a deprecated alias -- use warning().
    logger.warning("The gan is training on {}, ignoring the class {}".format(
        dataset, except_class_to_ignore))
    # Rescale inputs from [0, 1] to [-1, 1] to match the generator's tanh range.
    data_generator = map((lambda inp: (inp[0] * 2. - 1., inp[1])), utilities.
                         infinite_generator(data.get_train(except_class_to_ignore), BATCH_SIZE))

    # Input images
    input_placeholder = tf.placeholder(tf.float32,
                                       shape=data.get_shape_input(),
                                       name="input")
    # Sample noise from a random normal distribution
    random_z = tf.random_normal([BATCH_SIZE, 100], mean=0.0, stddev=1.0,
                                name='random_z')
    # Generate images with the generator
    generator = dcgan.generator(random_z, is_training=True, name='generator')
    # Pass real and fake images into the discriminator separately
    real_discriminator = dcgan.discriminator(input_placeholder, is_training=True,
                                             name='discriminator')
    fake_discriminator = dcgan.discriminator(generator, is_training=True,
                                             reuse=True, name='discriminator')

    # Separate losses for the discriminator on real and fake images
    real_discriminator_loss = tf.losses.sigmoid_cross_entropy(
        tf.constant(1, shape=[BATCH_SIZE]),
        real_discriminator,
        scope='real_discriminator_loss')
    fake_discriminator_loss = tf.losses.sigmoid_cross_entropy(
        tf.constant(0, shape=[BATCH_SIZE]),
        fake_discriminator,
        scope='fake_discriminator_loss')
    discriminator_loss = real_discriminator_loss + fake_discriminator_loss

    # Generator loss: flip the label on the discriminator's fake output
    generator_loss = tf.losses.sigmoid_cross_entropy(
        tf.constant(1, shape=[BATCH_SIZE]),
        fake_discriminator,
        scope='generator_loss')

    # Summaries to visualise losses and sample images
    summary_discriminator = tf.summary.merge([
        tf.summary.scalar('summary/real_discriminator_loss', real_discriminator_loss),
        tf.summary.scalar('summary/fake_discriminator_loss', fake_discriminator_loss),
        tf.summary.scalar('summary/discriminator_loss', discriminator_loss)])
    # Map [-1, 1] tensors back to displayable [0, 255] uint8 images.
    input_visualisation = tf.cast(((input_placeholder / 2.0) + 0.5) * 255.0, tf.uint8)
    generator_visualisation = tf.cast(((generator / 2.0) + 0.5) * 255.0, tf.uint8)
    summary_input = tf.summary.image('summary/input',
                                     input_visualisation, max_outputs=3)
    summary_generator = tf.summary.merge([
        tf.summary.image('summary/generator', generator_visualisation, max_outputs=3),
        tf.summary.scalar('summary/generator_loss', generator_loss)])

    # Trainable variables and batch-norm update ops, per sub-network
    discriminator_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                scope='discriminator')
    generator_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                            scope='generator')
    discriminator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                 scope='discriminator')
    generator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                             scope='generator')

    # Train the discriminator first, then the generator
    global_step = tf.Variable(0, name='global_step', trainable=False)
    with tf.control_dependencies(discriminator_update_ops):
        train_discriminator = tf.train. \
            AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
            minimize(discriminator_loss,
                     var_list=discriminator_variables)
    with tf.control_dependencies(generator_update_ops):
        train_generator = tf.train. \
            AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
            minimize(generator_loss, global_step=global_step,
                     var_list=generator_variables)

    # Automatic summaries are disabled (save_summaries_secs=None) because the
    # automatic system assumes every run provides values for *all* summaries.
    logdir = "gan/train_logs/{}/{}".format(dataset, except_class_to_ignore)
    sv = tf.train.Supervisor(logdir=logdir, global_step=global_step,
                             save_summaries_secs=None, save_model_secs=120)
    batch = 0
    with sv.managed_session() as sess:
        # Set up tensorboard logging
        logwriter = tf.summary.FileWriter(logdir, sess.graph)
        while not sv.should_stop() and batch < stop:
            if batch > 0 and batch % 100 == 0:
                logger.info('Step {}.'.format(batch))
            inp, _ = next(data_generator)
            (_, sum_dis) = sess.run((train_discriminator, summary_discriminator),
                                    feed_dict={input_placeholder: inp})
            logwriter.add_summary(sum_dis, batch)
            (_, sum_gen) = sess.run((train_generator, summary_generator))
            logwriter.add_summary(sum_gen, batch)
            s = sess.run(summary_input, feed_dict={input_placeholder: inp})
            logwriter.add_summary(s, batch)
            batch += 1
def run(dataset, except_class_to_ignore):
'''
Runs the training process
Args:
dataset (str): name of the dataset, mnist or cifar10
except_class_to_ignore (int): int in range 0 to 10, is the class/digit
on which the neural net is not trained
'''
train(dataset, except_class_to_ignore) | # Train DCGAN model on CIFAR-10 data or other specified data
# Originally written by <NAME>
import tensorflow as tf
import logging
import importlib
from data import utilities
logger = logging.getLogger("gan.train")
# Create CIFAR-10 input
BATCH_SIZE = 32
def train(dataset, except_class_to_ignore, stop=300000):
'''
Trains the autoencoder on all dataset except the class/digit considered
anomalous.
Args:
dataset (str): name of the dataset, mnist or cifar10
except_class_to_ignore (int): int in range 0 to 10, is the class/digit
on which the neural net is not trained
'''
dcgan = importlib.import_module('gan.dcgan_{}'.format(dataset))
data = importlib.import_module('data.{}'.format(dataset))
logger.warn("The gan is training on {}, \
ignoring the class {}".format(dataset, except_class_to_ignore))
data_generator = map((lambda inp: (inp[0]*2. - 1., inp[1])), utilities. \
infinite_generator(data.get_train(except_class_to_ignore), BATCH_SIZE))
# Input images
input_placeholder = tf.placeholder(tf.float32,
shape=data.get_shape_input(),
name="input")
# Sample noise from random normal distribution
random_z = tf.random_normal([BATCH_SIZE, 100], mean=0.0, stddev=1.0,
name='random_z')
# Generate images with generator
generator = dcgan.generator(random_z, is_training=True, name='generator')
# Pass real and fake images into discriminator separately
real_discriminator = dcgan.discriminator(input_placeholder, is_training=True,
name='discriminator')
fake_discriminator = dcgan.discriminator(generator, is_training=True,
reuse=True, name='discriminator')
# Calculate seperate losses for discriminator with real and fake images
real_discriminator_loss = tf.losses.sigmoid_cross_entropy(
tf.constant(1, shape=[BATCH_SIZE]),
real_discriminator,
scope='real_discriminator_loss')
fake_discriminator_loss = tf.losses.sigmoid_cross_entropy(
tf.constant(0, shape=[BATCH_SIZE]),
fake_discriminator,
scope='fake_discriminator_loss')
# Add discriminator losses
discriminator_loss = real_discriminator_loss + fake_discriminator_loss
# Calculate loss for generator by flipping label on discriminator output
generator_loss = tf.losses.sigmoid_cross_entropy(
tf.constant(1, shape=[BATCH_SIZE]),
fake_discriminator,
scope='generator_loss')
# Add summaries to visualise output images and losses
summary_discriminator = tf.summary.merge([ \
tf.summary.scalar('summary/real_discriminator_loss', real_discriminator_loss), \
tf.summary.scalar('summary/fake_discriminator_loss', fake_discriminator_loss), \
tf.summary.scalar('summary/discriminator_loss', discriminator_loss)])
input_visualisation = tf.cast(((input_placeholder / 2.0) + 0.5) * 255.0, tf.uint8)
generator_visualisation = tf.cast(((generator / 2.0) + 0.5) * 255.0, tf.uint8)
summary_input = tf.summary.image('summary/input',
input_visualisation, max_outputs=3)
summary_generator = tf.summary.merge([ \
tf.summary.image('summary/generator', generator_visualisation, max_outputs=3), \
tf.summary.scalar('summary/generator_loss', generator_loss)])
# Get discriminator and generator variables to train separately
discriminator_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='discriminator')
generator_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='generator')
# Get discriminator and generator update ops
discriminator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
scope='discriminator')
generator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
scope='generator')
# Train discriminator first followed by generator
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.control_dependencies(discriminator_update_ops):
train_discriminator = tf.train. \
AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
minimize(discriminator_loss,
var_list=discriminator_variables)
with tf.control_dependencies(generator_update_ops):
train_generator = tf.train. \
AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
minimize(generator_loss, global_step=global_step,
var_list=generator_variables)
# We disable automatic summaries here, because the automatic system assumes that
# any time you run any part of the graph, you will be providing values for _all_
# summaries:
logdir = "gan/train_logs/{}/{}". \
format(dataset,except_class_to_ignore)
sv = tf.train.Supervisor(logdir=logdir, global_step=global_step,
save_summaries_secs=None, save_model_secs=120)
batch = 0
with sv.managed_session() as sess:
# Set up tensorboard logging:
logwriter = tf.summary.FileWriter(logdir, sess.graph)
while not sv.should_stop() and batch < stop:
if batch > 0 and batch % 100 == 0:
logger.info('Step {}.'.format(batch))
inp, _ = next(data_generator)
(_, sum_dis) = sess.run((train_discriminator, summary_discriminator),
feed_dict={input_placeholder: inp})
logwriter.add_summary(sum_dis, batch)
(_, sum_gen) = sess.run((train_generator, summary_generator))
logwriter.add_summary(sum_gen, batch)
s = sess.run(summary_input, feed_dict={input_placeholder: inp})
logwriter.add_summary(s, batch)
batch += 1
def run(dataset, except_class_to_ignore):
'''
Runs the training process
Args:
dataset (str): name of the dataset, mnist or cifar10
except_class_to_ignore (int): int in range 0 to 10, is the class/digit
on which the neural net is not trained
'''
train(dataset, except_class_to_ignore) | en | 0.75764 | # Train DCGAN model on CIFAR-10 data or other specified data # Originally written by <NAME> # Create CIFAR-10 input Trains the autoencoder on all dataset except the class/digit considered anomalous. Args: dataset (str): name of the dataset, mnist or cifar10 except_class_to_ignore (int): int in range 0 to 10, is the class/digit on which the neural net is not trained # Input images # Sample noise from random normal distribution # Generate images with generator # Pass real and fake images into discriminator separately # Calculate seperate losses for discriminator with real and fake images # Add discriminator losses # Calculate loss for generator by flipping label on discriminator output # Add summaries to visualise output images and losses # Get discriminator and generator variables to train separately # Get discriminator and generator update ops # Train discriminator first followed by generator # We disable automatic summaries here, because the automatic system assumes that # any time you run any part of the graph, you will be providing values for _all_ # summaries: # Set up tensorboard logging: Runs the training process Args: dataset (str): name of the dataset, mnist or cifar10 except_class_to_ignore (int): int in range 0 to 10, is the class/digit on which the neural net is not trained | 2.971495 | 3 |
pep.lib/proc/procSENTINEL_L2.py | alpha-zou/TAMP | 1 | 6614366 | #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta,datetime
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
def createImgSENTINEL_L2(fileAbsPath, pixelSize=0.1):
#fileAbsPath='S5P_NRTI_L2__SO2____20080808T224727_20080808T234211_21635_01_021797_00000000T000000.nc'
filename = os.path.basename(fileAbsPath)
instrument = filename.split('_')[0]
product = filename[4:19]
date = datetime.strptime(filename[20:35],'%Y%m%dT%H%M%S').strftime('%Y%m%d.%H%M%S')
outFileList = []
hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
coordFillValue = hdf['PRODUCT']['latitude'].attrs['_FillValue'][0]
#searching the last valid column
for i in range(np.array(hdf['PRODUCT']['latitude']).shape[1]):
if np.array(hdf['PRODUCT']['latitude'])[0,i] == coordFillValue:
break
lat = np.array(hdf['PRODUCT']['latitude'][:,:i])
lon = np.array(hdf['PRODUCT']['longitude'][:,:i])
so2_vertical_column = np.array(hdf['PRODUCT']['so2_vertical_column'][0,:,:i]) #/100000000000000000000000000000000000
qa_value = np.array(hdf['PRODUCT']['qa_value'][0,:,:i])
dataType = GDT_Float32
xSize = lat.shape[1]
ySize = lat.shape[0]
no_data = -9999
fillValue = hdf['PRODUCT']['so2_vertical_column'].attrs['_FillValue']
#workingDir='/home/tamp/pep.lib/'
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
timeStart = datetime.strptime(filename[20:35],'%Y%m%dT%H%M%S').strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.strptime(filename[36:51],'%Y%m%dT%H%M%S').strftime('%Y-%m-%dT%H:%M:%SZ')
filenameCoords = 'SENTINEL_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, xSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(lat)
coord_ds.GetRasterBand(2).WriteArray(lon)
coord_ds = None
tmpOutFile_so2_vertical_column = instrument + '_SO2_VERTICAL_COLUMN_L2_' + date + '_tmp.tif'
data_ds = driver.Create(tmpOutFile_so2_vertical_column, xSize, ySize, 1, dataType)
band = so2_vertical_column
band[band == fillValue] = no_data
maxValue=np.max(ma.masked_equal(band,no_data))
minValue=np.min(ma.masked_equal(band,no_data))
data_ds.GetRasterBand(1).WriteArray(band)
data_ds = None
window = str(xSize)+'x'+str(ySize)
upper_left = []
lower_right = []
upper_left.append(np.amax(lat))
upper_left.append(np.amin(lon))
lower_right.append(np.amin(lat))
lower_right.append(np.amax(lon))
command_call = [workingDir + 'bin/remap', '-i', tmpOutFile_so2_vertical_column , '-l', str(upper_left[0]), str(upper_left[1]), '-e', str(lower_right[0])+','+ str(lower_right[1]), '-a', filenameCoords, '-s', str(pixelSize), '-n', str(no_data), '-q', '-o', tmpOutFile_so2_vertical_column+ '_mask','-f','60000']
mask_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
# remove tmpOutFile_so2_vertical_column++'_mask','-f','60000' from command_call
command_call.pop()
command_call.pop()
command_call.pop()
command_call.append(tmpOutFile_so2_vertical_column+'_remapped')
command_call.append('-c')
coord_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
mask_process.wait()
coord_process.wait()
remap_ds = gdal.Open(tmpOutFile_so2_vertical_column+'_remapped', gdal.GA_ReadOnly)
transform_i = remap_ds.GetRasterBand(1).ReadAsArray().transpose()
transform_j = remap_ds.GetRasterBand(2).ReadAsArray().transpose()
mask_ds = gdal.Open(tmpOutFile_so2_vertical_column + '_mask', gdal.GA_ReadOnly)
mask = mask_ds.GetRasterBand(1).ReadAsArray().transpose()
filenameOutput_so2_vertical_column = instrument + '_SO2_VERTICAL_COLUMN_L2_' + date + '.tif'
dst_ds = driver.Create(filenameOutput_so2_vertical_column, transform_j.shape[0], transform_j.shape[1], 1, gdal.GDT_Float32)
outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
band = so2_vertical_column
for i in range(outData.shape[0]):
for j in range(outData.shape[1]):
if band[ transform_j[i,j], transform_i[i,j]] != no_data:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
else:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
outData[mask==no_data] = no_data
dst_ds.GetRasterBand(1).WriteArray(outData.transpose())
dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
dst_ds.SetMetadataItem('GLOBAL_MAX',str(maxValue))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(minValue))
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
filenameOutput_qa_value = instrument + '_SO2_QA_VALUE_L2_' + date + '.tif'
dst_ds = driver.Create(filenameOutput_qa_value, transform_j.shape[0], transform_j.shape[1], 1, gdal.GDT_Float32)
outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
band = qa_value
for i in range(outData.shape[0]):
for j in range(outData.shape[1]):
if band[ transform_j[i,j], transform_i[i,j]] != no_data:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
else:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
outData[mask==no_data] = no_data
dst_ds.GetRasterBand(1).WriteArray(outData.transpose())
dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
dst_ds.SetMetadataItem('GLOBAL_MAX',str(np.max(ma.masked_equal(band,no_data))))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(np.min(ma.masked_equal(band,no_data))))
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
outFileList.append(filenameOutput_so2_vertical_column)
outFileList.append(filenameOutput_qa_value)
#os.system('rm ' +tmpOutFile_so2_vertical_column)
#os.system('rm ' + tmpOutFile_so2_vertical_column + '_mask')
#os.system('rm ' + tmpOutFile_so2_vertical_column + '_remapped')
#os.system('rm ' + filenameCoords)
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s SENTINEL_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgSENTINEL_L2(fileAbsPath)
exit(0)
| #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta,datetime
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
def createImgSENTINEL_L2(fileAbsPath, pixelSize=0.1):
#fileAbsPath='S5P_NRTI_L2__SO2____20080808T224727_20080808T234211_21635_01_021797_00000000T000000.nc'
filename = os.path.basename(fileAbsPath)
instrument = filename.split('_')[0]
product = filename[4:19]
date = datetime.strptime(filename[20:35],'%Y%m%dT%H%M%S').strftime('%Y%m%d.%H%M%S')
outFileList = []
hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
coordFillValue = hdf['PRODUCT']['latitude'].attrs['_FillValue'][0]
#searching the last valid column
for i in range(np.array(hdf['PRODUCT']['latitude']).shape[1]):
if np.array(hdf['PRODUCT']['latitude'])[0,i] == coordFillValue:
break
lat = np.array(hdf['PRODUCT']['latitude'][:,:i])
lon = np.array(hdf['PRODUCT']['longitude'][:,:i])
so2_vertical_column = np.array(hdf['PRODUCT']['so2_vertical_column'][0,:,:i]) #/100000000000000000000000000000000000
qa_value = np.array(hdf['PRODUCT']['qa_value'][0,:,:i])
dataType = GDT_Float32
xSize = lat.shape[1]
ySize = lat.shape[0]
no_data = -9999
fillValue = hdf['PRODUCT']['so2_vertical_column'].attrs['_FillValue']
#workingDir='/home/tamp/pep.lib/'
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
timeStart = datetime.strptime(filename[20:35],'%Y%m%dT%H%M%S').strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.strptime(filename[36:51],'%Y%m%dT%H%M%S').strftime('%Y-%m-%dT%H:%M:%SZ')
filenameCoords = 'SENTINEL_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, xSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(lat)
coord_ds.GetRasterBand(2).WriteArray(lon)
coord_ds = None
tmpOutFile_so2_vertical_column = instrument + '_SO2_VERTICAL_COLUMN_L2_' + date + '_tmp.tif'
data_ds = driver.Create(tmpOutFile_so2_vertical_column, xSize, ySize, 1, dataType)
band = so2_vertical_column
band[band == fillValue] = no_data
maxValue=np.max(ma.masked_equal(band,no_data))
minValue=np.min(ma.masked_equal(band,no_data))
data_ds.GetRasterBand(1).WriteArray(band)
data_ds = None
window = str(xSize)+'x'+str(ySize)
upper_left = []
lower_right = []
upper_left.append(np.amax(lat))
upper_left.append(np.amin(lon))
lower_right.append(np.amin(lat))
lower_right.append(np.amax(lon))
command_call = [workingDir + 'bin/remap', '-i', tmpOutFile_so2_vertical_column , '-l', str(upper_left[0]), str(upper_left[1]), '-e', str(lower_right[0])+','+ str(lower_right[1]), '-a', filenameCoords, '-s', str(pixelSize), '-n', str(no_data), '-q', '-o', tmpOutFile_so2_vertical_column+ '_mask','-f','60000']
mask_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
# remove tmpOutFile_so2_vertical_column++'_mask','-f','60000' from command_call
command_call.pop()
command_call.pop()
command_call.pop()
command_call.append(tmpOutFile_so2_vertical_column+'_remapped')
command_call.append('-c')
coord_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
mask_process.wait()
coord_process.wait()
remap_ds = gdal.Open(tmpOutFile_so2_vertical_column+'_remapped', gdal.GA_ReadOnly)
transform_i = remap_ds.GetRasterBand(1).ReadAsArray().transpose()
transform_j = remap_ds.GetRasterBand(2).ReadAsArray().transpose()
mask_ds = gdal.Open(tmpOutFile_so2_vertical_column + '_mask', gdal.GA_ReadOnly)
mask = mask_ds.GetRasterBand(1).ReadAsArray().transpose()
filenameOutput_so2_vertical_column = instrument + '_SO2_VERTICAL_COLUMN_L2_' + date + '.tif'
dst_ds = driver.Create(filenameOutput_so2_vertical_column, transform_j.shape[0], transform_j.shape[1], 1, gdal.GDT_Float32)
outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
band = so2_vertical_column
for i in range(outData.shape[0]):
for j in range(outData.shape[1]):
if band[ transform_j[i,j], transform_i[i,j]] != no_data:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
else:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
outData[mask==no_data] = no_data
dst_ds.GetRasterBand(1).WriteArray(outData.transpose())
dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
dst_ds.SetMetadataItem('GLOBAL_MAX',str(maxValue))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(minValue))
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
filenameOutput_qa_value = instrument + '_SO2_QA_VALUE_L2_' + date + '.tif'
dst_ds = driver.Create(filenameOutput_qa_value, transform_j.shape[0], transform_j.shape[1], 1, gdal.GDT_Float32)
outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
band = qa_value
for i in range(outData.shape[0]):
for j in range(outData.shape[1]):
if band[ transform_j[i,j], transform_i[i,j]] != no_data:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
else:
outData[i, j] = band[ transform_j[i,j], transform_i[i,j]]
outData[mask==no_data] = no_data
dst_ds.GetRasterBand(1).WriteArray(outData.transpose())
dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
dst_ds.SetMetadataItem('GLOBAL_MAX',str(np.max(ma.masked_equal(band,no_data))))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(np.min(ma.masked_equal(band,no_data))))
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
outFileList.append(filenameOutput_so2_vertical_column)
outFileList.append(filenameOutput_qa_value)
#os.system('rm ' +tmpOutFile_so2_vertical_column)
#os.system('rm ' + tmpOutFile_so2_vertical_column + '_mask')
#os.system('rm ' + tmpOutFile_so2_vertical_column + '_remapped')
#os.system('rm ' + filenameCoords)
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s SENTINEL_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgSENTINEL_L2(fileAbsPath)
exit(0)
| en | 0.229368 | #!/usr/bin/env python #fileAbsPath='S5P_NRTI_L2__SO2____20080808T224727_20080808T234211_21635_01_021797_00000000T000000.nc' #searching the last valid column #/100000000000000000000000000000000000 #workingDir='/home/tamp/pep.lib/' # remove tmpOutFile_so2_vertical_column++'_mask','-f','60000' from command_call #os.system('rm ' +tmpOutFile_so2_vertical_column) #os.system('rm ' + tmpOutFile_so2_vertical_column + '_mask') #os.system('rm ' + tmpOutFile_so2_vertical_column + '_remapped') #os.system('rm ' + filenameCoords) | 2.044318 | 2 |
python/ValidSudoku.py | JumHorn/leetcode | 1 | 6614367 | from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
sudoku = []
for i, row in enumerate(board):
for j, c in enumerate(row):
if c != '.':
sudoku += ((i, c), (c, j), (i//3, j//3, c))
return len(sudoku) == len(set(sudoku))
| from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
sudoku = []
for i, row in enumerate(board):
for j, c in enumerate(row):
if c != '.':
sudoku += ((i, c), (c, j), (i//3, j//3, c))
return len(sudoku) == len(set(sudoku))
| none | 1 | 3.470929 | 3 | |
cee_syslog_handler/__init__.py | blue-yonder/cee_syslog_handler | 7 | 6614368 | import json
import logging
import re
import socket
import traceback
from datetime import datetime
from logging.handlers import SYSLOG_UDP_PORT, SysLogHandler
SYSLOG_LEVELS = {
logging.CRITICAL: 2,
logging.ERROR: 3,
logging.WARNING: 4,
logging.INFO: 6,
logging.DEBUG: 7,
}
# The following fields are standard log record fields according to
# http://docs.python.org/library/logging.html#logrecord-attributes
# Hint: exc_text is a cache field used by the logging module
_STANDARD_FIELDS = set(
(
"args",
"asctime",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"lineno",
"module",
"msecs",
"message",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack_info",
"thread",
"threadName",
)
)
# The GELF format does not support "_id" fields
_SKIPPED_FIELDS = _STANDARD_FIELDS | set(("id", "_id"))
_SUPPORTED_OUTPUT_TYPES = (str, float, int)
# see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def get_full_message(exc_info, message):
return "\n".join(traceback.format_exception(*exc_info)) if exc_info else message
# see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def make_message_dict(
record, fqdn, debugging_fields, extra_fields, facility, static_fields
):
message = record.getMessage()
message_dict = {
"host": fqdn,
"short_message": message,
"message": get_full_message(record.exc_info, message),
"timestamp": record.created,
"level": SYSLOG_LEVELS.get(record.levelno, record.levelno),
"facility": facility or record.name,
"source_facility": facility or record.name,
}
if facility is not None:
message_dict.update({"_logger": record.name})
if debugging_fields:
message_dict.update(
{
"file": record.pathname,
"line": record.lineno,
"_function": record.funcName,
"_pid": record.process,
"_thread_name": record.threadName,
"_process_name": record.processName,
}
)
message_dict.update(static_fields)
if extra_fields:
message_dict = get_fields(message_dict, record)
return message_dict
def _to_supported_output_type(value):
if not isinstance(value, _SUPPORTED_OUTPUT_TYPES):
try:
return str(value)
except:
# make logging nothrow
return "value could not be converted to str"
else:
return value
def _custom_key(key):
if key.startswith("_"):
return key
else:
return "_{}".format(key)
def _sanitize_fields(fields):
return {_custom_key(k): _to_supported_output_type(v) for k, v in fields.items()}
# See http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def get_fields(message_dict, record):
fields = record.__dict__
unskipped_field_names = set(fields.keys()) - _SKIPPED_FIELDS
for key in sorted(unskipped_field_names, reverse=True):
value = fields[key]
message_dict[_custom_key(key)] = _to_supported_output_type(value)
return message_dict
class JsonFormatter(logging.Formatter):
""" A Json Formatter for Python Logging
Usage:
import logging
from cee_syslog_handler import JsonFormatter
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(JsonFormatter())
logger.addHandler(fh)
logger.warn("foo")
"""
def __init__(
self,
datefmt="%Y-%m-%dT%H:%M:%S.%f",
debugging_fields=True,
extra_fields=True,
**kwargs
):
"""
:param datefmt: The date formatting
:param debugging_fields: Whether to include file, line number, function, process and thread
id in the log
:param extra_fields: Whether to include extra fields (submitted via the keyword argument
extra to a logger) in the log dictionary
:param facility: If not specified uses the logger's name as facility
:param kwargs: Additional static fields to be injected in each message.
"""
self.datefmt = datefmt
self.debugging_fields = debugging_fields
self.extra_fields = extra_fields
self._static_fields = _sanitize_fields(kwargs)
self._fqdn = socket.getfqdn()
def format(self, record):
record = make_message_dict(
record,
fqdn=self._fqdn,
debugging_fields=self.debugging_fields,
extra_fields=self.extra_fields,
facility=None,
static_fields=self._static_fields,
)
record["timestamp"] = datetime.fromtimestamp(record["timestamp"]).strftime(
self.datefmt
)
del record["short_message"]
del record["source_facility"]
return json.dumps(record)
class CeeSysLogHandler(SysLogHandler):
"""
A syslog handler that formats extra fields as a CEE compatible structured log message. A CEE
compatible message is a syslog log entry that contains a cookie string "@cee:" in its message
part. Everything behind the colon is expected to be a JSON dictionary (containing no lists as
children).
See the following links for the specification of the CEE syntax:
http://www.rsyslog.com/doc/mmpstrucdata.html
http://cee.mitre.org
http://cee.mitre.org/language/1.0-beta1/clt.html#appendix-1-cee-over-syslog-transport-mapping
The handler is compatible to graypy and emits the same structured log messages as the graypy
gelf handler does.
Usage::
import logging
from cee_syslog_handler import CeeSysLogHandler
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
ch = CeeSysLogHandler(address=("10.2.160.20", 514))
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logger.debug('debug message')
logger.info('info message', extra=dict(foo="bar"))
Expected Ouput on the syslog side::
Sep 9 09:31:11 10.128.4.107 : @cee: {"message": "XXXXXXXXXXXX debug message", "level": 7}
Sep 9 09:31:11 10.128.4.107 : @cee: {"_foo": "bar", "message": "XXXXXXXXXXX info message", "level": 6}
"""
def __init__(
self,
address=("localhost", SYSLOG_UDP_PORT),
socktype=socket.SOCK_DGRAM,
debugging_fields=True,
extra_fields=True,
facility=None,
**kwargs
):
"""
:param address: Address of the syslog server (hostname, port)
:param socktype: If specified (socket.SOCK_DGRAM or socket.SOCK_STREAM) uses UDP or TCP
respectively
:param debugging_fields: Whether to include file, line number, function, process and thread
id in the log
:param extra_fields: Whether to include extra fields (submitted via the keyword argument
extra to a logger) in the log dictionary
:param facility: If not specified uses the logger's name as facility
:param kwargs: Additional static fields to be injected in each message.
"""
super(CeeSysLogHandler, self).__init__(
address, facility=SysLogHandler.LOG_USER, socktype=socktype
)
self._debugging_fields = debugging_fields
self._extra_fields = extra_fields
self._facility = facility
self._static_fields = _sanitize_fields(kwargs)
self._fqdn = socket.getfqdn()
def format(self, record):
message = make_message_dict(
record,
self._fqdn,
self._debugging_fields,
self._extra_fields,
self._facility,
self._static_fields,
)
return ": @cee: %s" % json.dumps(message)
class NamedCeeLogger(CeeSysLogHandler):
def __init__(self, address, socket_type, name):
super(NamedCeeLogger, self).__init__(address, socket_type, name=name)
class RegexFilter(logging.Filter):
"""
This Filter can be used to discard log messages that contain a match of
a given regular expression.
"""
def __init__(self, filter_regex):
super(RegexFilter, self).__init__()
self._pattern = re.compile(filter_regex)
def filter(self, record):
"""
Returns True if the record shall be logged. False otherwise.
https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L607
"""
found = self._pattern.search(record.getMessage())
return not found
class RegexRedactFilter(logging.Filter):
"""
This filter redacts parts of log messages and exception traces
by substituting through a provided regular expression.
Use with caution: with great power comes great responsibility
"""
def __init__(self, filter_regex=None, replace_string="<redacted>"):
super(RegexRedactFilter, self).__init__()
self._pattern = re.compile(filter_regex)
self._replacement = replace_string
# use methods from formatter to safely redact exception
# and stack traces
self._formatter = logging.Formatter()
def redact(self, string):
return re.sub(self._pattern, self._replacement, string)
def filter(self, record):
message = record.getMessage()
if not self._pattern.search(message) and not (
record.exc_info or record.exc_text or record.stack_info
):
return True
record.msg = self.redact(message)
record.args = ()
if record.exc_info:
# exc_info is a tuple based on sys.exc_info()
# (type, value, traceback)
record.msg = (
record.msg
+ "\n"
+ self.redact(self._formatter.formatException(record.exc_info))
)
record.exc_info = None
if record.exc_text:
record.exc_text = self.redact(record.exc_text)
if record.stack_info:
record.msg = (
record.msg
+ "\n"
+ self.redact(self._formatter.formatStack(record.stack_info))
)
record.stack_info = None
return True
| import json
import logging
import re
import socket
import traceback
from datetime import datetime
from logging.handlers import SYSLOG_UDP_PORT, SysLogHandler
SYSLOG_LEVELS = {
logging.CRITICAL: 2,
logging.ERROR: 3,
logging.WARNING: 4,
logging.INFO: 6,
logging.DEBUG: 7,
}
# The following fields are standard log record fields according to
# http://docs.python.org/library/logging.html#logrecord-attributes
# Hint: exc_text is a cache field used by the logging module
_STANDARD_FIELDS = set(
(
"args",
"asctime",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"lineno",
"module",
"msecs",
"message",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack_info",
"thread",
"threadName",
)
)
# The GELF format does not support "_id" fields
_SKIPPED_FIELDS = _STANDARD_FIELDS | set(("id", "_id"))
_SUPPORTED_OUTPUT_TYPES = (str, float, int)
# see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def get_full_message(exc_info, message):
return "\n".join(traceback.format_exception(*exc_info)) if exc_info else message
# see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def make_message_dict(
record, fqdn, debugging_fields, extra_fields, facility, static_fields
):
message = record.getMessage()
message_dict = {
"host": fqdn,
"short_message": message,
"message": get_full_message(record.exc_info, message),
"timestamp": record.created,
"level": SYSLOG_LEVELS.get(record.levelno, record.levelno),
"facility": facility or record.name,
"source_facility": facility or record.name,
}
if facility is not None:
message_dict.update({"_logger": record.name})
if debugging_fields:
message_dict.update(
{
"file": record.pathname,
"line": record.lineno,
"_function": record.funcName,
"_pid": record.process,
"_thread_name": record.threadName,
"_process_name": record.processName,
}
)
message_dict.update(static_fields)
if extra_fields:
message_dict = get_fields(message_dict, record)
return message_dict
def _to_supported_output_type(value):
if not isinstance(value, _SUPPORTED_OUTPUT_TYPES):
try:
return str(value)
except:
# make logging nothrow
return "value could not be converted to str"
else:
return value
def _custom_key(key):
if key.startswith("_"):
return key
else:
return "_{}".format(key)
def _sanitize_fields(fields):
return {_custom_key(k): _to_supported_output_type(v) for k, v in fields.items()}
# See http://github.com/hoffmann/graypy/blob/master/graypy/handler.py
def get_fields(message_dict, record):
    """Copy the record's non-standard attributes into *message_dict*.

    Keys are underscore-prefixed and values coerced; insertion happens
    in reverse-sorted name order, matching the original behaviour.
    """
    attrs = record.__dict__
    custom_names = set(attrs.keys()) - _SKIPPED_FIELDS
    for name in sorted(custom_names, reverse=True):
        message_dict[_custom_key(name)] = _to_supported_output_type(attrs[name])
    return message_dict
class JsonFormatter(logging.Formatter):
    """A JSON Formatter for Python logging.

    Usage:
        import logging
        from cee_syslog_handler import JsonFormatter

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler('spam.log')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(JsonFormatter())
        logger.addHandler(fh)
        logger.warning("foo")
    """

    def __init__(
        self,
        datefmt="%Y-%m-%dT%H:%M:%S.%f",
        debugging_fields=True,
        extra_fields=True,
        **kwargs
    ):
        """
        :param datefmt: The date formatting
        :param debugging_fields: Whether to include file, line number, function, process and thread
                id in the log
        :param extra_fields: Whether to include extra fields (submitted via the keyword argument
                extra to a logger) in the log dictionary
        :param kwargs: Additional static fields to be injected in each message.
        """
        # Bug fix: the base Formatter was never initialised, leaving
        # attributes such as _style/_fmt unset on instances.
        super(JsonFormatter, self).__init__(datefmt=datefmt)
        self.datefmt = datefmt
        self.debugging_fields = debugging_fields
        self.extra_fields = extra_fields
        self._static_fields = _sanitize_fields(kwargs)
        self._fqdn = socket.getfqdn()

    def format(self, record):
        """Serialise *record* into a single JSON line."""
        # Renamed from ``record`` to avoid shadowing the LogRecord arg.
        message_dict = make_message_dict(
            record,
            fqdn=self._fqdn,
            debugging_fields=self.debugging_fields,
            extra_fields=self.extra_fields,
            facility=None,
            static_fields=self._static_fields,
        )
        message_dict["timestamp"] = datetime.fromtimestamp(
            message_dict["timestamp"]
        ).strftime(self.datefmt)
        # These keys duplicate "message"/"facility" in the JSON output.
        del message_dict["short_message"]
        del message_dict["source_facility"]
        return json.dumps(message_dict)
class CeeSysLogHandler(SysLogHandler):
    """
    A syslog handler that formats extra fields as a CEE compatible structured log message. A CEE
    compatible message is a syslog log entry that contains a cookie string "@cee:" in its message
    part. Everything behind the colon is expected to be a JSON dictionary (containing no lists as
    children).

    See the following links for the specification of the CEE syntax:
        http://www.rsyslog.com/doc/mmpstrucdata.html
        http://cee.mitre.org
        http://cee.mitre.org/language/1.0-beta1/clt.html#appendix-1-cee-over-syslog-transport-mapping

    The handler is compatible to graypy and emits the same structured log messages as the graypy
    gelf handler does.

    Usage::
        import logging
        from cee_syslog_handler import CeeSysLogHandler

        logger = logging.getLogger('simple_example')
        logger.setLevel(logging.DEBUG)
        ch = CeeSysLogHandler(address=("10.2.160.20", 514))
        ch.setLevel(logging.DEBUG)
        logger.addHandler(ch)
        logger.debug('debug message')
        logger.info('info message', extra=dict(foo="bar"))

    Expected Output on the syslog side::
        Sep  9 09:31:11 10.128.4.107 : @cee: {"message": "XXXXXXXXXXXX debug message", "level": 7}
        Sep  9 09:31:11 10.128.4.107 : @cee: {"_foo": "bar", "message": "XXXXXXXXXXX info message", "level": 6}
    """

    def __init__(
        self,
        address=("localhost", SYSLOG_UDP_PORT),
        socktype=socket.SOCK_DGRAM,
        debugging_fields=True,
        extra_fields=True,
        facility=None,
        **kwargs
    ):
        """
        :param address: Address of the syslog server (hostname, port)
        :param socktype: If specified (socket.SOCK_DGRAM or socket.SOCK_STREAM) uses UDP or TCP
                respectively
        :param debugging_fields: Whether to include file, line number, function, process and thread
                id in the log
        :param extra_fields: Whether to include extra fields (submitted via the keyword argument
                extra to a logger) in the log dictionary
        :param facility: If not specified uses the logger's name as facility
        :param kwargs: Additional static fields to be injected in each message.
        """
        # The syslog-protocol facility is fixed to LOG_USER; the GELF
        # "facility" payload field is controlled by the separate
        # ``facility`` argument stored below.
        super(CeeSysLogHandler, self).__init__(
            address, facility=SysLogHandler.LOG_USER, socktype=socktype
        )
        self._debugging_fields = debugging_fields
        self._extra_fields = extra_fields
        self._facility = facility
        self._static_fields = _sanitize_fields(kwargs)
        self._fqdn = socket.getfqdn()

    def format(self, record):
        """Render *record* as ": @cee: {json}" for the syslog transport."""
        message = make_message_dict(
            record,
            self._fqdn,
            self._debugging_fields,
            self._extra_fields,
            self._facility,
            self._static_fields,
        )
        return ": @cee: %s" % json.dumps(message)
class NamedCeeLogger(CeeSysLogHandler):
    """CeeSysLogHandler that stamps a static ``name`` field on every message."""

    def __init__(self, address, socket_type, name):
        # ``name`` becomes a static field via CeeSysLogHandler's **kwargs.
        super(NamedCeeLogger, self).__init__(address, socket_type, name=name)
class RegexFilter(logging.Filter):
    """Filter that discards any record whose message matches a regex.

    Records containing a match of ``filter_regex`` are suppressed;
    everything else passes through unchanged.
    """

    def __init__(self, filter_regex):
        super(RegexFilter, self).__init__()
        self._pattern = re.compile(filter_regex)

    def filter(self, record):
        """Return True if the record shall be logged. False otherwise.

        https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L607
        """
        return self._pattern.search(record.getMessage()) is None
class RegexRedactFilter(logging.Filter):
    """
    This filter redacts parts of log messages and exception traces
    by substituting through a provided regular expression.
    Use with caution: with great power comes great responsibility
    """

    def __init__(self, filter_regex=None, replace_string="<redacted>"):
        # NOTE(review): the default filter_regex=None is unusable --
        # re.compile(None) raises TypeError, so callers must always pass
        # a pattern.  Confirm whether the default should be removed.
        super(RegexRedactFilter, self).__init__()
        self._pattern = re.compile(filter_regex)
        self._replacement = replace_string
        # use methods from formatter to safely redact exception
        # and stack traces
        self._formatter = logging.Formatter()

    def redact(self, string):
        """Return *string* with every pattern match replaced."""
        return re.sub(self._pattern, self._replacement, string)

    def filter(self, record):
        """Redact the record in place; always returns True (record is kept).

        When the message matches, or the record carries exception/stack
        information, ``msg`` is replaced with the fully interpolated and
        redacted text and ``args`` is cleared, so later formatting cannot
        reintroduce the unredacted values.
        """
        message = record.getMessage()
        if not self._pattern.search(message) and not (
            record.exc_info or record.exc_text or record.stack_info
        ):
            return True
        # From here on the record is mutated in place.
        record.msg = self.redact(message)
        record.args = ()
        if record.exc_info:
            # exc_info is a tuple based on sys.exc_info()
            # (type, value, traceback)
            # Fold the redacted traceback into msg and drop exc_info so
            # downstream formatters cannot re-render the raw trace.
            record.msg = (
                record.msg
                + "\n"
                + self.redact(self._formatter.formatException(record.exc_info))
            )
            record.exc_info = None
        if record.exc_text:
            # exc_text is the logging module's cached formatted trace.
            record.exc_text = self.redact(record.exc_text)
        if record.stack_info:
            record.msg = (
                record.msg
                + "\n"
                + self.redact(self._formatter.formatStack(record.stack_info))
            )
            record.stack_info = None
        return True
| en | 0.658014 | # The following fields are standard log record fields according to # http://docs.python.org/library/logging.html#logrecord-attributes # Hint: exc_text is a cache field used by the logging module # The GELF format does not support "_id" fields # see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py # see http://github.com/hoffmann/graypy/blob/master/graypy/handler.py # make logging nothrow # See http://github.com/hoffmann/graypy/blob/master/graypy/handler.py A Json Formatter for Python Logging Usage: import logging from cee_syslog_handler import JsonFormatter logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) fh = logging.FileHandler('spam.log') fh.setLevel(logging.DEBUG) fh.setFormatter(JsonFormatter()) logger.addHandler(fh) logger.warn("foo") :param datefmt: The date formatting :param debugging_fields: Whether to include file, line number, function, process and thread id in the log :param extra_fields: Whether to include extra fields (submitted via the keyword argument extra to a logger) in the log dictionary :param facility: If not specified uses the logger's name as facility :param kwargs: Additional static fields to be injected in each message. A syslog handler that formats extra fields as a CEE compatible structured log message. A CEE compatible message is a syslog log entry that contains a cookie string "@cee:" in its message part. Everything behind the colon is expected to be a JSON dictionary (containing no lists as children). See the following links for the specification of the CEE syntax: http://www.rsyslog.com/doc/mmpstrucdata.html http://cee.mitre.org http://cee.mitre.org/language/1.0-beta1/clt.html#appendix-1-cee-over-syslog-transport-mapping The handler is compatible to graypy and emits the same structured log messages as the graypy gelf handler does. 
Usage:: import logging from cee_syslog_handler import CeeSysLogHandler logger = logging.getLogger('simple_example') logger.setLevel(logging.DEBUG) ch = CeeSysLogHandler(address=("10.2.160.20", 514)) ch.setLevel(logging.DEBUG) logger.addHandler(ch) logger.debug('debug message') logger.info('info message', extra=dict(foo="bar")) Expected Ouput on the syslog side:: Sep 9 09:31:11 10.128.4.107 : @cee: {"message": "XXXXXXXXXXXX debug message", "level": 7} Sep 9 09:31:11 10.128.4.107 : @cee: {"_foo": "bar", "message": "XXXXXXXXXXX info message", "level": 6} :param address: Address of the syslog server (hostname, port) :param socktype: If specified (socket.SOCK_DGRAM or socket.SOCK_STREAM) uses UDP or TCP respectively :param debugging_fields: Whether to include file, line number, function, process and thread id in the log :param extra_fields: Whether to include extra fields (submitted via the keyword argument extra to a logger) in the log dictionary :param facility: If not specified uses the logger's name as facility :param kwargs: Additional static fields to be injected in each message. This Filter can be used to discard log messages that contain a match of a given regular expression. Returns True if the record shall be logged. False otherwise. https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L607 This filter redacts parts of log messages and exception traces by substituting through a provided regular expression. Use with caution: with great power comes great responsibility # use methods from formatter to safely redact exception # and stack traces # exc_info is a tuple based on sys.exc_info() # (type, value, traceback) | 2.061874 | 2 |
tests/core/test_events.py | odss/py-odss | 0 | 6614369 | <gh_stars>0
import pytest
from odss.core import Callback, create_framework
from odss.core.consts import OBJECTCLASS, SERVICE_ID, SERVICE_BUNDLE_ID, SERVICE_RANKING
from odss.core.errors import BundleException
from odss.core.events import BundleEvent, FrameworkEvent, ServiceEvent
from odss.core.registry import ServiceReference
from tests.core.interfaces import ITextService
from tests.bundles.translate import Activator as TActivator
from tests.utils import SIMPLE_BUNDLE, TRANSLATE_BUNDLE
pytestmark = pytest.mark.asyncio
def test_add_incorrect_bundle_listener(events):
    """A listener missing the bundle callback must be rejected."""
    class BogusListener:
        pass

    with pytest.raises(BundleException):
        events.add_bundle_listener(BogusListener())
@pytest.mark.asyncio
async def test_fire_bundle_listener(events, listener):
    """Bundle events reach a registered listener exactly once."""
    assert events.add_bundle_listener(listener)
    # Registering the same listener twice is refused.
    assert not events.add_bundle_listener(listener)

    evt = BundleEvent(BundleEvent.INSTALLED, "bundle", "origin")
    await events.fire_bundle_event(evt)

    assert listener.last_event() == evt
    assert len(listener) == 1
    assert evt.kind == BundleEvent.INSTALLED
    assert evt.bundle == "bundle"
    assert evt.origin == "origin"

    assert events.remove_bundle_listener(listener)
    assert not events.remove_bundle_listener(listener)
    # After removal the listener no longer receives events.
    await events.fire_bundle_event(evt)
    assert len(listener) == 1
def test_incorrect_framework_listener(events):
    """A listener missing the framework callback must be rejected."""
    class BogusListener:
        pass

    with pytest.raises(BundleException):
        events.add_framework_listener(BogusListener())
async def test_error_in_listener(events, listener):
    """A raising listener must not block delivery to the others."""
    class ExplodingListener:
        @Callback
        def bundle_changed(self, event):
            raise Exception("buu")

    events.add_bundle_listener(ExplodingListener())
    events.add_bundle_listener(listener)

    evt = BundleEvent(BundleEvent.INSTALLED, "bundle", "origin")
    await events.fire_bundle_event(evt)
    assert listener.last_event() == evt
    assert len(listener) == 1
@pytest.mark.asyncio
async def test_framework_listener(events, listener):
    """Framework events are delivered only while the listener is registered."""
    evt = BundleEvent(BundleEvent.STARTING, "bundle", "origin")
    assert events.add_framework_listener(listener)
    assert not events.add_framework_listener(listener)
    await events.fire_framework_event(evt)
    assert len(listener) == 1

    assert events.remove_framework_listener(listener)
    assert not events.remove_framework_listener(listener)
    await events.fire_framework_event(evt)
    assert len(listener) == 1
def test_incorrect_service_listener(events):
    """A listener missing the service callback must be rejected."""
    class BogusListener:
        pass

    with pytest.raises(BundleException):
        events.add_service_listener(BogusListener())
@pytest.mark.asyncio
async def test_service_listener_all_interfaces(events, listener):
    """Without an interface filter the listener sees every service event."""
    ref = ServiceReference("bundle", {SERVICE_ID: 1, OBJECTCLASS: ["interface"]})
    evt = ServiceEvent(ServiceEvent.REGISTERED, ref)
    assert events.add_service_listener(listener)
    assert not events.add_service_listener(listener)
    await events.fire_service_event(evt)
    assert len(listener) == 1
    assert listener.last_event() == evt

    assert events.remove_service_listener(listener)
    assert not events.remove_service_listener(listener)
    await events.fire_service_event(evt)
    assert len(listener) == 1
@pytest.mark.asyncio
async def test_service_listener_with_interface(framework, events, listener):
    """An interface-filtered listener sees register and unregister events."""
    ctx = framework.get_context()
    ctx.add_service_listener(listener, ITextService)
    registration = await ctx.register_service(ITextService, "mock service")
    await registration.unregister()
    assert len(listener) == 2
    kinds = [e.kind for e in listener.events]
    assert kinds == [ServiceEvent.REGISTERED, ServiceEvent.UNREGISTERING]
@pytest.mark.asyncio
async def test_framework_events(listener):
    """The framework emits its four lifecycle events in order."""
    framework = await create_framework()
    framework.get_context().add_framework_listener(listener)
    await framework.start()
    await framework.stop()

    kinds = [e.kind for e in listener.events]
    assert kinds == [
        FrameworkEvent.STARTING,
        FrameworkEvent.STARTED,
        FrameworkEvent.STOPPING,
        FrameworkEvent.STOPPED,
    ]
@pytest.mark.asyncio
async def test_bundle_events(framework, listener):
    """A bundle emits lifecycle events on install/start/stop/uninstall."""
    context = framework.get_context()
    context.add_bundle_listener(listener)
    events = listener.events
    bundle = await framework.install_bundle(SIMPLE_BUNDLE)
    # Fix: these are bundle events, so compare against BundleEvent
    # constants (the previous FrameworkEvent names only matched by
    # coincidence of shared constant values).
    assert events[0].kind == BundleEvent.INSTALLED
    await bundle.start()
    assert events[1].kind == BundleEvent.STARTING
    assert events[2].kind == BundleEvent.STARTED
    await bundle.stop()
    assert events[3].kind == BundleEvent.STOPPING
    assert events[4].kind == BundleEvent.STOPPED
    await framework.uninstall_bundle(bundle)
    assert events[5].kind == BundleEvent.UNINSTALLED
    assert len(events) == 6
@pytest.mark.asyncio
async def test_service_events(framework, listener):
    """Service events fire on bundle start/stop and cease after removal."""
    context = framework.get_context()
    context.add_service_listener(listener)
    seen = listener.events

    bundle = await framework.install_bundle(TRANSLATE_BUNDLE)
    await bundle.start()
    assert seen[0].kind == ServiceEvent.REGISTERED
    await bundle.stop()
    assert seen[1].kind == ServiceEvent.UNREGISTERING
    await framework.uninstall_bundle(bundle)
    assert len(seen) == 2

    # Once removed, the listener must not record further events.
    context.remove_service_listener(listener)
    await bundle.start()
    await bundle.stop()
    assert len(seen) == 2
@pytest.mark.asyncio
async def test_service_events_modified(framework, events, listener):
    """set_properties fires MODIFIED and protects the reserved keys."""
    context = framework.get_context()
    context.add_service_listener(listener, ITextService)
    reg = await context.register_service(ITextService, "mock service")
    ref = reg.get_reference()
    previous_sort_value = ref.get_sort_value()

    await reg.set_properties(
        {
            "foo": "bar",
            OBJECTCLASS: "test",
            SERVICE_ID: 12345,
            SERVICE_BUNDLE_ID: 12345,
            SERVICE_RANKING: 12345,
        }
    )
    assert ref.get_sort_value() != previous_sort_value

    props = ref.get_properties()
    # Reserved keys must not be overridden; only the ranking changes.
    assert props[OBJECTCLASS] != "test"
    assert props[SERVICE_ID] != 12345
    assert props[SERVICE_BUNDLE_ID] != 12345
    assert props[SERVICE_RANKING] == 12345

    await reg.unregister()
    assert len(listener) == 3
    kinds = [e.kind for e in listener.events]
    assert kinds == [
        ServiceEvent.REGISTERED,
        ServiceEvent.MODIFIED,
        ServiceEvent.UNREGISTERING,
    ]
| import pytest
from odss.core import Callback, create_framework
from odss.core.consts import OBJECTCLASS, SERVICE_ID, SERVICE_BUNDLE_ID, SERVICE_RANKING
from odss.core.errors import BundleException
from odss.core.events import BundleEvent, FrameworkEvent, ServiceEvent
from odss.core.registry import ServiceReference
from tests.core.interfaces import ITextService
from tests.bundles.translate import Activator as TActivator
from tests.utils import SIMPLE_BUNDLE, TRANSLATE_BUNDLE
pytestmark = pytest.mark.asyncio
def test_add_incorrect_bundle_listener(events):
    """Registering an object without the bundle callback must raise."""
    class Listener:
        pass
    with pytest.raises(BundleException):
        events.add_bundle_listener(Listener())
@pytest.mark.asyncio
async def test_fire_bundle_listener(events, listener):
    """Bundle events reach a registered listener exactly once."""
    assert events.add_bundle_listener(listener)
    # Duplicate registration is refused.
    assert not events.add_bundle_listener(listener)
    event = BundleEvent(BundleEvent.INSTALLED, "bundle", "origin")
    await events.fire_bundle_event(event)
    assert listener.last_event() == event
    assert len(listener) == 1
    assert event.kind == BundleEvent.INSTALLED
    assert event.bundle == "bundle"
    assert event.origin == "origin"
    assert events.remove_bundle_listener(listener)
    assert not events.remove_bundle_listener(listener)
    # After removal the listener no longer receives events.
    await events.fire_bundle_event(event)
    assert len(listener) == 1
def test_incorrect_framework_listener(events):
    """Registering an object without the framework callback must raise."""
    class Listener:
        pass
    with pytest.raises(BundleException):
        events.add_framework_listener(Listener())
async def test_error_in_listener(events, listener):
    """A raising listener must not block delivery to the others."""
    class ErrorListener:
        @Callback
        def bundle_changed(self, event):
            raise Exception("buu")
    events.add_bundle_listener(ErrorListener())
    events.add_bundle_listener(listener)
    event = BundleEvent(BundleEvent.INSTALLED, "bundle", "origin")
    await events.fire_bundle_event(event)
    assert listener.last_event() == event
    assert len(listener) == 1
@pytest.mark.asyncio
async def test_framework_listener(events, listener):
    """Framework events are delivered only while registered."""
    event = BundleEvent(BundleEvent.STARTING, "bundle", "origin")
    assert events.add_framework_listener(listener)
    assert not events.add_framework_listener(listener)
    await events.fire_framework_event(event)
    assert len(listener) == 1
    assert events.remove_framework_listener(listener)
    assert not events.remove_framework_listener(listener)
    await events.fire_framework_event(event)
    assert len(listener) == 1
def test_incorrect_service_listener(events):
    """Registering an object without the service callback must raise."""
    class Listener:
        pass
    with pytest.raises(BundleException):
        events.add_service_listener(Listener())
@pytest.mark.asyncio
async def test_service_listener_all_interfaces(events, listener):
    """Without an interface filter the listener sees every service event."""
    reference = ServiceReference("bundle", {SERVICE_ID: 1, OBJECTCLASS: ["interface"]})
    event = ServiceEvent(ServiceEvent.REGISTERED, reference)
    assert events.add_service_listener(listener)
    assert not events.add_service_listener(listener)
    await events.fire_service_event(event)
    assert len(listener) == 1
    assert listener.last_event() == event
    assert events.remove_service_listener(listener)
    assert not events.remove_service_listener(listener)
    await events.fire_service_event(event)
    assert len(listener) == 1
@pytest.mark.asyncio
async def test_service_listener_with_interface(framework, events, listener):
    """An interface-filtered listener sees register and unregister events."""
    context = framework.get_context()
    context.add_service_listener(listener, ITextService)
    reg = await context.register_service(ITextService, "mock service")
    await reg.unregister()
    assert len(listener) == 2
    assert listener.events[0].kind == ServiceEvent.REGISTERED
    assert listener.events[1].kind == ServiceEvent.UNREGISTERING
@pytest.mark.asyncio
async def test_framework_events(listener):
    """The framework emits its four lifecycle events in order."""
    framework = await create_framework()
    context = framework.get_context()
    context.add_framework_listener(listener)
    await framework.start()
    await framework.stop()
    events = listener.events
    assert len(events) == 4
    assert events[0].kind == FrameworkEvent.STARTING
    assert events[1].kind == FrameworkEvent.STARTED
    assert events[2].kind == FrameworkEvent.STOPPING
    assert events[3].kind == FrameworkEvent.STOPPED
@pytest.mark.asyncio
async def test_bundle_events(framework, listener):
    """A bundle emits lifecycle events on install/start/stop/uninstall."""
    context = framework.get_context()
    context.add_bundle_listener(listener)
    events = listener.events
    bundle = await framework.install_bundle(SIMPLE_BUNDLE)
    # Fix: these are bundle events, so compare against BundleEvent
    # constants (the previous FrameworkEvent names only matched by
    # coincidence of shared constant values).
    assert events[0].kind == BundleEvent.INSTALLED
    await bundle.start()
    assert events[1].kind == BundleEvent.STARTING
    assert events[2].kind == BundleEvent.STARTED
    await bundle.stop()
    assert events[3].kind == BundleEvent.STOPPING
    assert events[4].kind == BundleEvent.STOPPED
    await framework.uninstall_bundle(bundle)
    assert events[5].kind == BundleEvent.UNINSTALLED
    assert len(events) == 6
@pytest.mark.asyncio
async def test_service_events(framework, listener):
    """Service events fire on bundle start/stop and cease after removal."""
    context = framework.get_context()
    context.add_service_listener(listener)
    events = listener.events
    bundle = await framework.install_bundle(TRANSLATE_BUNDLE)
    await bundle.start()
    assert events[0].kind == ServiceEvent.REGISTERED
    await bundle.stop()
    assert events[1].kind == ServiceEvent.UNREGISTERING
    await framework.uninstall_bundle(bundle)
    assert len(events) == 2
    # Once removed, the listener must not record further events.
    context.remove_service_listener(listener)
    await bundle.start()
    await bundle.stop()
    assert len(events) == 2
@pytest.mark.asyncio
async def test_service_events_modified(framework, events, listener):
    """set_properties fires MODIFIED and protects the reserved keys."""
    context = framework.get_context()
    context.add_service_listener(listener, ITextService)
    reg = await context.register_service(ITextService, "mock service")
    ref = reg.get_reference()
    old_sort_value = ref.get_sort_value()
    await reg.set_properties(
        {
            "foo": "bar",
            OBJECTCLASS: "test",
            SERVICE_ID: 12345,
            SERVICE_BUNDLE_ID: 12345,
            SERVICE_RANKING: 12345,
        }
    )
    assert ref.get_sort_value() != old_sort_value
    props = ref.get_properties()
    # Reserved keys must not be overridden; only the ranking changes.
    assert props[OBJECTCLASS] != "test"
    assert props[SERVICE_ID] != 12345
    assert props[SERVICE_BUNDLE_ID] != 12345
    assert props[SERVICE_RANKING] == 12345
    await reg.unregister()
    assert len(listener) == 3
    assert listener.events[0].kind == ServiceEvent.REGISTERED
    assert listener.events[1].kind == ServiceEvent.MODIFIED
    assert listener.events[2].kind == ServiceEvent.UNREGISTERING
src/__init__.py | alokmenghrajani/ctfd-timed-releases-plugin | 4 | 6614370 | <reponame>alokmenghrajani/ctfd-timed-releases-plugin
from CTFd.plugins import register_plugin_assets_directory, register_plugin_script
from models import TimedReleases
from routes import plugin_blueprint, get_available_challenges
from utils import satisfies_challenge_timed_releases
def load(app):
    """Install the timed-releases plugin into a CTFd app instance."""

    def _wrap_view(view_name, decorator):
        # Replace a registered Flask view with a decorated version.
        original = app.view_functions[view_name]
        app.view_functions[view_name] = decorator(original)

    app.db.create_all()

    # Replace the challenge-list view so upcoming challenges show
    # their release time.
    app.view_functions["challenges.chals"] = get_available_challenges
    # Gate the challenge-detail view behind the release schedule.
    _wrap_view("challenges.chal_view", satisfies_challenge_timed_releases)
    # Reject solve attempts on not-yet-released challenges.
    _wrap_view("challenges.chal", satisfies_challenge_timed_releases)

    app.register_blueprint(plugin_blueprint)
    register_plugin_assets_directory(app, base_path='/plugins/ctfd-timed-releases-plugin/src/assets/')
    register_plugin_script('/plugins/ctfd-timed-releases-plugin/src/assets/countdown.js')
| from CTFd.plugins import register_plugin_assets_directory, register_plugin_script
from models import TimedReleases
from routes import plugin_blueprint, get_available_challenges
from utils import satisfies_challenge_timed_releases
def load(app):
    """Wire the timed-releases plugin into the given CTFd app."""

    def wrap_method(name, wrapper):
        # Swap a registered view function for its wrapped version.
        old = app.view_functions[name]
        app.view_functions[name] = wrapper(old)

    app.db.create_all()
    # override code which renders challenges to show when future challenges will be released.
    app.view_functions["challenges.chals"] = get_available_challenges
    # override method which render's challenge's data
    wrap_method("challenges.chal_view", satisfies_challenge_timed_releases)
    # disallow attempts to solve future challenges
    wrap_method("challenges.chal", satisfies_challenge_timed_releases)
    app.register_blueprint(plugin_blueprint)
    register_plugin_assets_directory(app, base_path='/plugins/ctfd-timed-releases-plugin/src/assets/')
    register_plugin_script('/plugins/ctfd-timed-releases-plugin/src/assets/countdown.js')
src/data/db_users.py | lickorice/archive-shalltear | 0 | 6614371 | """
A more specific database helper for the 'users' database.
To generate the database schema, run db_generate.py. This
helper inherits its generic CRUD functions from the DBHelper class.
(coded by lickorice, 2018)
"""
import sqlite3
from data import db_helper
from conf import DATABASE_PATH
# Level config: XP required to reach level L+1 is
# int(base_exp * ((L + 1) ** factor)) -- see next_level().
base_exp, factor = 50, 1.5
class UserHelper(db_helper.DBHelper):
    """Database helper for the per-user economy tables.

    Builds user-oriented operations (accounts, gil/materia currency,
    XP and levels, inventory, backgrounds, social locks and premium
    status) on top of the generic CRUD primitives inherited from
    db_helper.DBHelper.
    """

    def __init__(self, is_logged=True):
        """
        Args:
            is_logged (bool): Whether operations should be logged by
                the underlying DBHelper.
        """
        # NOTE(review): the database path is hard-coded here, yet
        # main() below tries to pass a path positionally -- confirm
        # whether the path should be a parameter.
        self.database_path = './data/db/user.db'
        self.is_logged = is_logged

    def new_user(self, user_id, user_level=1, user_xp=0, user_xp_to_next=50,
                 user_gil=10, user_materia=0):
        """Adds a new user to the database (user_id unique).

        Creates the user's rows in the ``users``, ``activities`` and
        ``social`` tables.  Each insert silently ignores the UNIQUE
        constraint violation, so calling this for an existing user is
        a no-op rather than an error.
        """
        try:
            self.insert_row(
                table_name="users",
                user_id=user_id,
                user_level=user_level,
                user_xp=user_xp,
                user_xp_to_next=user_xp_to_next,
                user_gil=user_gil,
                # Bug fix: this was hard-coded to 0, silently ignoring
                # the user_materia argument.
                user_materia=user_materia,
                user_bg_id=0
            )
        except sqlite3.IntegrityError:
            pass
        try:
            self.insert_row(
                table_name="activities",
                user_id=user_id,
                can_receive_xp=True,
                can_free_pack=True,
                can_daily=True,
                count_free_gil=0,
                count_commands=0,
                count_rolls=0,
                count_cards=0
            )
        except sqlite3.IntegrityError:
            pass
        try:
            self.insert_row(
                table_name="social",
                user_id=user_id,
                followed_twitter=False,
                followed_facebook=False,
                is_patron=False
            )
        except sqlite3.IntegrityError:
            pass

    def get_user(self, user_id):
        """Fetches user data given a user_id.

        Returns:
            dict: the "users", "activities" and "social" rows keyed by
                table name, or False when the user does not exist.
        """
        try:
            return {
                "users": self.fetch_rows("users", user_id=user_id)[0],
                "activities": self.fetch_rows("activities", user_id=user_id)[0],
                "social": self.fetch_rows("social", user_id=user_id)[0],
            }
        except IndexError:
            # At least one table had no row for this user.
            return False

    def add_gil(self, user_id, value):
        """Adds gil to the user account (may be negative)."""
        self.increment_value(user_id, "users", "user_gil", value)

    def add_materia(self, user_id, value):
        """Adds materia to the user account (may be negative)."""
        self.increment_value(user_id, "users", "user_materia", value)

    def add_xp(self, user_id, value):
        """Adds xp to the user account (may be negative).

        Returns:
            bool: True when the user now has enough XP to level up
                (callers are expected to follow up with next_level()).
        """
        self.increment_value(user_id, "users", "user_xp", value)
        stats = self.get_user(user_id)["users"]
        return stats["user_xp"] >= stats["user_xp_to_next"]

    def add_item(self, user_id, item_id, item_equipped=False):
        """Adds an item to the user account, given an id."""
        self.insert_row(
            table_name="inventory",
            owner_id=user_id,
            item_id=item_id,
            item_equipped=item_equipped
        )

    def add_bg(self, user_id, bg_id):
        """Adds a background to the user account, given an id."""
        self.insert_row(
            table_name="backgrounds",
            owner_id=user_id,
            bg_id=bg_id
        )

    def remove_item(self, user_id, item_id):
        """Removes an item from the user account, given an id."""
        self.remove_rows(
            table_name="inventory",
            owner_id=user_id,
            item_id=item_id
        )

    def check_item(self, user_id, item_id):
        """Checks whether the user owns a given item.

        Returns:
            True when at least one matching inventory row exists,
            otherwise 0 (falsy).  (The previous docstring described
            the inverse of the actual behaviour.)
        """
        matches = self.fetch_rows(
            "inventory", True,
            owner_id=user_id,
            item_id=item_id
        )
        return True if matches else 0

    def get_items(self, user_id, is_equipped=False):
        """Fetches all the items a user owns.

        Can be explicitly ordered to fetch only equipped items.
        """
        if is_equipped:
            return self.fetch_rows("inventory", True, owner_id=user_id,
                                   item_equipped=1)
        return self.fetch_rows("inventory", True, owner_id=user_id)

    def get_backgrounds(self, user_id):
        """Fetches all backgrounds the user owns."""
        return self.fetch_rows("backgrounds", True, owner_id=user_id)

    def toggle_item(self, user_id, item_id):
        """Toggles the equipped status of an item.

        Returns:
            int: 1 when the item is now equipped, 2 when it is now
                unequipped, 3 when the user does not own the item.
        """
        # Use None instead of the old fragile 'empty' string sentinel,
        # and never rely on the leaked loop variable afterwards.
        chosen_item = None
        for item in self.get_items(user_id):
            if item["item_id"] == item_id:
                chosen_item = item
                break
        if chosen_item is None:
            return 3
        toggled = not chosen_item["item_equipped"]
        self.update_column(
            "inventory",
            "item_equipped",
            toggled,
            owner_id=user_id,
            item_id=item_id
        )
        return 1 if toggled else 2

    def change_bg(self, user_id, bg_id):
        """Changes the background (id) of the user.

        Note: ownership of ``bg_id`` is not verified here.
        """
        self.update_column("users", "user_bg_id", bg_id, user_id=user_id)

    def next_level(self, user_id):
        """Automatically increments the user's level.

        Surplus XP is carried over and the next requirement becomes
        int(base_exp * ((old_level + 1) ** factor)).
        """
        current_user = self.get_user(user_id)["users"]
        remainder_exp = current_user["user_xp"] - current_user["user_xp_to_next"]
        new_next = int(base_exp * ((current_user["user_level"] + 1) ** factor))
        self.increment_value(user_id, "users", "user_level", 1)
        self.update_column("users", "user_xp", remainder_exp, user_id=user_id)
        self.update_column("users", "user_xp_to_next", new_next, user_id=user_id)

    def increment_value(self, user_id, table_name, column, value):
        """
        Automatically increments a certain value in the database.

        Args:
            user_id (int): The user ID of a discord User.
            table_name (str): The name of the table the column belongs to.
            column (str): The name of the column to be incremented.
            value (int): The value to be added (if positive) or
                subtracted (if otherwise).
        """
        # Utility function. Don't call directly.
        first_value = self.get_user(user_id)[table_name][column]
        self.update_column(table_name, column, first_value + value,
                           user_id=user_id)

    def add_lock(self, social_id, social_type):
        """
        Adds a lock to a certain user to prevent multiple Discord
        accounts from using a single social media account to
        accumulate rewards.

        Args:
            social_id (str): The ID of the user for the corresponding network
            social_type (str): The network of the user. ("TWT"/"FB")

        Returns:
            bool: True when the lock was created, False when it
                already existed.
        """
        try:
            self.insert_row(
                "social_lock",
                user_type=social_type,
                user_id=social_id + social_type
            )
            return True
        except sqlite3.IntegrityError:
            return False

    def check_premium(self, user_id):
        """Returns True when the user has a premium entry."""
        return len(self.fetch_rows("premium_users", True, user_id=user_id)) >= 1

    def add_premium(self, user_id):
        """Marks the user as premium; returns False when already premium."""
        try:
            self.insert_row("premium_users", user_id=user_id)
            return True
        except sqlite3.IntegrityError:
            return False

    def rm_premium(self, user_id):
        """Removes the user's premium status (no-op when absent)."""
        self.remove_rows("premium_users", user_id=user_id)
def main():
    """Smoke test: create a user, print it, change its background."""
    # Bug fix: the previous call passed DATABASE_PATH + "user.db" into
    # the is_logged parameter by mistake (UserHelper hard-codes its own
    # database path); behaviour is unchanged since both are truthy.
    test = UserHelper()
    test.connect()
    test.new_user(11)
    print(test.get_user(11)["users"])
    test.change_bg(11, 2)
    print(test.get_user(11)["users"])
if __name__ == '__main__':
main() | """
A more specific database helper for the 'users' database.
To generate the database, use db_generate.py; also, this
helper inherits functions from the DBHelper class.
(coded by lickorice, 2018)
"""
import sqlite3
from data import db_helper
from conf import DATABASE_PATH
# Level config: XP required to reach level L+1 is
# int(base_exp * ((L + 1) ** factor)) -- see next_level().
base_exp, factor = 50, 1.5
class UserHelper(db_helper.DBHelper):
def __init__(self, is_logged=True):
    """Set the fixed database path and the DBHelper logging flag."""
    self.database_path = './data/db/user.db'
    self.is_logged = is_logged
def new_user(self, user_id, user_level=1, user_xp=0, user_xp_to_next=50,
             user_gil=10, user_materia=0):
    """Adds a new user to the database (user_id unique).

    Creates the user's rows in the ``users``, ``activities`` and
    ``social`` tables; duplicate inserts are silently ignored, so the
    call is safe to repeat for an existing user.
    """
    try:
        self.insert_row(
            table_name="users",
            user_id=user_id,
            user_level=user_level,
            user_xp=user_xp,
            user_xp_to_next=user_xp_to_next,
            user_gil=user_gil,
            # Bug fix: this was hard-coded to 0, silently ignoring the
            # user_materia argument.
            user_materia=user_materia,
            user_bg_id=0
        )
    except sqlite3.IntegrityError:
        pass
    try:
        self.insert_row(
            table_name="activities",
            user_id=user_id,
            can_receive_xp=True,
            can_free_pack=True,
            can_daily=True,
            count_free_gil=0,
            count_commands=0,
            count_rolls=0,
            count_cards=0
        )
    except sqlite3.IntegrityError:
        pass
    try:
        self.insert_row(
            table_name="social",
            user_id=user_id,
            followed_twitter=False,
            followed_facebook=False,
            is_patron=False
        )
    except sqlite3.IntegrityError:
        pass
def get_user(self, user_id):
    """Fetches user data given a user_id.

    Returns a dict with the "users", "activities" and "social" rows,
    or False when any of them is missing.
    """
    try:
        x = {
            "users": self.fetch_rows("users", user_id=user_id)[0],
            "activities": self.fetch_rows("activities", user_id=user_id)[0],
            "social": self.fetch_rows("social", user_id=user_id)[0]
        }
    except IndexError:
        # [0] on an empty result list means the user is unknown.
        return False
    return x
def add_gil(self, user_id, value):
    """Adds gil to the user account (pass a negative value to deduct)."""
    self.increment_value(user_id, "users", "user_gil", value)
def add_materia(self, user_id, value):
    """Adds materia to the user account (pass a negative value to deduct)."""
    self.increment_value(user_id, "users", "user_materia", value)
def add_xp(self, user_id, value):
    """Adds xp to the user account (may be negative).

    Returns True when the user now has enough XP to level up.
    """
    self.increment_value(user_id, "users", "user_xp", value)
    user = self.get_user(user_id)
    if user["users"]["user_xp"] >= user["users"]["user_xp_to_next"]:
        return True
    return False
def add_item(self, user_id, item_id, item_equipped=False):
"""Adds an item to the user account, given an id."""
self.insert_row(
table_name="inventory",
owner_id=user_id,
item_id=item_id,
item_equipped=item_equipped
)
def add_bg(self, user_id, bg_id):
"""Adds a background to the user account, given an id."""
self.insert_row(
table_name="backgrounds",
owner_id=user_id,
bg_id=bg_id
)
def remove_item(self, user_id, item_id):
"""Removes an item from the user account, given an id."""
self.remove_rows(
table_name="inventory",
owner_id=user_id,
item_id=item_id
)
def check_item(self, user_id, item_id):
"""
Returns False if the item exists in the account, else,
it returns the number of such items in the account.
"""
item_query = self.fetch_rows(
"inventory", True,
owner_id=user_id,
item_id=item_id
)
result = True if len(item_query) != 0 else len(item_query)
return result
def get_items(self, user_id, is_equipped=False):
"""
Fetches all the items a user owns.
Can be explicitly ordered to fetch only equipped items.
"""
if is_equipped:
return self.fetch_rows("inventory", True, owner_id=user_id, item_equipped=1)
return self.fetch_rows("inventory", True, owner_id=user_id)
def get_backgrounds(self, user_id):
"""
Fetches all backgrounds the user owns.
"""
return self.fetch_rows("backgrounds", True, owner_id=user_id)
def toggle_item(self, user_id, item_id):
"""Toggles the equipped status of an item."""
all_items = self.get_items(user_id)
chosen_item = 'empty'
for item in all_items:
if item["item_id"] == item_id:
chosen_item = item
break
if chosen_item == 'empty':
return 3
toggled = True if not item["item_equipped"] else False
self.update_column(
"inventory",
"item_equipped",
toggled,
owner_id=user_id,
item_id=item_id
)
if toggled:
return 1
return 2
def change_bg(self, user_id, bg_id):
"""Changes the background (id) of the user."""
self.update_column("users", "user_bg_id", bg_id, user_id=user_id)
def next_level(self, user_id):
"""Automatically increments the user's level."""
current_user = self.get_user(user_id)["users"]
remainder_exp = current_user["user_xp"] - current_user["user_xp_to_next"]
new_next = int(base_exp * ((current_user["user_level"]+1) ** factor))
self.increment_value(user_id, "users", "user_level", 1)
self.update_column("users", "user_xp", remainder_exp, user_id=user_id)
self.update_column("users", "user_xp_to_next", new_next, user_id=user_id)
def increment_value(self, user_id, table_name, column, value):
"""
Automatically increments a certain value in the database.
Args:
user_id (int): The user ID of a discord User.
table_name (str): The name of the table the column belongs to.
column (str): The name of the column to be incremented.
value (int): The value to be added (if positive) or subtracted (if otherwise).
"""
# Utility function. Don't call directly.
first_value = self.get_user(user_id)[table_name][column]
self.update_column(table_name, column, (first_value+value), user_id=user_id)
def add_lock(self, social_id, social_type):
"""
Adds a lock to a certain user to prevent multiple Discord accounts
from using a single social media account to accumulate rewards.
Args:
social_id (str): The ID of the user for the corresponding network
social_type (str): The network of the user. ("TWT"/"FB")
"""
try:
self.insert_row(
"social_lock",
user_type = social_type,
user_id = social_id+social_type
)
return True
except sqlite3.IntegrityError:
return False
def check_premium(self, user_id):
x = self.fetch_rows("premium_users", True, user_id=user_id)
return len(x) >= 1
def add_premium(self, user_id):
try:
self.insert_row("premium_users", user_id=user_id)
return True
except sqlite3.IntegrityError:
return False
def rm_premium(self, user_id):
self.remove_rows("premium_users", user_id=user_id)
def main():
    """Manual smoke test: create user 11, print, change background, print."""
    # NOTE(review): UserHelper.__init__ takes (is_logged=True); this path
    # string lands in is_logged -- preserved as-is, TODO confirm intent.
    test = UserHelper(DATABASE_PATH + "user.db")
    test.connect()  # the return value was never used; dropped the dead local
    test.new_user(11)
    print(test.get_user(11)["users"])
    test.change_bg(11, 2)
    print(test.get_user(11)["users"])
if __name__ == '__main__':
main() | en | 0.683199 | A more specific database helper for the 'users' database. To generate the database, use db_generate.py; also, this helper inherits functions from the DBHelper class. (coded by lickorice, 2018) # Level config Adds a new user to the database (user_id unique). Fetches user data given a user_id. Adds gil to the user account (may be negative). Adds materia to the user account (may be negative). Adds xp to the user account (may be negative). Adds an item to the user account, given an id. Adds a background to the user account, given an id. Removes an item from the user account, given an id. Returns False if the item exists in the account, else, it returns the number of such items in the account. Fetches all the items a user owns. Can be explicitly ordered to fetch only equipped items. Fetches all backgrounds the user owns. Toggles the equipped status of an item. Changes the background (id) of the user. Automatically increments the user's level. Automatically increments a certain value in the database. Args: user_id (int): The user ID of a discord User. table_name (str): The name of the table the column belongs to. column (str): The name of the column to be incremented. value (int): The value to be added (if positive) or subtracted (if otherwise). # Utility function. Don't call directly. Adds a lock to a certain user to prevent multiple Discord accounts from using a single social media account to accumulate rewards. Args: social_id (str): The ID of the user for the corresponding network social_type (str): The network of the user. ("TWT"/"FB") | 3.345798 | 3 |
web_api/worker.py | Wason1797/FastAPI-Celery-example | 3 | 6614372 | import os
from celery import Celery
BROKER_URL = os.getenv('BROKER_URL')
BACKEND_URL = os.getenv('BACKEND_URL')
celery = Celery('celery', backend=BACKEND_URL, broker=BROKER_URL)
| import os
from celery import Celery
BROKER_URL = os.getenv('BROKER_URL')
BACKEND_URL = os.getenv('BACKEND_URL')
celery = Celery('celery', backend=BACKEND_URL, broker=BROKER_URL)
| none | 1 | 1.531154 | 2 | |
demo1/demo/library.py | arjunsatyapal/lantern | 1 | 6614373 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied substantially from rietveld:
#
# http://code.google.com/p/rietveld
#
# Removed all rietveld-specific codereview templates.
# TODO(vchen): Determine what other functionality to retain.
"""Django template library for Lantern."""
import base64
import cgi
import logging
import os
import re
import urlparse
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
import constants
import models
import yaml
import notify
# Template library instance used to register the filters and tags defined
# below (via the @register.filter / @register.tag decorators).
register = django.template.Library()
@register.filter
def subtract_one(arg):
  """Returns the given value, coerced to int, minus one."""
  return int(arg) - 1
@register.filter
def get_element(list, pos):
  """Returns the element of the sequence at index `pos`.

  (Docstring fixed: it was a copy-paste of subtract_one's.)
  NOTE: the first parameter name shadows the builtin `list`.
  """
  return list[pos]
@register.filter
def get_range(upper):
  """Returns the integers from 0 up to (but excluding) `upper`."""
  return range(upper)
@register.filter
def class_name(cls):
  """Returns the class name of the given object (not of a class)."""
  return cls.__class__.__name__
@register.filter
def get_key(cls):
  """Returns the string datastore key for an entity, or None if unsaved."""
  try:
    return str(cls.key())
  except db.NotSavedError:
    # The entity was never put() to the datastore, so it has no key yet.
    return None
@register.filter
def show_user(email, arg=None, autoescape=None, memcache_results=None):
  """Render a link to the user's dashboard, with text being the nickname.

  Args:
    email: email address (or users.User) to render.
    arg: if truthy, suppress the special-case 'me' rendering.
    autoescape: unused here; accepted for Django filter compatibility.
    memcache_results: optional dict used as a request-local cache; when
      given it is consulted before (and populated alongside) memcache.
  """
  if isinstance(email, users.User):
    email = email.email()
  if not arg:
    user = users.get_current_user()
    if user is not None and email == user.email():
      return 'me'
  # Prefer the caller-supplied dict cache; fall back to shared memcache.
  if memcache_results is not None:
    ret = memcache_results.get(email)
  else:
    ret = memcache.get('show_user:' + email)
  if ret is None:
    logging.debug('memcache miss for %r', email)
    account = models.Account.get_account_for_email(email)
    if account is not None and account.user_has_selected_nickname:
      ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
             (reverse('demo.views.show_user', args=[account.nickname]),
              cgi.escape(account.nickname)))
    else:
      # No account. Let's not create a hyperlink.
      nick = email
      if '@' in nick:
        nick = nick.split('@', 1)[0]
      ret = cgi.escape(nick)
    # Cache for 5 minutes; add() keeps an existing entry if one raced in.
    memcache.add('show_user:%s' % email, ret, 300)
    # populate the dict with the results, so same user in the list later
    # will have a memcache "hit" on "read".
    if memcache_results is not None:
      memcache_results[email] = ret
  return django.utils.safestring.mark_safe(ret)
@register.filter
def show_users(email_list, arg=None):
  """Render a comma-separated list of links to each user's dashboard."""
  if not email_list:
    # Don't waste time calling memcache with an empty list.
    return ''
  # One bulk memcache round-trip, then reuse the dict across show_user calls.
  cached = memcache.get_multi(email_list, key_prefix='show_user:')
  rendered = [show_user(email, arg, memcache_results=cached)
              for email in email_list]
  return django.utils.safestring.mark_safe(', '.join(rendered))
def get_nickname(email, never_me=False, request=None):
  """Return a nickname for an email address.

  If 'never_me' is True, 'me' is not returned even when 'email' belongs to
  the current logged in user. If 'request' is a HttpRequest, it is used to
  cache the nickname returned by models.Account.get_nickname_for_email().
  """
  if isinstance(email, users.User):
    email = email.email()
  if not never_me:
    current = request.user if request is not None else users.get_current_user()
    if current is not None and email == current.email():
      return 'me'
  if request is None:
    return models.Account.get_nickname_for_email(email)
  # Per-request memoization of nickname lookups.
  cache = getattr(request, '_nicknames', None)
  if cache is None:
    cache = request._nicknames = {}
  if email not in cache:
    cache[email] = models.Account.get_nickname_for_email(email)
  return cache[email]
class NicknameNode(django.template.Node):
  """Renders a nickname for a given email address.

  The return value is cached if a HttpRequest is available in a
  'request' template variable.

  The template tag accepts one or two arguments. The first argument is
  the template variable for the email address. If the optional second
  argument evaluates to True, 'me' as nickname is never rendered.

  Example usage:
    {% cached_nickname msg.sender %}
    {% cached_nickname msg.sender True %}
  """

  def __init__(self, email_address, never_me=''):
    """Constructor.

    'email_address' is the name of the template variable that holds an
    email address. If 'never_me' evaluates to True, 'me' won't be returned.
    """
    self.email_address = django.template.Variable(email_address)
    self.never_me = bool(never_me.strip())
    # Flipped to True by the `nicknames` tag wrapper to render a list.
    self.is_multi = False

  def render(self, context):
    try:
      email = self.email_address.resolve(context)
    except django.template.VariableDoesNotExist:
      # Unresolvable variable: render nothing rather than failing the page.
      return ''
    request = context.get('request')
    if self.is_multi:
      # In multi mode, `email` is expected to be an iterable of addresses.
      return ', '.join(get_nickname(e, self.never_me, request) for e in email)
    return get_nickname(email, self.never_me, request)
@register.tag
def nickname(parser, token):
  """Almost the same as nickname filter but the result is cached."""
  bits = token.split_contents()
  if len(bits) == 3:
    tag_name, email_address, never_me = bits
  elif len(bits) == 2:
    tag_name, email_address = bits
    never_me = ''
  else:
    raise django.template.TemplateSyntaxError(
        "%r requires exactly one or two arguments" % token.contents.split()[0])
  return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
  """Like the nickname tag, but renders a comma-separated list of addresses.

  Implemented by enabling the node's is_multi flag.
  """
  node = nickname(parser, token)
  node.is_multi = True
  return node
### functions to parse yaml files ###
def parse_yaml(path):
"""Parses input yaml file and returns a dictionary object with yaml content.
Validation of the content is done by parse_leaf and parse_node functions.
Args:
path: Path to yaml file.
Returns:
A dict object with yaml_content mapped with corresponding keys.
Raises:
IOError: If file path is not correct.
YAMLError: If unable to load yaml file.
If an error occours the dictionary object returned will contain
element 'errorMsg' containing the error message.
"""
# Read the yaml file.
try:
data_file_content = open(path).read()
# If file not valid return dictObejct with corresponding error message.
except IOError:
return {'errorMsg':'ERROR: File path not correct ' + path}
try:
data_dict = yaml.load(data_file_content)
# If file unable to load yaml content return dictObejct with corresponding
# error message.
except yaml.YAMLError, exc:
return {'errorMsg':'Error: Unable to load yaml content from %s<br> ' +
'Details:<br>\n%s'% (path, str(exc))}
if not isinstance(data_dict, dict):
return {'errorMsg':'ERROR: (DICTIONARY OBJECT EXPECTED) Error loading yaml' +
'content from ' + path }
return data_dict
def parse_node(path):
  """Parses a yaml file and validates if the file is of type node ('group').

  Args:
    path: Path to yaml file.
  Returns:
    A dict object with doc_contents mapped with corresponding keys,
    or a dict containing only 'errorMsg' on failure.
  """
  data_dict = parse_yaml(path)
  if 'errorMsg' in data_dict:
    return data_dict
  if data_dict.get(constants.YAML_TYPE_KEY) != "group":
    # Bug fix: the message used to say 'invalid leaf' for a bad node.
    return {'errorMsg': 'Error loading yaml file ( ' + path + ' ): invalid node'}
  return data_dict
def parse_leaf(path):
  """Parses a yaml file and validates if the file is of type leaf ('content').

  Args:
    path: Path to yaml file.
  Returns:
    A dict object with yaml_content mapped with corresponding keys, or a
    dict containing only 'errorMsg' on parse failure or type mismatch.
  """
  parsed = parse_yaml(path)
  if 'errorMsg' in parsed:
    return parsed
  if parsed.get(constants.YAML_TYPE_KEY) == "content":
    return parsed
  return {'errorMsg':'Error loading yaml file ( '+path+' ): invalid leaf'}
### Library function to interact with datastore ###
def insert_with_new_key(cls, parent=None, **kwargs):
  """Insert model into datastore with a random key.

  Thin wrapper that delegates to models.insert_model_with_new_key.

  Args:
    cls: Data model class (ex. models.DocModel).
    parent: optional parent argument to bind models in same entity group.
      NOTE: If parent argument is passed, key_name may not be unique across
      all entities.
    **kwargs: property values forwarded to the model constructor.
  Returns:
    Data model entity or None if error.

  TODO(mukundjha): Check for race condition.
  """
  return models.insert_model_with_new_key(cls, parent=parent, **kwargs)
def create_new_trunk_with_doc(doc_id, commit_message=None):
  """Creates a new trunk with given document as head.

  WARNING: Since we are passing the parent parameter in insert_with_new_key,
  the revision's key is only guaranteed unique among entities that have this
  trunk as an ancestor, not across all entities.

  NOTE(mukundjha): No check is done on doc_id; callers are responsible for
  validating it before it is passed (this function does not raise for a
  bad doc_id, contrary to what the old docstring claimed).

  Args:
    doc_id: String value of key of the document to be added.
    commit_message: Message to commit. If None, uses the message
      'Committed a new revision'.
  Returns:
    The created trunk.
  """
  trunk = insert_with_new_key(models.TrunkModel)
  message = commit_message or 'Committed a new revision'
  # The revision entity just has to exist in the trunk's entity group; its
  # binding was never used, so don't keep a dead local around.
  insert_with_new_key(models.TrunkRevisionModel, parent=trunk,
                      obj_ref=doc_id, commit_message=message)
  trunk.setHead(doc_id)
  trunk.put()
  return trunk
def append_to_trunk(trunk_id, doc_id, commit_message=None):
  """Appends a document to end of the trunk.

  NOTE(mukundjha): No check is done on doc_id; callers are responsible for
  validating it before it is passed.

  Args:
    trunk_id: Key of the trunk.
    doc_id: String value of key of the document to be added.
    commit_message: Message to commit. If None, uses the message
      'Committed a new revision'.
  Returns:
    The modified trunk.
  Raises:
    InvalidTrunkError: If the trunk_id is invalid.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError:
    # Bug fix: interpolate the key into the message; exception constructors
    # do not apply logging-style '%s' arguments.
    raise models.InvalidTrunkError('Trunk is not valid %s' % trunk_id)
  message = commit_message or 'Committed a new revision'
  # The revision only needs to exist in the trunk's entity group; the
  # binding was never used.
  insert_with_new_key(models.TrunkRevisionModel, parent=trunk,
                      obj_ref=doc_id, commit_message=message)
  trunk.setHead(doc_id)
  trunk.put()
  return trunk
def create_new_doc(trunk_id=None, **kwargs):
  """Creates a new document in datastore.

  If trunk_id is provided, the new document is appended to that trunk;
  otherwise a new trunk is created for it.

  Args:
    trunk_id: key(string) to the trunk to which the new document belongs.
    **kwargs: forwarded to the trunk helpers (e.g. commit_message).
  Returns:
    A DocModel object.
  Raises:
    InvalidTrunkError: If an invalid trunk id is provided.
    InvalidDocumentError: If unable to save document in data store.

  TODO(mukundjha): Check all db.put statements for exceptions.
  """
  if trunk_id:
    try:
      trunk = db.get(trunk_id)
    except db.BadKeyError:
      # Bug fix: actually interpolate the key into the message; exception
      # constructors do not apply logging-style '%s' arguments.
      raise models.InvalidTrunkError('Invalid Trunk id %s' % trunk_id)
    doc = insert_with_new_key(models.DocModel)
    doc_key = str(doc.key())
    # Transaction keeps the revision append and head update atomic.
    trunk = db.run_in_transaction(append_to_trunk, trunk.key(), doc_key,
                                  **kwargs)
  else:
    doc = insert_with_new_key(models.DocModel)
    doc_key = str(doc.key())
    trunk = db.run_in_transaction(create_new_trunk_with_doc, doc_key,
                                  **kwargs)
  if not trunk:
    # Roll back the orphaned doc if the trunk write failed.
    doc.delete()
    raise models.InvalidDocumentError('Unable to create/append to trunk')
  try:
    # Mirror the head document's title onto the trunk for cheap listings.
    tip = db.get(trunk.head)
    if isinstance(tip, models.DocModel):
      trunk.title = tip.title
      trunk.put()
  except db.BadKeyError:
    pass  # best effort: a bad head key just leaves the title unchanged
  doc.trunk_ref = trunk.key()
  doc.put()
  return doc
def fetch_doc(trunk_id, doc_id=None):
  """Fetches a document from datastore or raises InvalidDocumentError.

  If both trunk_id and doc_id are provided, returns that particular doc if
  it belongs to the given trunk; if only trunk_id is given, returns the
  head of the trunk.

  Args:
    trunk_id: Trunk to fetch the document from.
    doc_id: Document id to fetch a particular version of document.
  Returns:
    The DocModel with the given doc_id, or the head of the trunk when no
    doc_id is supplied.  (Docstring fixed: an invalid doc_id raises, it
    does not fall back to the head.)
  Raises:
    InvalidTrunkError: If trunk_id passed is invalid.
    InvalidDocumentError: If doc_id is invalid, does not belong to the
      trunk, or the trunk has no head.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError:
    # Bug fix: interpolate the key; exceptions don't apply '%s' args.
    raise models.InvalidTrunkError('Invalid trunk id: %s' % trunk_id)
  if doc_id:
    try:
      doc = db.get(doc_id)
    except db.BadKeyError:
      raise models.InvalidDocumentError('No document Found with provided key')
    # Membership check: the doc must appear as a revision of this trunk.
    # get() (limit 1) avoids the full scan that count() performed.
    trunk_revisions = models.TrunkRevisionModel.all().ancestor(trunk)
    revision_with_doc = trunk_revisions.filter('obj_ref =',
                                               str(doc.key())).get()
    if revision_with_doc is not None:
      return doc
    raise models.InvalidDocumentError("No document Found")
  # Using cached value of head stored in trunk; safe since all writes are
  # atomic and update head.
  if trunk.head:
    return db.get(trunk.head)
  raise models.InvalidDocumentError("Trunk has no head document!")
def get_doc_for_user(trunk_id, user):
  """Retrieves document based on user's visit history.

  If the user has visited a particular revision (document of a trunk),
  the user will see that document; otherwise the latest revision.

  We pass user instead of using users.get_current_user, so that this
  function can also be used while creating other pages like the teacher's
  dashboard, where the student will not be logged in.

  NOTE(mukundjha): This does not update the datastore with a new entry.
  It is up to the view to update the datastore.

  Args:
    trunk_id: Key to the referenced trunk.
    user: User whose history is to be used.
  Returns:
    Document based on user's visit history.
  Raises:
    InvalidTrunkError: If trunk_id is not valid.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError:
    # Bug fix: interpolate the key into the message; exception constructors
    # do not apply logging-style '%s' arguments.
    raise models.InvalidTrunkError('Invalid trunk %s' % trunk_id)
  # Fetch the most recent visit entry directly; the old count()-then-get()
  # pattern issued two datastore queries for one answer.
  doc_entry = models.DocVisitState.all().filter('user =', user).filter(
      'trunk_ref =', trunk).order('-last_visit').get()
  if doc_entry:
    return doc_entry.doc_ref
  return db.get(trunk.head)
def get_parent(doc):
  """Returns a parent for a document, or None if it has no parents.

  If multiple parents are present, the most recently created link wins.

  Note(mukundjha): Taking history into account makes it very heavy on
  the datastore.

  Args:
    doc: DocModel object from datastore.
  """
  latest_link = models.DocLinkModel.all().filter('doc_ref =', doc).order(
      '-created').get()
  return latest_link.from_doc_ref if latest_link else None
def get_score_for_link(link_element, user, use_history=False, recurse=False):
  """Calculates score for the DocLink object.

  Score for a link is essentially the score for the trunk pointed to by the
  link.  If the dirty bit is set on the visit entry for the referred trunk,
  scores for the doc are re-computed by calling get_accumulated_score; else
  the stored score entry for the trunk is returned.

  NOTE(mukundjha): Does not take care of cycles.

  Args:
    link_element: Link object for which score is required.
    user: User whose score is desired.
    use_history: If set user's history is used to fetch the doc.
    recurse: If set True, all the scores will be recursively computed
      and updated.
  Returns:
    Score for the link object.
  """
  if recurse:
    # Unconditional recomputation: resolve the doc, load its contents and
    # re-accumulate (which also persists the refreshed score).
    if use_history:
      doc = get_doc_for_user(link_element.trunk_ref.key(), user)
    else:
      doc = fetch_doc(link_element.trunk_ref.key())
    if use_history:
      doc_contents = get_doc_contents(doc, user, use_history=use_history)
    else:
      doc_contents = get_doc_contents_simple(doc, user)
    return get_accumulated_score(doc, user, doc_contents,
                                 use_history=use_history,
                                 recurse=recurse)
  else:
    visit_state = models.DocVisitState.all().filter('user =', user).filter(
        'trunk_ref =', link_element.trunk_ref).get()
    if visit_state and visit_state.dirty_bit:
      # Stored score is stale: recompute once (non-recursively unless the
      # caller asked otherwise).
      if use_history:
        new_doc = get_doc_for_user(link_element.trunk_ref.key(), user)
      else:
        new_doc = fetch_doc(link_element.trunk_ref.key())
      if use_history:
        doc_contents = get_doc_contents(new_doc, user,
                                        use_history=use_history)
      else:
        doc_contents = get_doc_contents_simple(new_doc, user)
      score = get_accumulated_score(new_doc, user, doc_contents,
                                    use_history=use_history,
                                    recurse=recurse)
      return score
    elif visit_state:
      # Cached score is still valid.
      return visit_state.progress_score
    else:
      # Never visited: no progress yet.
      return 0
def get_accumulated_score(doc, user, doc_contents, use_history=False,
                          recurse=False):
  """Calculate score for a doc by accumulating scores from its objects.

  Averages scores without weights, and persists the result via
  put_doc_score.  Also attaches a `score` attribute to each element.

  Args:
    doc: Document fetching the score.
    doc_contents: List of objects referenced in list of contents of the doc;
      passed separately to prevent repeated calls to the datastore.
    user: User associated with the score.
    use_history: If set user's history is used to fetch the document.
    recurse: If set True scores are recursively re-computed instead of just
      picking entries from datastore.
  Returns:
    Average (rounded int) score based on content of the document; 0 when
    there are no scorable elements.
  """
  total, count = 0, 0
  for element in doc_contents:
    if not isinstance(element, models.DocLinkModel):
      element.score = element.get_score(user)
    else:
      # Links defer to the linked trunk's (possibly recomputed) score.
      element.score = get_score_for_link(element, user,
                                         use_history=use_history,
                                         recurse=recurse)
    if element.score is not None:
      total += element.score
      count += 1
  # NOTE: when every score is 0 the first branch is skipped, but the else
  # branch stores and returns the same value (0), so behavior matches.
  if total and count:
    total = int(round(float(total)/count))
    put_doc_score(doc, user, total)
    return total
  else:
    put_doc_score(doc, user, 0)
    return 0
def put_doc_score(doc, user, score):
  """Stores progress score for a doc.

  Updates the existing visit entry if present (also clearing its dirty
  bit), else creates a new entry.

  Args:
    doc: Document fetching the score.
    user: User associated with the score.
    score: Current score.

  TODO(mukundjha): Determine if this needs to be run in a transaction.
  """
  visit_state = models.DocVisitState.all().filter('user =', user).filter(
      'trunk_ref =', doc.trunk_ref).get()
  if visit_state is None:
    insert_with_new_key(models.DocVisitState, user=user,
                        trunk_ref=doc.trunk_ref.key(),
                        doc_ref=doc.key(), progress_score=score)
    return
  visit_state.progress_score = score
  visit_state.doc_ref = doc
  visit_state.dirty_bit = False  # score is fresh again
  visit_state.put()
def get_base_url(url):
  """Returns the base of the specified URL.

  Given: http://localhost:8080/exercise?exid=trigonometry_1
  Returns: http://localhost:8080/

  Given /quiz?quiz_id=kjeiia;sk
  Returns /quiz/
  """
  parts = urlparse.urlparse(url)
  if not parts.netloc:
    # Relative URL: keep only the path, with a trailing slash.
    return parts.path + '/'
  # Absolute URL: drop path/query/fragment, keep scheme://host/.
  return urlparse.urlunparse((parts.scheme, parts.netloc, '/', '', '', ''))
def get_doc_contents_simple(doc, user):
  """Return a list of objects referred by keys in content list of a doc.

  This version loads only the referenced objects and does not try to
  resolve links, scores, etc.

  TODO(vchen): Opportunity to use memcache to store results.

  Args:
    doc: DocModel used for populating content objects.
    user: User in consideration (currently unused here; kept for symmetry
      with get_doc_contents).
  Returns:
    An ordered list of objects referenced in content list of passed doc,
    or None when `doc` is not a DocModel.  Widget elements get a
    `base_url` attribute derived from their widget_url.
  """
  if not isinstance(doc, models.DocModel):
    return None
  try:
    # First try a bulk load.
    content_list = db.get(doc.content)
  except db.BadKeyError:
    # Unfortunately, any bad key results in the exception, so now need to
    # look up one by one, omitting any bad keys.
    content_list = []
    for content_id in doc.content:
      try:
        content = db.get(content_id)
        content_list.append(content)
      except db.BadKeyError:
        pass
  for element in content_list:
    if isinstance(element, models.WidgetModel):
      element.base_url = get_base_url(element.widget_url)
  return content_list
def get_doc_contents(doc, user, resolve_links=False, use_history=False,
                     fetch_score=False, fetch_video_state=False):
  """Return a list of objects referred by keys in content list of a doc.

  NOTE(mukundjha): doc is a DocModel object and not an id.

  Args:
    doc: DocModel used for populating content objects.
    user: User in consideration.
    resolve_links: If true, links are resolved to get appropriate titles.
    use_history: Use history to resolve links.
    fetch_score: If set true score is also appended to all objects.
    fetch_video_state: If set, VideoModel objects get the video's stored
      state (paused time) as `current_time`.
  Returns:
    An ordered list of objects referenced in content list of passed doc,
    or None when `doc` is not a DocModel.
  Raises:
    BadKeyError: If an element referred to is invalid.
  """
  if not isinstance(doc, models.DocModel):
    return None
  # Get just the list of contents.
  content_list = get_doc_contents_simple(doc, user)
  # Now perform any additional resolution of titles, scores, etc.
  for element in content_list:
    if not isinstance(element, models.DocLinkModel):
      if fetch_score:
        element.score = element.get_score(user)
      # Bug fix: this used to be an 'elif', so a video's saved state was
      # never loaded when fetch_score was also requested.  The two options
      # are independent; apply both.
      if fetch_video_state and isinstance(element, models.VideoModel):
        video_state = models.VideoState.all().filter(
            'video_ref =', element).filter(
            'user =', users.get_current_user()).get()
        if video_state:
          element.current_time = video_state.paused_time
    else:
      link = element
      link_doc = None
      if resolve_links:
        if use_history:
          link_doc = get_doc_for_user(link.trunk_ref.key(), user)
        else:
          link_doc = fetch_doc(link.trunk_ref.key())
        link.default_title = link_doc.title
      if fetch_score:
        if link_doc is None:
          # Bug fix: link_doc was previously undefined (or stale from a
          # prior iteration) when fetch_score was used without
          # resolve_links; resolve the linked doc before scoring.
          link_doc = fetch_doc(link.trunk_ref.key())
        link.score = link_doc.get_score(user)
  return content_list
def put_widget_score(widget, user, score, user_data=None):
  """Stores progress score (and optional user data) for a widget.

  Updates the entry with the new score if present, else creates one.

  Args:
    widget: WidgetModel object for which score is being updated.
    user: User associated with the score.
    score: Current score. If None, the stored score is left untouched.
    user_data: Optional per-user data to be persisted on behalf of the
      widget.

  TODO(mukundjha): Determine if this needs to be run in a transaction.
  """
  state = models.WidgetProgressState.all().filter('user =', user).filter(
      'widget_ref =', widget).get()
  if state is None:
    # First report for this (user, widget) pair: create the record.
    new_score = score or 0  # never store None as a score
    if user_data:
      insert_with_new_key(
          models.WidgetProgressState, user=user,
          widget_ref=widget, progress_score=new_score, user_data=user_data)
    else:
      # Omit user_data entirely so the model default applies.
      insert_with_new_key(
          models.WidgetProgressState, user=user,
          widget_ref=widget, progress_score=new_score)
    return
  if score is not None:
    state.progress_score = score
  if user_data:
    state.user_data = user_data
  state.put()
def get_path_till_course(doc, path=None, path_trunk_set=None):
  """Gets a list of parents with root as a course.

  Useful in cases where a user lands on a random page and the page needs to
  be linked to a course.  Currently just picks the most recent parent
  recursively until a course is reached or there are no more parents.

  NOTE(mukundjha): This function is very heavy on the datastore.
  * Checking the first 1000 entries for an existing course is slightly
    better than checking all entries.

  Args:
    doc: DocModel object in consideration.
    path: accumulated list of parent DocModels (used by the recursion).
    path_trunk_set: set of trunk keys already on the path, used to avoid
      revisiting a trunk (cycle guard).
  Returns:
    A list of parent doc keys, ordered root(course)-first.
  """
  # NOTE(review): debug-level leftover at info level -- consider removing.
  logging.info('****Path RCVD %r', path)
  trunk_set = set()
  if path is None:
    path = []
  if path_trunk_set is None:
    path_trunk_set = set([doc.trunk_ref.key()])
  parent_entry = models.DocLinkModel.all().filter(
      'trunk_ref =', doc.trunk_ref).order(
      '-created').fetch(1000)
  # Flag is set if an alternate path is picked.
  alternate_picked_flag = 0
  alternate_parent = None
  for parent in parent_entry:
    if parent.from_trunk_ref.key() not in trunk_set:
      trunk_set.add(parent.from_trunk_ref.key())
      if parent.from_trunk_ref.key() not in path_trunk_set:
        # Remember the most recent usable parent as a fallback.
        if not alternate_picked_flag:
          alternate_parent = parent
          alternate_picked_flag = 1
        if parent.from_doc_ref.label == models.AllowedLabels.COURSE:
          # Found a course parent: the path is complete.
          path_trunk_set.add(parent.from_trunk_ref.key())
          path.append(parent.from_doc_ref)
          path.reverse()
          path_to_return = [el.key() for el in path]
          return path_to_return
  if alternate_parent:
    # No direct course parent: climb via the best non-course parent.
    parent = alternate_parent
    if parent.from_trunk_ref.key() not in path_trunk_set:
      path_trunk_set.add(parent.from_trunk_ref.key())
      path.append(parent.from_doc_ref)
      path_to_return = get_path_till_course(parent.from_doc_ref,
                                            path, path_trunk_set)
    else:
      path.reverse()
      path_to_return = [el.key() for el in path]
  else:
    # No parents at all: return whatever path has accumulated.
    path.reverse()
    path_to_return = [el.key() for el in path]
  return path_to_return
def get_or_create_session(widget, user):
  """Retrieves or creates a progress-state entry for the (user, widget) pair.

  The session id is assumed to be the key of the WidgetProgressState entry
  for the widget; if none exists yet, a new one is created.

  Args:
    widget: WidgetModel object for which session id is required.
    user: Associated user.
  Returns:
    An instance of the WidgetProgressState model.
  """
  state = models.WidgetProgressState.all().filter('user =', user).filter(
      'widget_ref =', widget).get()
  if state is None:
    state = insert_with_new_key(models.WidgetProgressState, user=user,
                                widget_ref=widget, progress_score=None)
  return state
def set_dirty_bits_for_doc(doc, user):
  """Sets dirty bit for all the parents in the path used to reach doc.

  The dirty bit indicates that the stored score for a doc is stale and
  needs to be recomputed.

  TODO(mukundjha): We should check for the path, or pass path as parameter.
  TODO(mukundjha): Maybe we should bind this with the actual doc rather
  than the trunk.

  Args:
    doc: Document for which score has just been updated.
    user: Associated user.
  """
  # NOTE(review): this TraversalPath query is not filtered by user, unlike
  # update_visit_stack's -- TODO confirm whether that is intentional.
  stack = models.TraversalPath.all().filter(
      'current_trunk =', doc.trunk_ref).get()
  if not stack:
    return
  for ancestor_key in stack.path:
    ancestor = db.get(ancestor_key)
    entry = models.DocVisitState.all().filter(
        'trunk_ref =', ancestor.trunk_ref).filter(
        'user =', user).get()
    if entry:
      entry.dirty_bit = True
      entry.put()
def update_visit_stack(doc, parent, user):
  """Updates the visit stack for a particular doc.

  Path appends parent to parent's path and sets as path for current doc.
  If parent is itself a course, only parent is added to the path as paths
  are rooted at course level.

  NOTE(mukundjha): Currently stack stores doc_ids, we could replace this with,
  trunk_id, doc_id, doc.title to reduce the datastore load.

  Args:
    doc: DocModel object for which visit stack is to be updated.
    parent: DocModel object - parent of the provided doc or None.
    user: Associated user.
  Returns:
    Updated visit stack entry object.
  """
  # Existing stack entry for this doc's trunk (one per user), if any.
  doc_visit_stack = models.TraversalPath.all().filter(
      'current_trunk =', doc.trunk_ref).filter(
      'user =', user).get()
  if parent:
    if parent.label == models.AllowedLabels.COURSE:
      # Paths are rooted at course level: the course alone is the path.
      path = [parent.key()]
    else:
      parent_visit_stack = models.TraversalPath.all().filter(
          'current_trunk =', parent.trunk_ref).filter(
          'user =', user).get()
      if not parent_visit_stack:
        # Parent has no stack yet; derive one from the doc-link graph.
        path_for_parent = get_path_till_course(parent)
        parent_visit_stack = insert_with_new_key(
            models.TraversalPath, current_trunk=parent.trunk_ref,
            current_doc=parent, path=path_for_parent, user=user)
      path = []
      cycle_detected = 0
      # Checking for loop: copy the parent's path, truncating early if this
      # doc's trunk (or the parent's own trunk) already appears on it.
      for el in parent_visit_stack.path:
        element = db.get(el)
        if element.trunk_ref.key() == doc.trunk_ref.key():
          # This doc is already on the path: stop before re-adding it.
          cycle_detected = 1
          break
        elif element.trunk_ref.key() == parent.trunk_ref.key():
          # Parent already appears on its own path; keep it as the last hop.
          path.append(el)
          cycle_detected = 1
          break
        else:
          path.append(el)
      if not cycle_detected:
        path.append(parent.key())
    if doc_visit_stack:
      doc_visit_stack.path = path
      doc_visit_stack.put()
    else:
      doc_visit_stack = insert_with_new_key(
          models.TraversalPath, current_doc=doc,
          current_trunk=doc.trunk_ref, path=path, user=user)
  # If parent is not present
  elif not doc_visit_stack:
    # Gets set of parents.
    path = get_path_till_course(doc)
    doc_visit_stack = insert_with_new_key(
        models.TraversalPath, current_trunk=doc.trunk_ref,
        current_doc=doc, path=path, user=user)
  return doc_visit_stack
def update_recent_course_entry(recent_doc, course, user):
  """Updates the entry for recent course visited/accessed.

  Note(mukundjha): instead of using course.get_score() we should
  use the get_accumulated_score() with recurse=True, but it would
  be too costly to do it on every update.  Therefore it is better to
  push the score-change/delta up the tree on every update.

  Args:
    recent_doc: Latest doc accessed for the course.
    course: Course to be updated.
    user: User for whom update is to be made.
  Returns:
    The updated RecentCourseState entry, or None when course is not a course.
  """
  # Only genuine courses get a recent-course entry.
  if course.label != models.AllowedLabels.COURSE:
    return None
  course_entry = models.RecentCourseState.all().filter('user =', user).filter(
      'course_trunk_ref =', course.trunk_ref).get()
  visit_state = models.DocVisitState.all().filter('user =', user).filter(
      'trunk_ref =', course.trunk_ref).get()
  # A set dirty bit means the cached score is stale: recompute it.
  if visit_state and visit_state.dirty_bit:
    contents = get_doc_contents_simple(course, user)
    score = get_accumulated_score(course, user, contents)
  else:
    score = course.get_score(user)
  if course_entry:
    course_entry.last_visited_doc_ref = recent_doc
    course_entry.course_doc_ref = course
    course_entry.course_score = score
    course_entry.put()
    return course_entry
  return insert_with_new_key(models.RecentCourseState,
                             course_trunk_ref=course.trunk_ref,
                             course_doc_ref=course,
                             last_visited_doc_ref=recent_doc,
                             course_score=score,
                             user=user)
def get_recent_in_progress_courses(user):
  """Gets a list of recent courses in progress.

  Recomputes scores if score entry for course is stale.

  NOTE(review): the loop deliberately walks *every* RecentCourseState entry
  for the user, even after five in-progress courses have been collected --
  this preserves the side effect of refreshing every stale score, at the
  cost of extra datastore reads.

  Args:
    user: User under consideration.
  Returns:
    List of at most five recent course entries with score below 100.
  """
  recent_list = models.RecentCourseState.all().filter('user =', user).order(
      '-time_stamp')
  in_progress = []
  num_to_pick = 5
  for entry in recent_list:
    visit_state = models.DocVisitState.all().filter('user =', user).filter(
        'trunk_ref =', entry.course_trunk_ref).get()
    if visit_state and visit_state.dirty_bit:
      # Score is stale: recompute from the course contents and persist it.
      course = fetch_doc(entry.course_trunk_ref.key())
      doc_contents = get_doc_contents_simple(course, user)
      score = get_accumulated_score(course, user, doc_contents)
      entry.course_score = score
      entry.put()
    else:
      score = entry.course_score
    # Pick only unfinished courses (score < 100), up to five of them.
    if score < 100 and num_to_pick:
      num_to_pick -= 1
      in_progress.append(entry)
  return in_progress
def expand_path(path, user, use_history, absolute):
  """Expands the path into objects based on the parameters.

  Absolute addressing is given preference over the other modes.

  Args:
    path: List of doc keys forming a traversal path.
    user: User associated with the request.
    use_history: If set, the user's visit history is used to resolve each
        trunk to a doc.
    absolute: If set, the exact docs named by the keys are returned.

  Returns:
    List of DocModel objects corresponding to the keys in path.
  """
  docs = db.get(path)  # Bulk fetch; returns a list.
  if absolute:
    return docs
  if use_history:
    return [get_doc_for_user(d.trunk_ref.key(), user) for d in docs]
  # Default: resolve each trunk to its latest revision.
  return [fetch_doc(d.trunk_ref.key()) for d in docs]
def show_changes(pre, post):
  """Displays diffs between two models.

  NOTE(review): this calls pre.HtmlDiff(pre, post), passing the receiver
  again as the first positional argument.  That is only correct if HtmlDiff
  is an unbound/static helper stored on the model -- confirm against the
  model definition.
  """
  return pre.HtmlDiff(pre, post)
def get_doc_annotation(doc, user, doc_contents=None):
  """Retrieve annotation placeholder keys for a given doc.

  NOTE: This does not retrieve the actual annotations, only placeholders
  keyed by content IDs.  The annotations themselves are fetched via AJAX
  using the foreign keys (trunk_id, doc_id, content_id, user).

  Args:
    doc: DocModel that is possibly annotated.
    user: User in consideration.
    doc_contents: Optional list of doc's contents.  When None, the list is
        loaded from the datastore.

  Returns:
    A dict of { content_key: placeholder_spec } for the doc's contents, or
    None when doc is not a DocModel or user is missing.
  """
  if user is None or not isinstance(doc, models.DocModel):
    return None
  if not doc_contents:
    doc_contents = get_doc_contents_simple(doc, user)
  placeholders = {}
  for element in doc_contents:
    element_key = str(element.key())
    placeholders[element_key] = {'data': '', 'key': element_key}
  return placeholders
def _get_doc_content_annotation(trunk_id, doc_id, content_id, user):
  """Retrieves user-annotation for a given doc content.

  This is an internal work routine that uses the memcache to cache the key name
  of the annotation object associated with the specified content.  On a cache
  miss it falls back to datastore queries and creates an empty annotation
  record when none exists yet.

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel, DocLinkModel,
        etc.).
    user: User in consideration.

  Returns:
    An instance of models.AnnotationState or None.
  """
  if not user:
    return None
  # The cache key encodes the full foreign key of the annotation.
  cache_key = '|'.join([trunk_id, doc_id, content_id, str(user)])
  key_name = memcache.get(cache_key, namespace='anno')
  if key_name:
    return models.AnnotationState.get_by_key_name(key_name)
  try:
    doc = fetch_doc(trunk_id, doc_id=doc_id)
  except models.InvalidTrunkError, e:
    logging.error('Error loading doc for annotation: %r' % e)
    return None
  content = None
  try:
    content = db.get(content_id)
  except db.BadKeyError:
    # Bad content key: fall through to the error log below.
    pass
  if not content:
    logging.error('Cannot locate content for annotation: %r' % content_id)
    return None
  query = (models.AnnotationState.all()
           .filter('user =', user)
           .filter('trunk_ref =', doc.trunk_ref)
           .filter('doc_ref =', doc)
           .filter('object_ref =', content))
  if query.count() == 0:
    # First access: create an empty annotation record for this content.
    anno = models.AnnotationState(user=user,
                                  doc_ref=doc,
                                  trunk_ref=doc.trunk_ref,
                                  object_ref=content)
    anno.annotation_data = ''
    anno.put()
  else:
    anno = query.get()
  # Remember the entity's key name so later lookups can skip the queries.
  memcache.set(cache_key, anno.key().name(), namespace='anno')
  return anno
def get_annotation_data(trunk_id, doc_id, content_id, user):
  """Retrieves user-annotation contents for a given doc content.

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel, DocLinkModel,
        etc.).
    user: User in consideration.

  Returns:
    The user annotation data as a JSON encoded blob, or '' when no
    annotation can be resolved.
  """
  anno = _get_doc_content_annotation(trunk_id, doc_id, content_id, user)
  return anno.annotation_data if anno else ''
def update_doc_content_annotation(trunk_id, doc_id, content_id, user, data):
  """Updates user-annotation for a given doc content.

  Uses the memcache (via _get_doc_content_annotation) to locate the record.

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel, DocLinkModel,
        etc.).
    user: User in consideration.
    data: The annotation data as a string; stored utf-8 encoded.
  """
  anno = _get_doc_content_annotation(trunk_id, doc_id, content_id, user)
  if not anno:
    return
  # NEEDSWORK(jch):
  # AnnotationState stores annotation_data as one serialized blob, so the
  # data is opaque at this library layer -- serialization happens between
  # the views layer and the browser JS (and JSON handling probably does not
  # belong here).  This is somewhat awkward; perhaps AnnotationState should
  # grow logical fields over time.
  anno.annotation_data = data.encode('utf-8')
  anno.put()
def get_notepad(key, user):
  """Get notepad contents for an object.

  Args:
    key: key to a NotePadModel object.
    user: the user the NotePadState belongs to.

  Returns:
    All matching notepad entries joined by blank lines, or '' if none.
  """
  target = db.get(key)
  entries = (models.NotePadState.all()
             .filter('user =', user)
             .filter('object_ref =', target))
  if entries.count() == 0:
    return ""
  return "\n\n".join(entry.notepad_data for entry in entries)
def update_notepad(key, user, text):
  """Update notepad contents for an object.

  Args:
    key: key to a NotePadModel object.
    user: the user the NotePadState belongs to.
    text: the updated contents.
  """
  ob = db.get(key)
  # Fetch the first matching entry directly; Query.get() returns None when
  # there is no match.  (The original issued a count() followed by an
  # indexed fetch -- two datastore round trips for the same result.)
  notepad = (models.NotePadState.all()
             .filter('user =', user)
             .filter('object_ref =', ob)
             .get())
  if notepad is None:
    notepad = models.NotePadState(user=user, object_ref=ob)
  notepad.notepad_data = text
  notepad.put()
def view_doc_param(doc, visit, current, came_from):
  """Helper for getPrevNextLinks.

  NOTE(review): the 'current' parameter is unused in this body.

  Args:
    doc: target document to go to
    visit: current visit stack
    current: current document (logically the tip of visit)
    came_from: document we are leaving from
  Returns:
    URL parameter to visit the doc, marking that it came from here, as a
    list of (name, value) string pairs, or None when doc is missing.
  """
  if not doc:
    return None
  param = [ ('trunk_id', str(doc.trunk_ref.key())),
            ('doc_id', str(doc.key())) ]
  if visit:
    # Scan the visit path backwards for doc; its predecessor on the path
    # (if any) is reported as the parent.
    depth = len(visit.path) - 1
    while (0 < depth) and (visit.path[depth] != doc.key()):
      depth -= 1
    if 0 < depth:
      parent = db.get(visit.path[depth - 1])
    else:
      # doc sits at the root of the path (or is not on it): no parent.
      parent = None
    if parent:
      param.extend([ ('parent_trunk', str(parent.trunk_ref.key())),
                     ('parent_id', str(parent.key())) ])
  if came_from:
    param.extend([ ('came_from', str(came_from.key())) ])
  return param
def getPrevNextLinks(doc, visit, came_from):
  """Compute where to go next.

  Args:
    doc: this document
    visit: traversal path from top to this document
    came_from: the document the user came from, when different from parent
  Returns:
    A (prev_param, next_param) tuple, where
    prev_param: URL parameters to feed to view to go to natural "previous" page
    next_param: URL parameters to feed to view to go to natural "next" page
  """
  # TODO: the "prev" half is not yet computed nor used.
  prev_param = None
  # If we came back from down below, visit the next child (no "immediate
  # adjacency" required --- we have been showing this document already).
  # If we came from top-down navigation, we do not have came_from; visit
  # the first child in that case, and pretend as if the user just navigated
  # in the usual top-down fashion (i.e. no need for came_from).
  next = doc.first_child_after(came_from)
  next_came_from = None
  here = doc
  if (not next) and visit and visit.path:
    # We ran out of our children, so go back to our parent.
    # visit.path should be the path from the root down to doc.
    depth, child = len(visit.path), doc
    while (0 < depth):
      depth -= 1
      parent = db.get(visit.path[depth])
      # After visiting child inside parent, "next_child_or_self" is either
      # the target of a link to the child that immediately follows the
      # link to this child, or the parent itself if the link to this child
      # is followed by a non-link material, or None which tells us to
      # ask the grandparent what to do.
      next = parent.next_child_or_self(child)
      if next:
        if next == parent:
          # parent has a non-link after the link to this child
          # revisit the parent to show that non-link, and remember
          # to visit the link after that child
          next_came_from = child
        else:
          # following the link to the next child, as if we came
          # directly from the top
          next_came_from = None
        here = parent
        break
      else:
        # No sibling follows us inside this parent: climb one more level.
        child = parent
  next_param = view_doc_param(next, visit, here, next_came_from)
  return (prev_param, next_param)
def auto_subscribe(user, trunk):
  """Auto-subscribe the user who edited to further changes of the page.

  Args:
    user: the user who edited this page
    trunk: the trunk object that represents the page
  Returns:
    Whatever notify.setSubscription returns.
  """
  # The literal 1 enables the subscription (see notify.setSubscription).
  return notify.setSubscription(user, trunk, 1)
| # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied substantially from rietveld:
#
# http://code.google.com/p/rietveld
#
# Removed all rietveld-specific codereview templates.
# TODO(vchen): Determine what other functionality to retain.
"""Django template library for Lantern."""
import base64
import cgi
import logging
import os
import re
import urlparse
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
import constants
import models
import yaml
import notify
# For registering filter and tag libs.
register = django.template.Library()
@register.filter
def subtract_one(arg):
  """Template filter: return the given number minus one."""
  return int(arg) - 1
@register.filter
def get_element(list, pos):
  """Template filter: return the element of the list at the given position.

  (The original docstring was a copy-paste from subtract_one.)
  """
  return list[pos]
@register.filter
def get_range(upper):
  """Template filter: return a list of the integers 0..upper-1."""
  return list(range(upper))
@register.filter
def class_name(cls):
  """Template filter: return the name of the object's class."""
  # __class__ (rather than type()) also works for old-style class instances.
  name_of_class = cls.__class__.__name__
  return name_of_class
@register.filter
def get_key(cls):
  """Template filter: return the datastore key string, or None if unsaved."""
  try:
    return str(cls.key())
  except db.NotSavedError:
    # The entity has never been put(), so it has no key yet.
    return None
@register.filter
def show_user(email, arg=None, autoescape=None, memcache_results=None):
  """Render a link to the user's dashboard, with text being the nickname.

  Args:
    email: email address string, or a users.User object.
    arg: when falsy, the current user's own address is rendered as 'me'.
    autoescape: unused; part of the Django filter calling convention.
    memcache_results: optional dict of already-rendered results keyed by
        email (populated by show_users via memcache.get_multi); when None,
        the shared memcache is consulted directly.
  Returns:
    A safe HTML string.
  """
  if isinstance(email, users.User):
    email = email.email()
  if not arg:
    user = users.get_current_user()
    if user is not None and email == user.email():
      return 'me'
  # Prefer the caller-supplied result dict; fall back to shared memcache.
  if memcache_results is not None:
    ret = memcache_results.get(email)
  else:
    ret = memcache.get('show_user:' + email)
  if ret is None:
    logging.debug('memcache miss for %r', email)
    account = models.Account.get_account_for_email(email)
    if account is not None and account.user_has_selected_nickname:
      ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
             (reverse('demo.views.show_user', args=[account.nickname]),
              cgi.escape(account.nickname)))
    else:
      # No account. Let's not create a hyperlink.
      nick = email
      if '@' in nick:
        nick = nick.split('@', 1)[0]
      ret = cgi.escape(nick)
    # Cache the rendered snippet for five minutes.
    memcache.add('show_user:%s' % email, ret, 300)
    # populate the dict with the results, so same user in the list later
    # will have a memcache "hit" on "read".
    if memcache_results is not None:
      memcache_results[email] = ret
  return django.utils.safestring.mark_safe(ret)
@register.filter
def show_users(email_list, arg=None):
  """Render a comma-separated list of links to each user's dashboard."""
  if not email_list:
    # Don't waste time calling memcache with an empty list.
    return ''
  cached = memcache.get_multi(email_list, key_prefix='show_user:')
  rendered = [show_user(email, arg, memcache_results=cached)
              for email in email_list]
  return django.utils.safestring.mark_safe(', '.join(rendered))
def get_nickname(email, never_me=False, request=None):
  """Return a nickname for an email address.

  If 'never_me' is True, 'me' is not returned if 'email' belongs to the
  current logged in user. If 'request' is a HttpRequest, it is used to
  cache the nickname returned by models.Account.get_nickname_for_email().
  """
  if isinstance(email, users.User):
    email = email.email()
  if not never_me:
    if request is not None:
      user = request.user
    else:
      user = users.get_current_user()
    if user is not None and email == user.email():
      return 'me'
  if request is None:
    return models.Account.get_nickname_for_email(email)
  else:
    # Per-request memoization of nickname lookups.
    if getattr(request, '_nicknames', None) is None:
      request._nicknames = {}
    if email in request._nicknames:
      return request._nicknames[email]
    result = models.Account.get_nickname_for_email(email)
    request._nicknames[email] = result
    return result
class NicknameNode(django.template.Node):
  """Renders a nickname for a given email address.

  The return value is cached if a HttpRequest is available in a
  'request' template variable.

  The template tag accepts one or two arguments. The first argument is
  the template variable for the email address. If the optional second
  argument evaluates to True, 'me' as nickname is never rendered.

  Example usage:
    {% cached_nickname msg.sender %}
    {% cached_nickname msg.sender True %}
  """

  def __init__(self, email_address, never_me=''):
    """Constructor.

    'email_address' is the name of the template variable that holds an
    email address. If 'never_me' evaluates to True, 'me' won't be returned.
    """
    self.email_address = django.template.Variable(email_address)
    self.never_me = bool(never_me.strip())
    # Set to True by the 'nicknames' tag to render an iterable of emails.
    self.is_multi = False

  def render(self, context):
    """Resolves the email variable and renders the nickname(s)."""
    try:
      email = self.email_address.resolve(context)
    except django.template.VariableDoesNotExist:
      return ''
    request = context.get('request')
    if self.is_multi:
      return ', '.join(get_nickname(e, self.never_me, request) for e in email)
    return get_nickname(email, self.never_me, request)
@register.tag
def nickname(parser, token):
  """Almost the same as nickname filter but the result is cached."""
  parts = token.split_contents()
  if len(parts) == 3:
    tag_name, email_address, never_me = parts
  elif len(parts) == 2:
    tag_name, email_address = parts
    never_me = ''
  else:
    raise django.template.TemplateSyntaxError(
        "%r requires exactly one or two arguments" % token.contents.split()[0])
  return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
  """Wrapper for nickname tag with is_multi flag enabled.

  Renders a comma-separated list of nicknames for an iterable of emails.
  """
  node = nickname(parser, token)
  node.is_multi = True
  return node
### functions to parse yaml files ###
def parse_yaml(path):
  """Parses input yaml file and returns a dictionary object with yaml content.

  Validation of the content is done by parse_leaf and parse_node functions.

  Args:
    path: Path to yaml file.
  Returns:
    A dict object with yaml_content mapped with corresponding keys.  If an
    error occurs the returned dictionary contains an element 'errorMsg'
    holding the error message.
  """
  # Read the yaml file.
  try:
    data_file_content = open(path).read()
  # If the file cannot be read, report the error in the returned dict.
  except IOError:
    return {'errorMsg': 'ERROR: File path not correct ' + path}
  try:
    data_dict = yaml.load(data_file_content)
  # If the yaml content cannot be parsed, report the error in the dict.
  except yaml.YAMLError as exc:
    # BUG FIX: the original applied the %-formatting to only the second of
    # two implicitly concatenated string literals while supplying two
    # arguments, which raised TypeError instead of returning the message.
    return {'errorMsg': ('Error: Unable to load yaml content from %s<br> '
                         'Details:<br>\n%s' % (path, str(exc)))}
  if not isinstance(data_dict, dict):
    # BUG FIX: the original concatenation produced 'yamlcontent' (no space).
    return {'errorMsg': 'ERROR: (DICTIONARY OBJECT EXPECTED) Error loading '
                        'yaml content from ' + path}
  return data_dict
def parse_node(path):
  """Parses a yaml file and validates if the file is of type node.

  Args:
    path: Path to yaml file.
  Returns:
    A dict object with doc_contents mapped with corresponding keys,
    or with appropriate error message.
  """
  data_dict = parse_yaml(path)
  if 'errorMsg' in data_dict:
    return data_dict
  # A node file must declare itself as a "group".
  if data_dict.get(constants.YAML_TYPE_KEY) != "group":
    # BUG FIX: message previously said 'invalid leaf' (copy-paste from
    # parse_leaf) although this function validates node/group files.
    return {'errorMsg':'Error loading yaml file ( '+path+' ): invalid node'}
  return data_dict
def parse_leaf(path):
  """Parses a yaml file and validates if the file is of type leaf.

  Args:
    path: Path to yaml file.
  Returns:
    A dict object with yaml_content mapped with corresponding keys,
    or with appropriate error message, if there is a type mismatch.
  """
  parsed = parse_yaml(path)
  if 'errorMsg' in parsed:
    return parsed
  # A leaf file must declare itself as "content".
  if parsed.get(constants.YAML_TYPE_KEY) == "content":
    return parsed
  return {'errorMsg':'Error loading yaml file ( '+path+' ): invalid leaf'}
### Library function to interact with datastore ###
def insert_with_new_key(cls, parent=None, **kwargs):
  """Insert model into datastore with a random key.

  Args:
    cls: Data model class (ex. models.DocModel).
    parent: optional parent argument to bind models in same entity group.
      NOTE: If parent argument is passed, key_name may not be unique across
      all entities.
  Returns:
    Data model entity or None if error.

  TODO(mukundjha): Check for race condition.
  """
  # Thin delegate: the key-generation logic lives in models.
  return models.insert_model_with_new_key(cls, parent=parent, **kwargs)
def create_new_trunk_with_doc(doc_id, commit_message=None):
  """Creates a new trunk whose head is the given document.

  WARNING: Since a parent entity is passed to insert_with_new_key, the key
  name is only checked for uniqueness among entities sharing the trunk as
  an ancestor, not across all entities.

  NOTE(mukundjha): doc_id is not validated here; callers of
  create_new_trunk_with_doc are responsible for checking it first.

  Args:
    doc_id: String value of key of the document to be added.
    commit_message: Message to commit; defaults to 'Committed a new revision'.
  Returns:
    The newly created trunk.
  Raises:
    InvalidDocumentError: If the doc_id is invalid.
  """
  new_trunk = insert_with_new_key(models.TrunkModel)
  # Record the revision under the trunk's entity group.
  insert_with_new_key(
      models.TrunkRevisionModel, parent=new_trunk, obj_ref=doc_id,
      commit_message=commit_message or 'Committed a new revision')
  new_trunk.setHead(doc_id)
  new_trunk.put()
  return new_trunk
def append_to_trunk(trunk_id, doc_id, commit_message=None):
  """Appends a document to end of the trunk.

  NOTE(mukundjha): No check is done on doc_id, it's responsibility of
  other functions calling append_to_trunk to check the parameter
  before its passed.

  Args:
    trunk_id: Key of the trunk.
    doc_id: String value of key of the document to be added.
    commit_message: Message to commit, If None, uses the message,
      'Committed a new revision'.
  Returns:
    Returns modified trunk.
  Raises:
    InvalidDocumentError: If the doc_id is invalid.
    InvalidTrunkError: If the trunk_id is invalid.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError, e:
    raise models.InvalidTrunkError('Trunk is not valid %s',
                                   trunk_id)
  message = commit_message or 'Committed a new revision'
  # The returned revision is unused; the insert itself records history
  # under the trunk's entity group.
  trunk_revision = insert_with_new_key(models.TrunkRevisionModel, parent=trunk,
                                       obj_ref=doc_id, commit_message=message)
  trunk.setHead(doc_id)
  trunk.put()
  return trunk
def create_new_doc(trunk_id=None, **kwargs):
  """Creates a new document in datastore.

  If trunk_id is provided, new document is appended to the trunk.
  Else a new trunk is created.

  Args:
    trunk_id: key(string) to the trunk to which the new document belongs.
  Returns:
    A DocModel object.
  Raises:
    InvalidTrunkError: If an invalid trunk id is provided
    InvalidDocumentError: If unable to save document in data store

  TODO(mukundjha): Check all db.put statements for exceptions.
  """
  if trunk_id:
    try:
      trunk = db.get(trunk_id)
    except db.BadKeyError, e:
      raise models.InvalidTrunkError('Invalid Trunk id %s', str(trunk_id))
    doc = insert_with_new_key(models.DocModel)
    doc_key = str(doc.key())
    # Transactional so the trunk head and revision history stay consistent.
    trunk = db.run_in_transaction(append_to_trunk, trunk.key(), doc_key,
                                  **kwargs)
  else:
    doc = insert_with_new_key(models.DocModel)
    doc_key = str(doc.key())
    trunk = db.run_in_transaction(create_new_trunk_with_doc, doc_key,
                                  **kwargs)
  if not trunk:
    # Trunk creation/append failed: remove the orphan document.
    doc.delete()
    raise models.InvalidDocumentError('Unable to create/append to trunk')
  try:
    # Mirror the head document's title onto the trunk for cheap listings.
    tip = db.get(trunk.head)
    if isinstance(tip, models.DocModel):
      trunk.title = tip.title
      trunk.put()
  except db.BadKeyError, e:
    pass
  doc.trunk_ref = trunk.key()
  doc.put()
  return doc
def fetch_doc(trunk_id, doc_id=None):
  """Fetches a document from datastore or raises InvalidDocumentError.

  If both trunk_id and doc_id are provided, return particular doc if it belongs
  to the given trunk, else return head of the trunk.

  Args:
    trunk_id: Trunk to fetch the document from.
    doc_id: Document id to fetch a particular version of document.
  Returns:
    A DocModel object which having provided trunk_id and doc_id, if only
    trunk_id is provided or an invalid doc_id is provided head of the
    trunk is returned.
  Raises:
    InvalidTrunkError: If trunk_id passed is invalid.
    InvalidDocumentError: If doc_id does not belong to the trunk, or the
      trunk has no head.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError, e:
    raise models.InvalidTrunkError('Invalid trunk id: %s', trunk_id)
  if doc_id:
    try:
      doc = db.get(doc_id)
    except db.BadKeyError, e:
      raise models.InvalidDocumentError('No document Found with provided key')
    # Accept the doc only if some revision of this trunk references it.
    trunk_revisions = models.TrunkRevisionModel.all().ancestor(trunk)
    trunk_revision_with_doc = trunk_revisions.filter('obj_ref =',
                                                     str(doc.key()))
    if trunk_revision_with_doc.count():
      return doc
    else:
      raise models.InvalidDocumentError("No document Found")
  # Using cached value of head stored in trunk, should be fine since all
  # writes are atomic and updates head.
  if trunk.head:
    return db.get(trunk.head)
  else:
    raise models.InvalidDocumentError("Trunk has no head document!")
def get_doc_for_user(trunk_id, user):
  """Retrieves document based on user's visit history.

  If the user has visited a particular revision (document of a trunk),
  user will see that document, else user will be directed to the
  latest revision.

  We pass user instead of using users.get_current_user, so that this function
  could also be used while creating other pages like teacher's dashboard etc.,
  where student will not be logged in.

  NOTE(mukundjha): This does not update the datastore with new entry.
  It is upto the view to update the datastore.

  Args:
    trunk_id: Key to the referenced trunk.
    user: User whose history is to be used.
  Returns:
    Document based on user's visit history.
  Raises:
    InvalidTrunkError: If trunk_id is not valid.
  """
  try:
    trunk = db.get(trunk_id)
  except db.BadKeyError, e:
    raise models.InvalidTrunkError('Invalid trunk %s', trunk_id)
  # Most recent visit entry wins.
  query = models.DocVisitState.all().filter('user =', user).filter(
      'trunk_ref =', trunk).order('-last_visit')
  if query.count():
    doc_entry = query.get()
    return doc_entry.doc_ref
  else:
    # No history: fall back to the trunk's head revision.
    doc = db.get(trunk.head)
    return doc
def get_parent(doc):
  """Returns a parent for a document.

  If multiple parents are present, the most recently created link wins.

  Note(mukundjha): Taking history into account makes it a very heavy on
  datastore.

  Args:
    doc: DocModel object from datastore.
  Returns:
    Document which is parent of doc passed, or None if there are no parents.
  """
  newest_link = models.DocLinkModel.all().filter('doc_ref =', doc).order(
      '-created').get()
  return newest_link.from_doc_ref if newest_link else None
def get_score_for_link(link_element, user, use_history=False, recurse=False):
  """Calculates score for the DocLink object.

  Score for a link is essentially score for the trunk pointed by the link.
  If dirty bit is set for the visit entry for the referred trunk scores
  for the doc are re-computed by calling get_accumulated_score, else
  score entry for the trunk is fetched.

  NOTE(mukundjha): Does not take care of cycles.

  Args:
    link_element: Link object for which score is required.
    user: User whose score is desired.
    use_history: If set user's history is used to fetch the doc.
    recurse: If set True, all the scores will be recursively computed
      and updated.
  Returns:
    Score for the link object.
  """
  if recurse:
    # Recursive mode: always recompute from the linked doc's contents.
    if use_history:
      doc = get_doc_for_user(link_element.trunk_ref.key(), user)
    else:
      doc = fetch_doc(link_element.trunk_ref.key())
    if use_history:
      doc_contents = get_doc_contents(doc, user, use_history=use_history)
    else:
      doc_contents = get_doc_contents_simple(doc, user)
    return get_accumulated_score(doc, user, doc_contents,
                                 use_history=use_history,
                                 recurse=recurse)
  else:
    visit_state = models.DocVisitState.all().filter('user =', user).filter(
        'trunk_ref =', link_element.trunk_ref).get()
    if visit_state and visit_state.dirty_bit:
      # Cached score is stale: recompute it (non-recursively) and store.
      if use_history:
        new_doc = get_doc_for_user(link_element.trunk_ref.key(), user)
      else:
        new_doc = fetch_doc(link_element.trunk_ref.key())
      if use_history:
        doc_contents = get_doc_contents(new_doc, user,
                                        use_history=use_history)
      else:
        doc_contents = get_doc_contents_simple(new_doc, user)
      score = get_accumulated_score(new_doc, user, doc_contents,
                                    use_history=use_history,
                                    recurse=recurse)
      return score
    elif visit_state:
      # Cached score is fresh: use it as-is.
      return visit_state.progress_score
    else:
      # Never visited: score zero.
      return 0
def get_accumulated_score(doc, user, doc_contents, use_history=False,
                          recurse=False):
  """Calculate score for a doc by accumulating scores from its objects.

  Averages score, no weights. It also updates the score for element.

  Args:
    doc: Document fetching the score.
    doc_contents: List of objects referenced in list of contents of the doc.
      the list is passed separately to prevent repeated calls to data-store
      for objects.
    user: User associated with the score.
    use_history: If set user's history is used to fetch the document.
    recurse: If set True scores are recursively re-computed instead of just
      picking entries from datastore.
  Returns:
    Average score based on content of the document. Also adds score attribute
    to each 'scorable' element.
  """
  total, count = 0, 0
  for element in doc_contents:
    if not isinstance(element, models.DocLinkModel):
      element.score = element.get_score(user)
    else:
      # Links are scored by the trunk they point at.
      element.score = get_score_for_link(element, user,
                                         use_history=use_history,
                                         recurse=recurse)
    # Elements returning None are unscorable and excluded from the average.
    if element.score is not None:
      total += element.score
      count += 1
  if total and count:
    total = int(round(float(total)/count))
    put_doc_score(doc, user, total)
    return total
  else:
    # NOTE: also reached when every score is 0 (total == 0) -- the stored
    # and returned value is 0 either way, so the result is equivalent.
    put_doc_score(doc, user, 0)
    return 0
def put_doc_score(doc, user, score):
  """Stores progress score for a doc.

  Updates the existing entry with the new score if present, else creates a
  new one.  (Appending instead would allow tracking progress over time.)

  Args:
    doc: Document fetching the score.
    user: User associated with the score.
    score: Current score.

  TODO(mukundjha): Determine if this needs to be run in a transaction.
  """
  entry = (models.DocVisitState.all()
           .filter('user =', user)
           .filter('trunk_ref =', doc.trunk_ref)
           .get())
  if entry is None:
    insert_with_new_key(models.DocVisitState, user=user,
                        trunk_ref=doc.trunk_ref.key(),
                        doc_ref=doc.key(), progress_score=score)
    return
  entry.progress_score = score
  entry.doc_ref = doc
  # Freshly stored score is, by definition, not stale.
  entry.dirty_bit = False
  entry.put()
def get_base_url(url):
"""Returns the base of the specified URL.
Given: http://localhost:8080/exercise?exid=trigonometry_1
Returns: http://localhost:8080/
Given /quiz?quiz_id=kjeiia;sk
Returns /quiz/
"""
result = urlparse.urlparse(url)
if result.netloc: # Has full network path, so remove path
return urlparse.urlunparse(
(result.scheme, result.netloc, '/', '', '', ''))
return result.path + '/'
def get_doc_contents_simple(doc, user):
  """Return a list of objects referred by keys in content list of a doc.

  This version loads only the referenced objects and does not try to resolve
  links, scores, etc.

  TODO(vchen): Opportunity to use memcache to store results.

  Args:
    doc: DocModel used for populating content objects.
    user: User in consideration.
  Returns:
    An ordered list of objects referenced in content list of passed doc.
    The objects are doc-content models, e.g., RichTextModel, DocLinkModel, etc.
    Returns None when doc is not a DocModel.
  """
  if not isinstance(doc, models.DocModel):
    return None
  try:
    # First try a bulk load.
    content_list = db.get(doc.content)
  except db.BadKeyError:
    # Unfortunately, any bad key results in the exception, so now need to
    # look up one by one, omitting any bad keys.
    content_list = []
    for content_id in doc.content:
      try:
        content = db.get(content_id)
        content_list.append(content)
      except db.BadKeyError:
        pass
  for element in content_list:
    # Widgets additionally carry the base of their URL for the view layer.
    if isinstance(element, models.WidgetModel):
      element.base_url = get_base_url(element.widget_url)
  return content_list
def get_doc_contents(doc, user, resolve_links=False, use_history=False,
                     fetch_score=False, fetch_video_state=False):
  """Return a list of objects referred by keys in content list of a doc.

  NOTE(mukundjha): doc is a DocModel object and not an id.

  Args:
    doc: DocModel used for populating content objects.
    user: User in consideration.
    resolve_links: If resolve_links is true, then links are resolved to
      get appropriate title for links.
    use_history: Use history to resolve links.
    fetch_score: If set true score is also appended to all objects.
    fetch_video_state: If set VideoModel object is appended with video's
      state (stored paused time).

  Returns:
    An ordered list of objects referenced in content list of passed doc.

  Raises:
    BadKeyError: If element referred is invalid.

  TODO(mukundjha): Develop Better method to extract base url.
  """
  if not isinstance(doc, models.DocModel):
    return None
  # Get just the list of contents
  content_list = get_doc_contents_simple(doc, user)
  # Now perform any additional resolution of titles, scores, etc.
  for element in content_list:
    if not isinstance(element, models.DocLinkModel):
      # NOTE(review): because of the elif below, a non-link element gets
      # EITHER a score OR video state, never both, when both flags are
      # set -- confirm this exclusivity is intended.
      if fetch_score:
        element.score = element.get_score(user)
      # If video object and fetch_video_status is true, status is fetched.
      elif fetch_video_state and isinstance(element, models.VideoModel):
        video_state = models.VideoState.all().filter(
            'video_ref =', element).filter(
            'user =', users.get_current_user()).get()
        if video_state:
          element.current_time = video_state.paused_time
    else:
      link = element
      if resolve_links and use_history:
        # Resolve against the version of the target doc the user last saw.
        link_doc = get_doc_for_user(link.trunk_ref.key(), user)
        link.default_title = link_doc.title
      elif resolve_links:
        # Resolve against the latest version (head) of the target trunk.
        link_doc = fetch_doc(link.trunk_ref.key())
        link.default_title = link_doc.title
      if fetch_score:
        # NOTE(review): link_doc is only bound when resolve_links is set;
        # fetch_score=True with resolve_links=False would raise
        # NameError here -- verify callers never do that.
        link.score = link_doc.get_score(user)
  return content_list
def put_widget_score(widget, user, score, user_data=None):
  """Store the progress score for a widget.

  Updates the existing (user, widget) entry when present, otherwise
  creates a new one.

  Args:
    widget: WidgetModel object whose score is being updated.
    user: User associated with the score.
    score: Current score. If None, the stored score is left untouched
      (a new entry falls back to 0).
    user_data: Optional per-user blob persisted on behalf of the widget;
      only written when non-empty.

  TODO(mukundjha): Determine if this needs to be run in a transaction.
  """
  state = models.WidgetProgressState.all().filter('user =', user).filter(
      'widget_ref =', widget).get()
  if state:
    if score is not None:
      state.progress_score = score
    if user_data:
      state.user_data = user_data
    state.put()
  else:
    # Build the constructor kwargs once instead of duplicating the
    # insert call for the with/without user_data cases.
    kwargs = dict(user=user, widget_ref=widget, progress_score=score or 0)
    if user_data:
      kwargs['user_data'] = user_data
    state = insert_with_new_key(models.WidgetProgressState, **kwargs)
def get_path_till_course(doc, path=None, path_trunk_set=None):
  """Gets a list of parents with root as a course.

  Useful in cases where a user lands on a random page and page
  needs to be linked to a course.

  Currently just picking the most latest parent recursively up
  until a course is reached or there are no more parents to pick.

  NOTE(mukundjha): This function is very heavy on datastore.
  * Checking for first 1000 entries for an existing course
    is slightly better than checking all entries.

  Args:
    doc: DocModel object in consideration.
    path: starting path (list of DocModel objects accumulated so far,
      child first; reversed before keys are returned).
    path_trunk_set: set of trunk keys already on the path; used to avoid
      cycles across recursion levels.

  Returns:
    A list of parents doc_ids with root as a course.
  """
  logging.info('****Path RCVD %r', path)
  # Trunks of parent links already examined at THIS level, so duplicate
  # links to the same parent trunk are processed only once.
  trunk_set = set()
  if path is None:
    path = []
  if path_trunk_set is None:
    # Seed with doc's own trunk so we never walk back into it.
    path_trunk_set = set([doc.trunk_ref.key()])
  # Parent links, most recently created first, capped at 1000.
  parent_entry = models.DocLinkModel.all().filter(
      'trunk_ref =', doc.trunk_ref).order(
      '-created').fetch(1000)
  # Flag is set if an alternate path is picked.
  alternate_picked_flag = 0
  alternate_parent = None
  for parent in parent_entry:
    if parent.from_trunk_ref.key() not in trunk_set:
      trunk_set.add(parent.from_trunk_ref.key())
      if parent.from_trunk_ref.key() not in path_trunk_set:
        if not alternate_picked_flag:
          # Remember the first (most recent) non-cyclic parent as the
          # fallback to recurse through if no course is found here.
          alternate_parent = parent
          alternate_picked_flag = 1
        if parent.from_doc_ref.label == models.AllowedLabels.COURSE:
          # Reached a course ancestor: the path is complete.
          path_trunk_set.add(parent.from_trunk_ref.key())
          path.append(parent.from_doc_ref)
          path.reverse()
          path_to_return = [el.key() for el in path]
          return path_to_return
  if alternate_parent:
    parent = alternate_parent
    if parent.from_trunk_ref.key() not in path_trunk_set:
      path_trunk_set.add(parent.from_trunk_ref.key())
      path.append(parent.from_doc_ref)
      # No course among direct parents: keep climbing from the fallback.
      path_to_return = get_path_till_course(parent.from_doc_ref,
                                            path, path_trunk_set)
    else:
      path.reverse()
      path_to_return = [el.key() for el in path]
  else:
    # No usable parent at all: return whatever path was accumulated.
    path.reverse()
    path_to_return = [el.key() for el in path]
  return path_to_return
def get_or_create_session(widget, user):
  """Return the WidgetProgressState for (user, widget), creating if absent.

  The key of the WidgetProgressState entry serves as the widget session
  id. A freshly created entry carries a None progress score until the
  widget reports one; dirty bits are used elsewhere to report stale
  scores.

  Args:
    widget: WidgetModel object for which the session id is required.
    user: Associated user.

  Returns:
    An instance of the WidgetProgressState model.
  """
  state = models.WidgetProgressState.all().filter('user =', user).filter(
      'widget_ref =', widget).get()
  if state is None:
    state = insert_with_new_key(models.WidgetProgressState, user=user,
                                widget_ref=widget, progress_score=None)
  return state
def set_dirty_bits_for_doc(doc, user):
  """Mark scores of all ancestors on doc's traversal path as stale.

  The dirty bit signals that the stored score for a doc is stale and
  must be recomputed.

  TODO(mukundjha): We should check for the path, or pass path as
  parameter.
  TODO(mukundjha): Maybe we should bind this with actual doc rather than
  trunk.

  Args:
    doc: Document for which score has just been updated.
    user: Associated user.
  """
  # NOTE(review): this query does not filter by user, unlike the
  # TraversalPath lookups elsewhere in this file -- confirm whether any
  # user's stack is really acceptable here.
  stack = models.TraversalPath.all().filter(
      'current_trunk =', doc.trunk_ref).get()
  if stack is None:
    return
  for ancestor_key in stack.path:
    ancestor = db.get(ancestor_key)
    visit_entry = models.DocVisitState.all().filter(
        'trunk_ref =', ancestor.trunk_ref).filter(
        'user =', user).get()
    if visit_entry:
      visit_entry.dirty_bit = True
      visit_entry.put()
def update_visit_stack(doc, parent, user):
  """Updates the visit stack for a particular doc.

  Path appends parent to parent's path and sets as path for curernt doc.
  If parent is itself a course, only parent is added to the path as paths
  are rooted at course level.

  NOTE(mukundjha): Currently stack stores doc_ids, we could replace this
  with trunk_id, doc_id, doc.title to reduce the datastore load.

  Args:
    doc: DocModel object for which visit stack is to be updated.
    parent: DocModel object - parent of the provided doc or None.
    user: Associated user.

  Returns:
    Updated visit stack entry object (a TraversalPath instance).
  """
  doc_visit_stack = models.TraversalPath.all().filter(
      'current_trunk =', doc.trunk_ref).filter(
      'user =', user).get()
  if parent:
    if parent.label == models.AllowedLabels.COURSE:
      # Paths are rooted at course level, so a course parent starts a
      # fresh single-element path.
      path = [parent.key()]
    else:
      parent_visit_stack = models.TraversalPath.all().filter(
          'current_trunk =', parent.trunk_ref).filter(
          'user =', user).get()
      if not parent_visit_stack:
        # Parent has no stack yet: synthesize one rooted at a course.
        path_for_parent = get_path_till_course(parent)
        parent_visit_stack = insert_with_new_key(
            models.TraversalPath, current_trunk=parent.trunk_ref,
            current_doc=parent, path=path_for_parent, user=user)
      path = []
      cycle_detected = 0
      # Checking for loop: copy the parent's path, truncating if doc or
      # parent itself already appears on it.
      for el in parent_visit_stack.path:
        element = db.get(el)
        if element.trunk_ref.key() == doc.trunk_ref.key():
          # doc already on the path: drop it and everything after.
          cycle_detected = 1
          break
        elif element.trunk_ref.key() == parent.trunk_ref.key():
          # parent already on the path: keep it as the last element.
          path.append(el)
          cycle_detected = 1
          break
        else:
          path.append(el)
      if not cycle_detected:
        path.append(parent.key())
    if doc_visit_stack:
      doc_visit_stack.path = path
      doc_visit_stack.put()
    else:
      doc_visit_stack = insert_with_new_key(
          models.TraversalPath, current_doc=doc,
          current_trunk=doc.trunk_ref, path=path, user=user)
  # If parent is not present
  elif not doc_visit_stack:
    # Gets set of parents.
    path = get_path_till_course(doc)
    doc_visit_stack = insert_with_new_key(
        models.TraversalPath, current_trunk=doc.trunk_ref,
        current_doc=doc, path=path, user=user)
  return doc_visit_stack
def update_recent_course_entry(recent_doc, course, user):
  """Update the RecentCourseState entry for a course just visited.

  Note(mukundjha): instead of using course.get_score() we should
  use the get_accumulated_score() with recurse=True, but it would
  be too costly to do it on every update. Therefore its better to
  push the score-change/delta up the tree on every update.

  Args:
    recent_doc: Latest doc accessed for the course.
    course: Course to be updated.
    user: User for whom the update is to be made.

  Returns:
    The updated or newly created RecentCourseState, or None if the doc
    passed is not a course.
  """
  # Only course docs get a recent-course entry.
  if course.label != models.AllowedLabels.COURSE:
    return None
  entry = models.RecentCourseState.all().filter('user =', user).filter(
      'course_trunk_ref =', course.trunk_ref).get()
  visit_state = models.DocVisitState.all().filter('user =', user).filter(
      'trunk_ref =', course.trunk_ref).get()
  if visit_state and visit_state.dirty_bit:
    # Stored score is stale: recompute from the course contents.
    contents = get_doc_contents_simple(course, user)
    score = get_accumulated_score(course, user, contents)
  else:
    score = course.get_score(user)
  if entry:
    entry.last_visited_doc_ref = recent_doc
    entry.course_doc_ref = course
    entry.course_score = score
    entry.put()
  else:
    entry = insert_with_new_key(models.RecentCourseState,
                                course_trunk_ref=course.trunk_ref,
                                course_doc_ref=course,
                                last_visited_doc_ref=recent_doc,
                                course_score=score,
                                user=user)
  return entry
def get_recent_in_progress_courses(user):
  """Gets a list of recent courses in progress.

  Recomputes scores if score entry for course is stale.

  Args:
    user: User under consideration.

  Returns:
    List of up to five RecentCourseState entries whose score is below
    100, most recently visited first.
  """
  recent_list = models.RecentCourseState.all().filter('user =', user).order(
      '-time_stamp')
  in_progress = []
  num_to_pick = 5
  # NOTE(review): the loop deliberately keeps iterating after five
  # courses are collected, because refreshing stale scores (the put()
  # below) is a side effect applied to every entry -- confirm this is
  # intended before adding an early break.
  for entry in recent_list:
    visit_state = models.DocVisitState.all().filter('user =', user).filter(
        'trunk_ref =', entry.course_trunk_ref).get()
    if visit_state and visit_state.dirty_bit:
      # Stored score is stale: recompute from the course and persist it.
      course = fetch_doc(entry.course_trunk_ref.key())
      doc_contents = get_doc_contents_simple(course, user)
      score = get_accumulated_score(course, user, doc_contents)
      entry.course_score = score
      entry.put()
    else:
      score = entry.course_score
    if score < 100 and num_to_pick:
      num_to_pick -= 1
      in_progress.append(entry)
  return in_progress
def expand_path(path, user, use_history, absolute):
  """Expand a list of doc ids into DocModel objects.

  Absolute addressing takes precedence over history-based resolution.

  Args:
    path: List of doc_ids forming a traversal path.
    user: User associated with the request.
    use_history: If set, the user's visit history resolves each trunk.
    absolute: If set, the exact docs with these ids are returned.

  Returns:
    List of DocModel objects corresponding to the doc_ids in path.
  """
  docs = db.get(path)  # single bulk fetch; returns a list
  if absolute:
    return docs
  if use_history:
    return [get_doc_for_user(d.trunk_ref.key(), user) for d in docs]
  # Default: resolve every trunk to its latest revision.
  return [fetch_doc(d.trunk_ref.key()) for d in docs]
def show_changes(pre, post):
  """Displays diffs between two models.

  NOTE(review): this calls pre.HtmlDiff(pre, post), i.e. the instance is
  also passed again as the first positional argument -- confirm that
  HtmlDiff's signature really expects (self, pre, post).
  """
  return pre.HtmlDiff(pre, post)
def get_doc_annotation(doc, user, doc_contents=None):
  """Build annotation placeholders for each content item of a doc.

  NOTE: This no longer retrieves the actual annotations, but just
  placeholders keyed by content IDs. The annotations are retrieved via
  AJAX using the foreign keys: (trunk_id, doc_id, content_id, user).

  Args:
    doc: DocModel that is possibly annotated.
    user: User in consideration.
    doc_contents: Optional list of doc's contents; loaded from the
      datastore when omitted.

  Returns:
    A dict of { content_id: placeholder_spec } for the doc's contents,
    or None when doc is not a DocModel or user is None.
  """
  if user is None or not isinstance(doc, models.DocModel):
    return None
  if not doc_contents:
    doc_contents = get_doc_contents_simple(doc, user)
  placeholders = {}
  for content in doc_contents:
    content_id = str(content.key())
    placeholders[content_id] = {'data': '', 'key': content_id}
  return placeholders
def _get_doc_content_annotation(trunk_id, doc_id, content_id, user):
  """Retrieves user-annotation for a given doc content.

  This is an internal work routine that uses the memcache to cache the
  key name of the annotation object associated with the specified
  content. An AnnotationState entry is created on first access.

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel,
      DocLinkModel, etc.).
    user: User in consideration.

  Returns:
    An instance of models.AnnotationState or None (missing user, bad
    trunk, or unresolvable content id).
  """
  if not user:
    return None
  # Cache key is the full foreign key of the annotation.
  cache_key = '|'.join([trunk_id, doc_id, content_id, str(user)])
  key_name = memcache.get(cache_key, namespace='anno')
  if key_name:
    # Fast path: memcache already maps this (doc, content, user) to the
    # datastore key name of its AnnotationState.
    return models.AnnotationState.get_by_key_name(key_name)
  try:
    doc = fetch_doc(trunk_id, doc_id=doc_id)
  except models.InvalidTrunkError, e:
    logging.error('Error loading doc for annotation: %r' % e)
    return None
  content = None
  try:
    content = db.get(content_id)
  except db.BadKeyError:
    pass
  if not content:
    logging.error('Cannot locate content for annotation: %r' % content_id)
    return None
  query = (models.AnnotationState.all()
           .filter('user =', user)
           .filter('trunk_ref =', doc.trunk_ref)
           .filter('doc_ref =', doc)
           .filter('object_ref =', content))
  if query.count() == 0:
    # First access: create an empty annotation entry.
    anno = models.AnnotationState(user=user,
                                  doc_ref=doc,
                                  trunk_ref=doc.trunk_ref,
                                  object_ref=content)
    anno.annotation_data = ''
    anno.put()
  else:
    anno = query.get()
  # Remember the key name so the next lookup skips the datastore query.
  memcache.set(cache_key, anno.key().name(), namespace='anno')
  return anno
def get_annotation_data(trunk_id, doc_id, content_id, user):
  """Retrieve user-annotation contents for a given doc content.

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel,
      DocLinkModel, etc.).
    user: User in consideration.

  Returns:
    The user annotation data blob, or the empty string when no
    annotation can be resolved.
  """
  anno = _get_doc_content_annotation(trunk_id, doc_id, content_id, user)
  if not anno:
    return ''
  return anno.annotation_data
def update_doc_content_annotation(trunk_id, doc_id, content_id, user, data):
  """Update the user-annotation for a given doc content.

  Uses the memcache (via _get_doc_content_annotation).

  Args:
    trunk_id: Trunk ID of the doc that contains the annotation.
    doc_id: Doc ID of the doc that contains the annotation.
    content_id: ID of the content model (e.g., RichTextModel,
      DocLinkModel, etc.).
    user: User in consideration.
    data: The annotation data as a string; stored utf-8 encoded.
  """
  anno = _get_doc_content_annotation(trunk_id, doc_id, content_id, user)
  if not anno:
    return
  # NEEDSWORK(jch):
  # AnnotationState stores annotation_data as one serialized blob, so the
  # data is opaque at this library layer; (de)serialization happens
  # between the views layer and the browser JS. Perhaps AnnotationState
  # should be modified to learn logical fields as it grows.
  anno.annotation_data = data.encode('utf-8')
  anno.put()
def get_notepad(key, user):
  """Get notepad contents for an object.

  Args:
    key: key to a NotePadModel object.
    user: the user the NotePadState belongs to.

  Returns:
    All matching notepad entries joined by blank lines, or "" when the
    user has no notepad for the object.
  """
  ob = db.get(key)
  query = (models.NotePadState.all()
           .filter('user =', user)
           .filter('object_ref =', ob))
  # Materialize the query once instead of issuing a separate count()
  # query and then re-running it for the join.
  entries = [notepad.notepad_data for notepad in query]
  if not entries:
    return ""
  return "\n\n".join(entries)
def update_notepad(key, user, text):
  """Update notepad contents for an object.

  Args:
    key: key to a NotePadModel object.
    user: the user the NotePadState belongs to.
    text: the updated contents.
  """
  ob = db.get(key)
  # A single get() replaces the original count()-then-index pattern,
  # which ran the query twice; get() returns the first result or None.
  notepad = (models.NotePadState.all()
             .filter('user =', user)
             .filter('object_ref =', ob)).get()
  if notepad is None:
    notepad = models.NotePadState(user=user, object_ref=ob)
  notepad.notepad_data = text
  notepad.put()
def view_doc_param(doc, visit, current, came_from):
  """Build URL parameters to visit a doc, recording where we came from.

  Helper for getPrevNextLinks.

  Args:
    doc: target document to go to.
    visit: current visit stack (TraversalPath) or None.
    current: current document (logically the tip of visit).
    came_from: document we are leaving from, or None.

  Returns:
    A list of (name, value) URL parameter pairs, or None when doc is
    missing.
  """
  if not doc:
    return None
  param = [('trunk_id', str(doc.trunk_ref.key())),
           ('doc_id', str(doc.key()))]
  if visit:
    # Scan the path from the tip toward the root for doc's key; index 0
    # never counts as a match since a root has no parent on the path.
    idx = len(visit.path) - 1
    while idx > 0 and visit.path[idx] != doc.key():
      idx -= 1
    parent = db.get(visit.path[idx - 1]) if idx > 0 else None
    if parent:
      param.extend([('parent_trunk', str(parent.trunk_ref.key())),
                    ('parent_id', str(parent.key()))])
  if came_from:
    param.extend([('came_from', str(came_from.key()))])
  return param
def getPrevNextLinks(doc, visit, came_from):
  """Compute where to go next.

  Args:
    doc: this document.
    visit: traversal path from top to this document.
    came_from: the document the user came from, when different from
      parent.

  Returns:
    A (prev_param, next_param) tuple, where
      prev_param: URL parameters to feed to view to go to natural
        "previous" page.
      next_param: URL parameters to feed to view to go to natural
        "next" page.
  """
  # TODO: the "prev" half is not yet computed nor used.
  prev_param = None
  # If we came back from down below, visit the next child (no "immediate
  # adjacency" required --- we have been showing this document already).
  # If we came from top-down navigation, we do not have came_from; visit
  # the first child in that case, and pretend as if the user just navigated
  # in the usual top-down fashion (i.e. no need for came_from).
  next = doc.first_child_after(came_from)
  next_came_from = None
  here = doc
  if (not next) and visit and visit.path:
    # We ran out of our children, so go back to our parent.
    # visit.path should be the path from the root down to doc.
    depth, child = len(visit.path), doc
    while (0 < depth):
      depth -= 1
      parent = db.get(visit.path[depth])
      # After visiting child inside parent, "next_child_or_self" is either
      # the target of a link to the child that immediately follows the
      # link to this child, or the parent itself if the link to this child
      # is followed by a non-link material, or None which tells us to
      # ask the grandparent what to do.
      next = parent.next_child_or_self(child)
      if next:
        if next == parent:
          # parent has a non-link after the link to this child
          # revisit the parent to show that non-link, and remember
          # to visit the link after that child
          next_came_from = child
        else:
          # following the link to the next child, as if we came
          # directly from the top
          next_came_from = None
        here = parent
        break
      else:
        # This parent is exhausted too: climb another level.
        child = parent
  next_param = view_doc_param(next, visit, here, next_came_from)
  return (prev_param, next_param)
def auto_subscribe(user, trunk):
  """Auto-subscribe the user who edited to further changes of the page.

  Args:
    user: the user who edited this page.
    trunk: the trunk object that represents the page.

  Returns:
    Whatever notify.setSubscription returns for this (user, trunk) pair.
  """
  # The literal 1 is the subscription value passed through to notify;
  # presumably "subscribed" -- see notify.setSubscription for semantics.
  return notify.setSubscription(user, trunk, 1)
| en | 0.820054 | # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copied substantially from rietveld: # # http://code.google.com/p/rietveld # # Removed all rietveld-specific codereview templates. # TODO(vchen): Determine what other functionality to retain. Django template library for Lantern. # For registering filter and tag libs. Subtracts one from the provided number. Subtracts one from the provided number. Returns a list with integer between the range provided. Returns name of the class. Returns key for an object if it exists in datastore. Render a link to the user's dashboard, with text being the nickname. # No account. Let's not create a hyperlink. # populate the dict with the results, so same user in the list later # will have a memcache "hit" on "read". Render list of links to each user's dashboard. # Don't wast time calling memcache with an empty list. Return a nickname for an email address. If 'never_me' is True, 'me' is not returned if 'email' belongs to the current logged in user. If 'request' is a HttpRequest, it is used to cache the nickname returned by models.Account.get_nickname_for_email(). Renders a nickname for a given email address. The return value is cached if a HttpRequest is available in a 'request' template variable. The template tag accepts one or two arguments. The first argument is the template variable for the email address. If the optional second argument evaluates to True, 'me' as nickname is never rendered. 
Example usage: {% cached_nickname msg.sender %} {% cached_nickname msg.sender True %} Constructor. 'email_address' is the name of the template variable that holds an email address. If 'never_me' evaluates to True, 'me' won't be returned. Almost the same as nickname filter but the result is cached. Wrapper for nickname tag with is_multi flag enabled. ### functions to parse yaml files ### Parses input yaml file and returns a dictionary object with yaml content. Validation of the content is done by parse_leaf and parse_node functions. Args: path: Path to yaml file. Returns: A dict object with yaml_content mapped with corresponding keys. Raises: IOError: If file path is not correct. YAMLError: If unable to load yaml file. If an error occours the dictionary object returned will contain element 'errorMsg' containing the error message. # Read the yaml file. # If file not valid return dictObejct with corresponding error message. # If file unable to load yaml content return dictObejct with corresponding # error message. Parses a yaml file and validates if the file is of type node. Args: path: Path to yaml file. Returns: A dict object with doc_contents mapped with corresponding keys, or with appropriate error message. Parses a yaml file and validates if the file is of type leaf. Args: path: Path to yaml file. Returns: A dict object with yaml_content mapped with corresponding keys, or with appropriate error message, if there is a type mismatch. ### Library function to interact with datastore ### Insert model into datastore with a random key. Args: cls: Data model class (ex. models.DocModel). parent: optional parent argument to bind models in same entity group. NOTE: If parent argument is passed, key_name may not be unique across all entities. Returns: Data model entity or None if error. TODO(mukundjha): Check for race condition. Creates a new trunk with given document as head. 
WARNING: Since we are passing parent parameter in insert_with_new_key, function will only check for uniqueness of key among entities having 'trunk' as an ancestor. This no longer guarantees unique key_name across all entities. NOTE(mukundjha): No check is done on doc_id, it's responsibility of other functions calling create_new_trunk_with_doc to check the parameter before its passed. Args: doc_id: String value of key of the document to be added. commit_message: Message to commit, If None, uses the message, 'Committed a new revision'. Returns: Returns created trunk. Raises: InvalidDocumentError: If the doc_id is invalid. Appends a document to end of the trunk. NOTE(mukundjha): No check is done on doc_id, it's responsibility of other functions calling append_to_trunk to check the parameter before its passed. Args: trunk_id: Key of the trunk. doc_id: String value of key of the document to be added. commit_message: Message to commit, If None, uses the message, 'Committed a new revision'. Returns: Returns modified trunk. Raises: InvalidDocumentError: If the doc_id is invalid. InvalidTrunkError: If the trunk_id is invalid. Creates a new document in datastore. If trunk_id is provided, new document is appended to the trunk. Else a new trunk is created. Args: trunk_id: key(string) to the trunk to which the new document belongs. Returns: A DocModel object. Raises: InvalidTrunkError: If an invalid trunk id is provided InvalidDocumentError: If unable to save document in data store TODO(mukundjha): Check all db.put statements for exceptions. Fetches a document from datastore or raises InvalidDocumentError. If both trunk_id and doc_id are provided, return particular doc if it belongs to the given trunk, else return head of the trunk. Args: trunk_id: Trunk to fetch the document from. doc_id: Document id to fetch a particular version of document. 
Returns: A DocModel object which having provided trunk_id and doc_id, if only trunk_id is provided or an invalid doc_id is provided head of the trunk is returned. Raises: InvalidDocumentError: If trunk_id passed is invalid. # Using cached value of head stored in trunk, should be fine since all # writes are atomic and updates head. Retrieves document based on user's visit history. If the user has visited a particular revision (document of a trunk), user will see that document, else user will be directed to the latest revision. We pass user instead of using users.get_current_user, so that this function could also be used while creating other pages like teacher's dashboard etc., where student will not be looged in. NOTE(mukundjha): This does not update the datastore with new entry. It is upto the view to update the datastore. Args: trunk_id: Key to the referenced trunk. user: User whose history is to be used. Returns: Document based on user's visit history. Raises: InvalidTrunkError: If trunk_id is not valid. Returns a parent for a document. If multiple parents are present, choose one based on ranking function. Note(mukundjha): Taking history into account makes it a very heavy on datastore. Args: doc: DocModel object from datastore. Returns: Document which is parent of doc passed or None if there are no parents. Calculates score for the DocLink object. Score for a link is essentially score for the trunk pointed by the link. If dirty bit is set for the visit entry for the referred trunk scores for the doc are re-computed by calling get_accumulated_score, else score entry for the trunk is fetched. NOTE(mukundjha): Does not take care of cycles. Args: link_element: Link object for which score is required. user: User whose score is desired. use_history: If set user's history is used to fetch the doc. recurse: If set True, all the scores will be recursively computed and updated. Returns: Score for the link object. 
Calculate score for a doc by accumulating scores from its objects. Averages score, no weights. It also updates the score for element. Args: doc: Document fetching the score. doc_contents: List of objects referenced in list of contents of the doc. the list is passed separately to prevent repeated calls to data-store for objects. user: User associated with the score. use_history: If set user's history is used to fetch the document. recurse: If set True scores are recursively re-computed instead of just picking entries from datastore. Returns: Average score based on content of the document. Also adds score attribute to each 'scorable' element. Stores progress score for a doc. Updates the entry with new score if present, else makes a new entry. We could also just append if we want to track the progress over time. Args: doc: Document fetching the score. user: User associated with the score. score: Current score. TODO(mukundjha): Determine if this needs to be run in a transaction. Returns the base of the specified URL. Given: http://localhost:8080/exercise?exid=trigonometry_1 Returns: http://localhost:8080/ Given /quiz?quiz_id=kjeiia;sk Returns /quiz/ # Has full network path, so remove path Return a list of objects referred by keys in content list of a doc. This version loads only the referenced objects and does not try to resolve links, scores, etc. TODO(vchen): Opportunity to use memcache to store results. Args: doc: DocModel used for populating content objects. user: User in consideration. Returns: An ordered list of objects referenced in content list of passed doc. The objects are doc-content models, e.g., RichTextModel, DocLinkModel, etc. # First try a bulk load. # Unfortunately, any bad key results in the exception, so now need to # look up one by one, omitting any bad keys. Return a list of objects referred by keys in content list of a doc. NOTE(mukundjha): doc is a DocModel object and not an id. Args: doc: DocModel used for populating content objects. 
user: User in consideration. resolve_links: If resolve_links is true, then links are resolved to get appropriate title for links. use_history: Use history to resolve links. fetch_score: If set true score is also appended to all objects. fetch_video_state: If set VideoModel object is appended with video's state (stored paused time). Returns: An ordered list of objects referenced in content list of passed doc. Raises: BadKeyError: If element referred is invalid. TODO(mukundjha): Develop Better method to extract base url. # Get just the list of contents # Now perform any additional resolution of titles, scores, etc. # If video object and fetch_video_status is true, status is fetched. Stores progress score for a widget. Updates the entry with new score if present, else makes a new entry. Args: widget: WidgetModel object for which score is being updated. user: User associated with the score. score: Current score. If None, do not update it. user_data: Optional per-user data to be persisted on behalf of the widget. TODO(mukundjha): Determine if this needs to be run in a transaction. # Make sure it is not None Gets a list of parents with root as a course. Useful in cases where a user lands on a random page and page needs to be linked to a course. Currently just picking the most latest parent recursively up until a course is reached or there are no more parents to pick. NOTE(mukundjha): This function is very heavy on datastore. * Checking for first 1000 entries for an existing course is slightly better than checking all entries. Args: doc: DocModel object in consideration. path: starting path Returns: A list of parents doc_ids with root as a course. # Flag is set if an alternate path is picked. Retrieves or creates a new session for the (user, widget) pair. Session id is assumed to be the key for WidgetProgressState entry for the widget. If no entry is present, a new entry is made. Currently, we are setting dirty bits to report stale scores. 
Args: widget: WidgetModel object for which session id is required. user: Associated user. Returns: An instance of the WidgetProgressState model. Sets dirty bit for all the parents in the path used to reach doc. Dirty bit indicates the score for the doc are stale and needs to be recomputed. TODO(mukundjha): We should check for the path, or pass path as parameter. TODO(mukundjha): Maybe we should bind this with actual doc rather than trunk. Args: doc: Document for which score has just been updated. user: Associated user. Updates the visit stack for a particular doc. Path appends parent to parent's path and sets as path for curernt doc. If parent is itself a course, only parent is added to the path as paths are rooted at course level. NOTE(mukundjha): Currently stack stores doc_ids, we could replace this with, trunk_id, doc_id, doc.title to reduce the datastore load. Args: doc: DocModel object for which visit stack is to be updated. parent: DocModel object - parent of the provided doc or None. user: Associated user. Returns: Updated visit stack entry object. # Checking for loop # If parent is not present # Gets set of parents. Updates the entry for recent course visited/accesed. Note(mukundjha): instead of using course.get_score() we should use the get_accumulated_score() with recurse=True, but it would be too costly to do it on every update. Therefore its better to push the score-change/delta up the tree on every update. Args: recent_doc: Latest doc accessed for the course. course: Course to be updated. user: User for whom update is to be made. # Update course entry only if the doc passed is a course. Gets a list of recent courses in progress. Recomputes scores if score entry for course is stale. Args: user: User under consideration. Returns: List of recent course entry. Expands the path into objects based on the parameters. Absolute is given preference over others. Args: path: List of doc_ids forming a traversal path. absolute: If set absolute addressing is used. 
Docs with same doc_ids in the list are fetched. user: User associated with request. use_history: If set then user's history is used to expand all the links. Returns: Returns list of DocModel objects corresponding to the doc_ids in the path passed. # Returns a list # Fetch latest Displays diffs between two models. Retrieve annotation keys for a given doc. NOTE: This no longer retrieves the actual annotations, but just placeholders using the content IDs. The annotations will be retrieved via AJAX using the foreign keys: (trunk_id, doc_id, content_id, user). Args: doc: DocModel that is possibly annotated user: User in consideration doc_contents: Optional list of doc's contents. If None, gets the list from the database. Returns: A dictionary of { obj_id: annotation_spec } for component documents in doc Retrieves user-annotation for a given doc content. This is an internal work routine that uses the memcache to cache the key name of the annotation object associated with the specified content. Args: trunk_id: Trunk ID of the doc that contains the annotation. doc_id: Doc ID of the doc that contains the annotation. content_id: ID of the content model (e.g., RichTextModel, DocLinkModel, etc.). user: User in consideration. Returns: An instance of models.AnnotationState or None. Retrieves user-annotation contents for a given doc content. Args: trunk_id: Trunk ID of the doc that contains the annotation. doc_id: Doc ID of the doc that contains the annotation. content_id: ID of the content model (e.g., RichTextModel, DocLinkModel, etc.) user: User in consideration Returns: The user annotation data as a JSON encoded blob. Updates user-annotation for a given doc content. Uses the memcache. Args: trunk_id: Trunk ID of the doc that contains the annotation. doc_id: Doc ID of the doc that contains the annotation. content_id: ID of the content model (e.g., RichTextModel, DocLinkModel, etc.) user: User in consideration data: The annotation data as a string. 
It will be encoded as utf-8 # NEEDSWORK(jch): # Since AnnotationState model wants annotation_data as serialized # blob of everything, the data here is opaque at this library layer # as the serialization is done between views layer and the JS in # the browser (and we probably do not want to do JSON at the library # layer). This is somewhat awkward. Perhaps AnnotationState should # be modified to learn logical fields as it grows??? I dunno. Get notepad contents Args: key: key to a NotePadModel object user: the user the NotePadState belongs to Update notepad contents Args: key: key to a NotePadModel object user: the user the NotePadState belongs to text: the updated contents Helper for getPrevNextLinks Args: doc: target document to go to visit: current visit stack current: current document (logically the tip of visit) came_from: document we are leaving from Returns: URL parameter to visit the doc, marking that it came from here Compute where to go next Args: doc: this document visit: traversal path from top to this document came_from: the document the user came from, when different from parent Returns: A (prev_param, next_param) tuple, where prev_param: URL parameters to feed to view to go to natural "previous" page next_param: URL parameters to feed to view to go to natural "next" page # TODO: the "prev" half is not yet computed nor used. # If we came back from down below, visit the next child (no "immediate # adjacency" required --- we have been showing this document already). # If we came from top-down navigation, we do not have came_from; visit # the first child in that case, and pretend as if the user just navigated # in the usual top-down fashion (i.e. no need for came_from). # We ran out of our children, so go back to our parent. # visit.path should be the path from the root down to doc. 
# After visiting child inside parent, "next_child_or_self" is either # the target of a link to the child that immediately follows the # link to this child, or the parent itself if the link to this child # is followed by a non-link material, or None which tells us to # ask the grandparent what to do. # parent has a non-link after the link to this child # revisit the parent to show that non-link, and remember # to visit the link after that child # following the link to the next child, as if we came # directly from the top Auto-subscribe the user who edited to further changes of the page. Args: user: the user who edited this page trunk: the trunk object that represents the page | 1.813088 | 2 |
code/compilingClusters/salaris_coords.py | andrewbowen19/ClusterEclipsingBinaries | 0 | 6614374 | <reponame>andrewbowen19/ClusterEclipsingBinaries<filename>code/compilingClusters/salaris_coords.py
# Adding Salaris 2004 coords to the data we have - want RA/Dec for every Salaris Cluster
import pandas as pd
path = '/Users/andrewbowen/ceb_project/data/OC_data/'
# names from new salaris data table
new_names = ['N','Identifier','Otype','RA','Dec','Mag U','Mag B','Mag V','Mag R','Mag I','Sp type','#ref 1850 - 2019','#notes']
# old salaris column names, from: Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt
sol_names = ['name', 'deltaV', 'sigdV', '[FeH]', 'sigFeH', 't', 'sigt', 'logt', 'Rgc', 'z','Diam[pc]', 'd[pc]']
# Reading in 2 Salaris files
new_sol = pd.read_table(path + 'new_salaris_coords.txt', sep = '\t', header = 0, names = new_names)
old_sol = pd.read_table(path + 'Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt', delim_whitespace = True, \
header = 0, names = sol_names)
sol_RA = new_sol['RA']
sol_dec = new_sol['Dec']
# Maybe try to dump excess stuff and merge on names column?
# print(new_sol['RA'])
new_names = ['N','name','Otype','RA','Dec','Mag U','Mag B','Mag V','Mag R','Mag I','Sp type','#ref 1850 - 2019','#notes']
new_sol.columns = new_names
new_sol = new_sol[new_names]#resetting column names
# Making names standrard across both files (from file_compile)
New_Sol_Names = new_sol['name']
New_Sol_Names = New_Sol_Names.str.replace(' ', '_')
new_sol['name'] = New_Sol_Names#putting it back into the df
# merging new and old salaris dfs
all_sol = old_sol.join(new_sol.set_index('name'), on = 'name', how = 'outer')
# print(all_sol.columns)
# List of columns to drop from all_sol
dropped_cols = ['N','Otype','Mag U','Mag B', 'Mag V', 'Mag R', 'Mag I', 'Sp type', '#ref 1850 - 2019','#notes']
Salaris_df = all_sol.drop(labels = dropped_cols, axis = 1)
# Final columns to use for our table
final_salaris_cols = ['name', 'RA', 'Dec','deltaV', 'sigdV', '[FeH]', 'sigFeH', 't', 'sigt', 'logt',\
'Rgc', 'z', 'Diam[pc]', 'd[pc]']
Salaris_df = Salaris_df[final_salaris_cols]#rearrangin column order
# Sending new df to csv file - will need to fill in missing 10 RA/Dec values manually
Salaris_df.to_csv(path + 'SalarisData_withCoords.csv', sep = ',', header = final_salaris_cols)
| # Adding Salaris 2004 coords to the data we have - want RA/Dec for every Salaris Cluster
import pandas as pd
path = '/Users/andrewbowen/ceb_project/data/OC_data/'
# names from new salaris data table
new_names = ['N','Identifier','Otype','RA','Dec','Mag U','Mag B','Mag V','Mag R','Mag I','Sp type','#ref 1850 - 2019','#notes']
# old salaris column names, from: Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt
sol_names = ['name', 'deltaV', 'sigdV', '[FeH]', 'sigFeH', 't', 'sigt', 'logt', 'Rgc', 'z','Diam[pc]', 'd[pc]']
# Reading in 2 Salaris files
new_sol = pd.read_table(path + 'new_salaris_coords.txt', sep = '\t', header = 0, names = new_names)
old_sol = pd.read_table(path + 'Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt', delim_whitespace = True, \
header = 0, names = sol_names)
sol_RA = new_sol['RA']
sol_dec = new_sol['Dec']
# Maybe try to dump excess stuff and merge on names column?
# print(new_sol['RA'])
new_names = ['N','name','Otype','RA','Dec','Mag U','Mag B','Mag V','Mag R','Mag I','Sp type','#ref 1850 - 2019','#notes']
new_sol.columns = new_names
new_sol = new_sol[new_names]#resetting column names
# Making names standrard across both files (from file_compile)
New_Sol_Names = new_sol['name']
New_Sol_Names = New_Sol_Names.str.replace(' ', '_')
new_sol['name'] = New_Sol_Names#putting it back into the df
# merging new and old salaris dfs
all_sol = old_sol.join(new_sol.set_index('name'), on = 'name', how = 'outer')
# print(all_sol.columns)
# List of columns to drop from all_sol
dropped_cols = ['N','Otype','Mag U','Mag B', 'Mag V', 'Mag R', 'Mag I', 'Sp type', '#ref 1850 - 2019','#notes']
Salaris_df = all_sol.drop(labels = dropped_cols, axis = 1)
# Final columns to use for our table
final_salaris_cols = ['name', 'RA', 'Dec','deltaV', 'sigdV', '[FeH]', 'sigFeH', 't', 'sigt', 'logt',\
'Rgc', 'z', 'Diam[pc]', 'd[pc]']
Salaris_df = Salaris_df[final_salaris_cols]#rearrangin column order
# Sending new df to csv file - will need to fill in missing 10 RA/Dec values manually
Salaris_df.to_csv(path + 'SalarisData_withCoords.csv', sep = ',', header = final_salaris_cols) | en | 0.702613 | # Adding Salaris 2004 coords to the data we have - want RA/Dec for every Salaris Cluster # names from new salaris data table # old salaris column names, from: Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt # Reading in 2 Salaris files # Maybe try to dump excess stuff and merge on names column? # print(new_sol['RA']) #resetting column names # Making names standrard across both files (from file_compile) #putting it back into the df # merging new and old salaris dfs # print(all_sol.columns) # List of columns to drop from all_sol # Final columns to use for our table #rearrangin column order # Sending new df to csv file - will need to fill in missing 10 RA/Dec values manually | 2.150109 | 2 |
Lessons/Stacks and Queues/Nesting/solution.py | matheuscordeiro/Codility | 0 | 6614375 | #!/usr/local/bin/python3
def solution(S):
    """Return 1 if the parentheses string S is properly nested, else 0.

    A string is properly nested when every ')' closes a previously
    opened '(' and nothing is left open at the end.
    """
    depth = 0
    for ch in S:
        if ch == '(':
            depth += 1
        elif depth:
            # Any non-'(' character consumes one open paren, matching
            # the original stack-pop behavior.
            depth -= 1
        else:
            # Closing character with nothing open: not nested.
            return 0
    return 1 if depth == 0 else 0
def solution(S):
stack = []
for i in S:
if i == '(':
stack.append(i)
elif stack:
stack.pop()
else:
return 0
if stack:
return 0
else:
return 1 | en | 0.387222 | #!/usr/local/bin/python3 | 3.675394 | 4 |
maps/tests.py | OrangeKing/django-maps | 0 | 6614376 | from django.test import TestCase
from maps.models import Post
from time import strftime
from django.contrib.auth.models import User
from .forms import *
class PostModelTest(TestCase):
    """Model-level tests for Post: stored field values, helpers, and URLs.

    Uses timestamp-based author/title names so fixture data is unique
    per test run.
    """

    TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
    TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
    CONTENT = """Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer
took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries,
but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software
like Aldus PageMaker including versions of Lorem Ipsum."""
    LOCATION = "Warsaw"
    SLUG = "Mobica"

    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods
        test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
        test_id = test_user.id
        Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
                            contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)

    # NOTE: assertEquals (deprecated alias, removed in Python 3.12) was
    # replaced by assertEqual throughout this class.

    def test_author_id_value(self):
        author = User.objects.get(username=self.TIMESTAMP_AUTHOR)
        expected_id_value = author.id
        self.assertEqual(expected_id_value, 1)

    def test_title_value(self):
        post = Post.objects.get(id=1)
        expected_title_value = post.title
        self.assertEqual(expected_title_value, self.TIMESTAMP_TITLE)

    def test_title_label_max_length(self):
        post = Post.objects.get(id=1)
        max_length = post._meta.get_field('title').max_length
        self.assertEqual(max_length, 200)

    def test_was_published_recently(self):
        post = Post.objects.get(id=1)
        self.assertEqual(post.was_published_recently(), True)

    def test_get_contents_preview(self):
        # The preview must be strictly shorter than the full contents.
        post = Post.objects.get(id=1)
        short_contents = post.get_contents_preview()
        self.assertEqual(len(post.contents) > len(short_contents), True)

    def test_to_str(self):
        post = Post.objects.get(id=1)
        title = post.title
        self.assertEqual(title, post.__str__())

    def test_get_absolute_url(self):
        post = Post.objects.get(id=1)
        url = post.get_absolute_url()
        self.assertEqual(url, "/posts/{}/".format(self.SLUG))
class PostViewTest(TestCase):
    """View/URL tests for the maps app, exercised through Django's test client."""

    TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
    TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
    CONTENT = """Short test content value"""
    LOCATION = "Warsaw"
    SLUG = "Mobica"

    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods
        test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
        test_id = test_user.id
        Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
                            contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)

    def test_call_view_index(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'index.html')

    def test_call_view_about(self):
        response = self.client.get('/about/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'about.html')

    def test_call_view_contact(self):
        response = self.client.get('/contact/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'contact.html')

    def test_call_view_denies_anonymous(self):
        response = self.client.get('/posts/add/')
        self.assertTemplateNotUsed(response)

    def test_call_view_loads(self):
        # NOTE(review): the fixture user is created without a usable password,
        # so this login silently fails and the request runs anonymously --
        # confirm whether the post list view actually requires authentication.
        self.client.login(username=self.TIMESTAMP_AUTHOR, password=None)
        # BUG FIX: the original passed the literal string
        # '/posts/?q={TIMESTAMP_AUTHOR}' with no interpolation; format the
        # query with the actual fixture username so the search is meaningful.
        response = self.client.get('/posts/?q={}'.format(self.TIMESTAMP_AUTHOR))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'post_list.html')

    def test_call_view_fails_blank(self):
        # NOTE(review): '<PASSWORD>' looks like a redacted placeholder and the
        # login fails (no such user); that is harmless here because an unknown
        # URL must 404 regardless of authentication.
        self.client.login(username='user', password='<PASSWORD>')
        response = self.client.post('/nonexistent')
        self.assertEqual(response.status_code, 404)
class PostValidatorsTest(TestCase):
    """Form-validation tests for PostAddForm."""

    # Timestamp-based names keep fixture data unique per test run.
    TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
    TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
    CONTENT = """Short test content value"""
    LOCATION = "Warsaw"
    SLUG = "Mobica"

    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods
        test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
        test_id = test_user.id
        Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
                            contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)

    def test_validate_post(self):
        """An unbound PostAddForm is invalid; a bound one with data validates."""
        author = User.objects.get(username=self.TIMESTAMP_AUTHOR)
        author_id = author.id
        valid_post = Post.objects.create(author_id=author_id)
        test_post_form = PostAddForm(instance=valid_post)
        self.assertEqual(test_post_form.is_valid(), False) # No data has been supplied yet.
        test_post_form = PostAddForm({'title': self.TIMESTAMP_AUTHOR, 'contents': "password", 'location': "Warsaw" }, instance=valid_post)
        self.assertEqual(test_post_form.is_valid(), True) # Now that you have given it data, it can validate.
| from django.test import TestCase
from maps.models import Post
from time import strftime
from django.contrib.auth.models import User
from .forms import *
class PostModelTest(TestCase):
TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
CONTENT = """Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer
took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries,
but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software
like Aldus PageMaker including versions of Lorem Ipsum."""
LOCATION = "Warsaw"
SLUG = "Mobica"
@classmethod
def setUpTestData(cls):
# Set up non-modified objects used by all test methods
test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
test_id = test_user.id
Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)
def test_author_id_value(self):
author = User.objects.get(username=self.TIMESTAMP_AUTHOR)
expected_id_value = author.id
self.assertEquals(expected_id_value, 1)
def test_title_value(self):
post = Post.objects.get(id=1)
expected_title_value = post.title
self.assertEquals(expected_title_value, self.TIMESTAMP_TITLE)
def test_title_label_max_length(self):
post = Post.objects.get(id=1)
max_length = post._meta.get_field('title').max_length
self.assertEquals(max_length, 200)
def test_was_published_recently(self):
post = Post.objects.get(id=1)
self.assertEquals(post.was_published_recently(), True)
def test_get_contents_preview(self):
post = Post.objects.get(id=1)
short_contents = post.get_contents_preview()
self.assertEquals(len(post.contents) > len(short_contents), True)
def test_to_str(self):
post = Post.objects.get(id=1)
title = post.title
self.assertEquals(title, post.__str__())
def test_get_absolute_url(self):
post = Post.objects.get(id=1)
url = post.get_absolute_url()
self.assertEquals(url, "/posts/{}/".format(self.SLUG))
class PostViewTest(TestCase):
TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
CONTENT = """Short test content value"""
LOCATION = "Warsaw"
SLUG = "Mobica"
@classmethod
def setUpTestData(cls):
# Set up non-modified objects used by all test methods
test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
test_id = test_user.id
Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)
def test_call_view_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
def test_call_view_about(self):
response = self.client.get('/about/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'about.html')
def test_call_view_contact(self):
response = self.client.get('/contact/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'contact.html')
def test_call_view_denies_anonymous(self):
response = self.client.get('/posts/add/')
self.assertTemplateNotUsed(response)
def test_call_view_loads(self):
self.client.login(username=self.TIMESTAMP_AUTHOR, password=None) # defined in fixture or with factory in setUp()
response = self.client.get('/posts/?q={TIMESTAMP_AUTHOR}')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'post_list.html')
def test_call_view_fails_blank(self):
self.client.login(username='user', password='<PASSWORD>')
response = self.client.post('/nonexistent')
self.assertEqual(response.status_code, 404)
class PostValidatorsTest(TestCase):
TIMESTAMP_AUTHOR = strftime("U%d%m%y%H%M%S")
TIMESTAMP_TITLE = strftime("T%d%m%y%H%M%S")
CONTENT = """Short test content value"""
LOCATION = "Warsaw"
SLUG = "Mobica"
@classmethod
def setUpTestData(cls):
# Set up non-modified objects used by all test methods
test_user = User.objects.create_user(cls.TIMESTAMP_AUTHOR)
test_id = test_user.id
Post.objects.create(author_id=test_id, title=cls.TIMESTAMP_TITLE,
contents=cls.CONTENT, location=cls.LOCATION, slug=cls.SLUG)
def test_validate_post(self):
author = User.objects.get(username=self.TIMESTAMP_AUTHOR)
author_id = author.id
valid_post = Post.objects.create(author_id=author_id)
test_post_form = PostAddForm(instance=valid_post)
self.assertEqual(test_post_form.is_valid(), False) # No data has been supplied yet.
test_post_form = PostAddForm({'title': self.TIMESTAMP_AUTHOR, 'contents': "password", 'location': "Warsaw" }, instance=valid_post)
self.assertEqual(test_post_form.is_valid(), True) # Now that you have given it data, it can validate.
| en | 0.94043 | Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum. # Set up non-modified objects used by all test methods Short test content value # Set up non-modified objects used by all test methods # defined in fixture or with factory in setUp() Short test content value # Set up non-modified objects used by all test methods # No data has been supplied yet. # Now that you have given it data, it can validate. | 2.520046 | 3 |
docassemble/MilitaryAffidavit/macourts.py | jacqsull/docassemble-MilitaryAffidavit | 1 | 6614377 | <reponame>jacqsull/docassemble-MilitaryAffidavit
from docassemble.base.core import DAObject, DAList, DADict
from docassemble.base.util import path_and_mimetype, Address, LatitudeLongitude, DAStaticFile, text_type
from docassemble.base.legal import Court
import io, json, sys, requests, bs4, re, os #, cbor
# from operator import itemgetter
# from docassemble.base.logger import logmessage
from docassemble.webapp.playground import PlaygroundSection
def get_courts_from_massgov_url(url, shim_ehc_middlesex=True, shim_nhc_woburn=True):
    """Scrape a court directory page on Mass.gov and return a list of court dicts.

    Each dict has keys: name, description, has_po_box, phone, fax,
    address (nested dict), and location (latitude/longitude).
    The two shim flags append hard-coded sessions that Mass.gov does not
    list separately when the URL is the Housing Court directory.
    """
    page = requests.get(url)
    soup = bs4.BeautifulSoup(page.text, 'html.parser')
    jstring = soup.find_all( attrs={"data-drupal-selector":"drupal-settings-json"} )[0].text # this is the element that has the JSON data as of 6/19/2018
    jdata = json.loads(jstring)
    markers = jdata['locations']['googleMap']['markers']
    courts = []
    # The address and description are in a different part of the JSON
    for marker in markers:
        html_name = marker['infoWindow']['name']
        # Match the marker to its promo item to recover name + description.
        # NOTE(review): if no promo title matches, `name` keeps the value
        # from a previous iteration (or is unbound on the first) -- confirm
        # every marker always has a matching promo item.
        for item in jdata['locations']['imagePromos']['items']:
            description = ''
            if item['title']['text'] in html_name:
                name = item['title']['text']
                description = item['description']['richText']['rteElements'][0]['data']['rawHtml']['content']['#context']['value']
                break
        address = Address()
        orig_address = marker['infoWindow']['address'] # The geolocate method does _not_ work with PO Boxes (silently discards)
        clean_address = re.sub(r' *PO Box .*?,',"",orig_address)
        has_po_box = not clean_address == orig_address # We want to track if there was a PO Box where mail should be delivered
        address.address = orig_address
        if address.address == '':
            # No address at all: blank every component explicitly.
            address.city = ''
            address.state = ''
            address.zip = ''
            address.county = ''
            address.unit = ''
        else:
            address.geolocate(clean_address)
            # geolocate may leave components unset; backfill with ''.
            if not hasattr(address,'address'):
                address.address = ''
            if not hasattr(address, 'city'):
                address.city = ''
            if not hasattr(address, 'state'):
                address.state = ''
            if not hasattr(address, 'zip'):
                address.zip = ''
            if not hasattr(address, 'county'):
                address.county = ''
            #if not hasattr(address, 'unit'):
            #address.unit = ''
        # store the data in a serializable format. maybe could refactor to use object_hooks, but would need to go all the way down to DAObject?
        court = {
            'name': name,
            'description': description,
            'has_po_box' : has_po_box,
            'phone':marker['infoWindow']['phone'],
            'fax':marker['infoWindow']['fax'],
            'address': {
                'city': address.city,
                'address': address.address,
                'state': address.state,
                'zip': address.zip,
                'county': address.county,
                'orig_address': orig_address # the one-line original address, which may include a PO Box
            },
            'location': {
                'latitude': marker['position']['lat'],
                'longitude': marker['position']['lng']
            }
        }
        # 'unit' is optional; only include it when geolocation produced one.
        if hasattr(address, 'unit'):
            court['address']['unit']= address.unit
        courts.append(court)
    # Hard-coded shim: the Middlesex session is not listed on Mass.gov.
    if shim_ehc_middlesex and url == 'https://www.mass.gov/orgs/housing-court/locations':
        court = {
            'name': "Eastern Housing Court - Middlesex Session",
            'description': "The Middlesex Session of the Eastern Housing Court serves Arlington, Belmont, and Cambridge, Medford and Somerville",
            'has_po_box' : False,
            'phone': "(781) 306-2715",
            'fax':"",
            'address': {
                'city': "Medford",
                'address': "4040 Mystic Valley Parkway",
                'state': "MA",
                'zip': "02155",
                'county': "Middlesex",
                'orig_address': "4040 Mystic Valley Parkway, Medford, MA 02155"
            },
            'location': {
                'latitude': 42.4048336,
                'longitude': -71.0893853
            }
        }
        courts.append(court)
    # Hard-coded shim: the Woburn session is not listed on Mass.gov.
    if shim_nhc_woburn and url == 'https://www.mass.gov/orgs/housing-court/locations':
        court = {
            'name': "Northeast Housing Court - Woburn Session",
            'description': "The Woburn session of the Northeast Housing Court serves Bedford, Burlington, Concord, Everett,Lexington, Lincoln, Malden, Melrose, North Reading, Reading, Stoneham, Wakefield, Waltham, Watertown, Weston, Wilmington, Winchester, and Woburn.",
            'has_po_box' : False,
            'phone': "(978) 689-7833",
            'fax':"",
            'address': {
                'city': "Woburn",
                'address': "200 Trade Center",
                'unit': "Courtroom 540 - 5th Floor",
                'state': "MA",
                'zip': "01801",
                'county': "Middlesex",
                'orig_address': "200 Trade Center, Courtroom 540 - 5th Floor, Woburn, MA 01801"
            },
            'location': {
                'latitude': 42.500543,
                'longitude': -71.1656604
            }
        }
        courts.append(court)
    courts.sort(key=lambda k: k['name']) # We want to sort within category of court
    return courts
def save_courts_to_file():
    """Scrape every court category from Mass.gov and cache each one as a
    JSON file in the Playground data sources folder.

    Returns:
        The exception class on failure (preserving the original contract),
        or the string "Finished saving courts" on success.
    """
    courts = [
        ['district_courts', 'https://www.mass.gov/orgs/district-court/locations'],
        ['housing_courts', 'https://www.mass.gov/orgs/housing-court/locations'],
        ['bmc', 'https://www.mass.gov/orgs/boston-municipal-court/locations'],
        ['superior_courts', 'https://www.mass.gov/orgs/superior-court/locations'],
        ['land_courts', 'https://www.mass.gov/orgs/land-court/locations'],
        ['juvenile_courts', 'https://www.mass.gov/orgs/juvenile-court/locations'],
        ['probate_and_family_courts', 'https://www.mass.gov/orgs/probate-and-family-court/locations'],
    ]
    try:
        for court_name, court_url in courts:
            area = PlaygroundSection('sources').get_area()
            fpath = os.path.join(area.directory, court_name + '.json')
            jdata = text_type(json.dumps(get_courts_from_massgov_url(court_url)))
            # "with" guarantees the handle is closed even if the write fails;
            # the original open/write/close leaked the handle on error.
            with open(fpath, 'w') as f:
                f.write(jdata)
            area.finalize()
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # still propagate; keep returning the exception class as before.
        return sys.exc_info()[0]
    else:
        return "Finished saving courts"
def test_write():
    """Smoke test: write a small file into the Playground sources area.

    Returns the path of the file that was written.
    """
    playground_area = PlaygroundSection('sources').get_area()
    out_path = os.path.join(playground_area.directory, "test" + '.json')
    payload = "test"
    out_file = open(out_path, 'w')
    out_file.write(payload)
    out_file.close()
    playground_area.finalize()
    return out_path
class MACourt(Court):
    """A single Massachusetts court with contact info and geolocation."""

    def init(self, *pargs, **kwargs):
        super(MACourt, self).init(*pargs, **kwargs)
        # Ensure the complex attributes exist even when not passed in,
        # so later attribute access never raises.
        if 'address' not in kwargs:
            self.initializeAttribute('address', Address)
        if 'jurisdiction' not in kwargs:
            self.jurisdiction = list()
        if 'location' not in kwargs:
            self.initializeAttribute('location', LatitudeLongitude)

    def __unicode__(self):
        return self.name

    def __str__(self):
        # BUG FIX: the original returned the bound method object
        # (self.__unicode__) instead of calling it, so str(court)
        # produced "<bound method ...>" rather than the court name.
        return self.__unicode__()
class MACourtList(DAList):
"""Represents a list of courts in Massachusetts. Package includes a cached list that is scraped from mass.gov"""
    def init(self, *pargs, **kwargs):
        """Initialize the list; optionally auto-load courts via the `courts` attribute."""
        super(MACourtList, self).init(*pargs, **kwargs)
        self.auto_gather = False  # populated in code, not by interview questions
        self.gathered = True
        self.object_type = MACourt
        if hasattr(self,'courts'):
            if isinstance(self.courts, list):
                # A list names the specific cached court categories to load.
                self.load_courts(courts=self.courts)
            elif self.courts is True:
                # True means "load the default (housing-case) set".
                self.load_courts()
    def load_courts(self, courts=['housing_courts','bmc','district_courts','superior_courts'], data_path='docassemble.MACourts:data/sources/'):
        """Load a set of courts into the MACourtList. Courts should be a list of names of JSON files in the data/sources directory.
        Will fall back on loading default set of courts from Mass.gov. Default set of courts is applicable to housing cases"""
        # NOTE(review): mutable default argument -- safe only because the
        # list is never mutated here; confirm before refactoring.
        try:
            for court in courts:
                self.load_courts_from_file(court, data_path=data_path)
        except IOError:
            # Cached JSON missing: scrape Mass.gov directly instead.
            # NOTE(review): courts already loaded before the failure remain
            # in the list, so a partial cache could produce duplicates.
            if courts == ['housing_courts','bmc','district_courts','superior_courts']:
                self.load_from_massgov()
            else:
                self.load_from_massgov(housing_only=False)
    def load_from_massgov(self, housing_only=True):
        """Load courts directly from Mass.gov: fallback if cached files don't exist. URLs hardcoded.

        With housing_only=True only the four categories relevant to housing
        cases are scraped; otherwise all seven court directories are loaded.
        """
        if housing_only:
            urls = ['https://www.mass.gov/orgs/housing-court/locations',
                    'https://www.mass.gov/orgs/boston-municipal-court/locations',
                    'https://www.mass.gov/orgs/district-court/locations',
                    'https://www.mass.gov/orgs/superior-court/locations']
        else:
            urls = ['https://www.mass.gov/orgs/district-court/locations',
                    'https://www.mass.gov/orgs/housing-court/locations',
                    'https://www.mass.gov/orgs/boston-municipal-court/locations',
                    'https://www.mass.gov/orgs/superior-court/locations',
                    'https://www.mass.gov/orgs/land-court/locations',
                    'https://www.mass.gov/orgs/juvenile-court/locations',
                    'https://www.mass.gov/orgs/probate-and-family-court/locations']
        for url in urls:
            courts = get_courts_from_massgov_url(url)
            for item in courts:
                # translate the dictionary data into an MACourtList
                court = self.appendObject()
                court.name = item['name']
                court.phone = item['phone']
                court.fax = item['fax']
                court.location.latitude = item['location']['latitude']
                court.location.longitude = item['location']['longitude']
                # .get() because these keys are optional in the scraped dicts
                court.has_po_box = item.get('has_po_box')
                court.description = item.get('description')
                court.address.address = item['address']['address']
                court.address.city = item['address']['city']
                court.address.state = item['address']['state']
                court.address.zip = item['address']['zip']
                court.address.county = item['address']['county']
                court.address.orig_address = item['address'].get('orig_address')
    def load_courts_from_file(self, json_path, data_path='docassemble.MACourts:data/sources/'):
        """Add the list of courts at the specified JSON file into the current list.

        Raises IOError when the cached JSON file does not exist (callers rely
        on this to trigger the Mass.gov fallback).
        """
        path = path_and_mimetype(os.path.join(data_path,json_path+'.json'))[0]
        with open(path) as courts_json:
            courts = json.load(courts_json)
            for item in courts:
                # translate the dictionary data into an MACourtList
                court = self.appendObject()
                court.name = item['name']
                court.phone = item['phone']
                court.fax = item['fax']
                court.location.latitude = item['location']['latitude']
                court.location.longitude = item['location']['longitude']
                # .get() because these keys are optional in the cached dicts
                court.has_po_box = item.get('has_po_box')
                court.description = item.get('description')
                court.address.address = item['address']['address']
                court.address.city = item['address']['city']
                court.address.state = item['address']['state']
                court.address.zip = item['address']['zip']
                court.address.county = item['address']['county']
                court.address.orig_address = item['address'].get('orig_address')
def matching_housing_court(self, address):
"""Return the MACourt representing the Housing Court serving the given address"""
court_name = self.matching_housing_court_name(address)
return next ((court for court in self.elements if court.name == court_name), None)
    def matching_housing_court_name(self,address):
        """Return the name of the Housing Court session covering *address*.

        Hardcoded and must be updated if court jurisdictions or names change.
        Branch order matters: a few municipalities (e.g. Oxford, Bellingham,
        Freetown, Westport) appear in more than one city list and the first
        matching branch wins. Returns '' when nothing matches.
        """
        # Suffolk County plus Newton/Brookline go to the main Eastern session.
        if (address.county == "Suffolk County") or (address.city in ["Newton","Brookline"]):
            local_housing_court = "Eastern Housing Court"
        elif address.city in ["Arlington","Belmont","Cambridge","Medford","Somerville"]:
            local_housing_court = "Eastern Housing Court - Middlesex Session"
        elif address.city in ["Ashfield", "Bernardston", "Buckland", "Charlemont", "Colrain", "Conway", "Deerfield", "Erving", "Gill", "Greenfield", "Hawley", "Heath", "Leverett", "Leyden", "Monroe", "Montague", "New Salem", "Northfield", "Orange", "Rowe", "Shelburne", "Shutesbury", "Sunderland", "Warwick", "Wendell", "Whately"]:
            local_housing_court = "Western Housing Court - Greenfield Session"
        elif address.city in ['Amherst', 'Belchertown', 'Chesterfield', 'Cummington', 'Easthampton', 'Goshen', 'Granby', 'Hadley', 'Hatfield', 'Huntington', 'Middlefield', 'Northampton', 'Pelham', 'Plainfield', 'South Hadley', 'Southampton', 'Ware', 'Westhampton', 'Williamsburg','Worthington']:
            local_housing_court = "Western Housing Court - Hadley Session"
        elif address.county == "Berkshire":
            local_housing_court = "Western Housing Court - Pittsfield Session"
        elif address.city in ['Agawam', 'Blandford', 'Brimfield', 'Chester', 'Chicopee', 'East Longmeadow', 'Granville', 'Hampden', 'Holland', 'Holyoke', 'Longmeadow', 'Ludlow', 'Monson', 'Montgomery', 'Palmer', 'Russell', 'Southwick', 'Springfield', 'Tolland', 'Wales', 'West Springfield', 'Westfield','Wilbraham']:
            local_housing_court = "Western Housing Court - Springfield Session"
        elif address.city in ['Charlton', 'Dudley', 'Oxford', 'Southbridge', 'Sturbridge', 'Webster']:
            local_housing_court ="Central Housing Court - Dudley Session"
        elif address.city in ['Ashburnham', 'Athol', 'Fitchburg', 'Gardner', 'Holden', 'Hubbardston', 'Leominster', 'Lunenberg', 'Petersham', 'Phillipston', 'Princeton', 'Royalston', 'Templeton', 'Westminster', 'Winchendon']:
            local_housing_court = "Central Housing Court - Leominster Session"
        elif address.city in ['Ashland', 'Berlin', 'Bolton', 'Framingham', 'Harvard', 'Holliston', 'Hopkinton', 'Hudson', 'Marlborough', 'Natick', 'Northborough', 'Sherborn', 'Southborough', 'Sudbury', 'Wayland', 'Westborough']:
            local_housing_court = "Central Housing Court - Marlborough Session"
        elif address.city in ['Auburn', 'Barre', 'Bellingham', 'Blackstone', 'Boylston', 'Brookfield', 'Clinton', 'Douglas', 'East Brookfield', 'Grafton', 'Hardwick', 'Hopedale', 'Lancaster', 'Leicester', 'Mendon', 'Milford', 'Millbury', 'Millville', 'New Braintree', 'Northbridge', 'North Brookfield', 'Oakham', 'Oxford', 'Paxton', 'Rutland', 'Shrewsbury', 'Spencer', 'Sterling', 'Sutton', 'Upton', 'Uxbridge', 'Warren', 'West Boylston', 'Worcester']:
            local_housing_court = "Central Housing Court - Worcester Session"
        elif address.city in ['Abington', 'Avon', 'Bellingham', 'Braintree', 'Bridgewater', 'Brockton', 'Canton', 'Cohasset', 'Dedham', 'Dover', 'East Bridgewater', 'Eastham', 'Foxborough', 'Franklin', 'Holbrook', 'Medfield', 'Medway', 'Millis', 'Milton', 'Needham', 'Norfolk', 'Norwood', 'Plainville', 'Quincy', 'Randolph', 'Sharon', 'Stoughton', 'Walpole', 'Wellesley', 'West Bridgewater', 'Westwood', 'Weymouth', 'Whitman', 'Wrentham']:
            local_housing_court = "Metro South Housing Court - Brockton Session"
        elif address.county == "Norfolk County" and not address.city in ["Newton","Brookline"]:
            local_housing_court = "Metro South Housing Court - Canton Session"
        elif address.city in ['Amesbury', 'Andover', 'Boxford', 'Georgetown', 'Groveland', 'Haverhill', 'Lawrence', 'Merrimac', 'Methuen', 'Newbury', 'Newburyport', 'North Andover', 'Rowley', 'Salisbury', 'West Newbury']:
            local_housing_court = "Northeast Housing Court - Lawrence Session"
        elif address.city in ['Acton', 'Ashby', 'Ayer', 'Billerica', 'Boxborough', 'Carlisle', 'Chelmsford', 'Devens', 'Dracut', 'Dunstable', 'Groton', 'Littleton', 'Lowell', 'Maynard', 'Pepperell', 'Shirley', 'Stow', 'Tewksbury', 'Townsend', 'Tyngsborough', 'Westford']:
            local_housing_court = "Northeast Housing Court - Lowell Session"
        elif address.city in ['Lynn', 'Nahant', 'Saugus']:
            local_housing_court = "Northeast Housing Court - Lynn Session"
        elif address.city in ['Beverly', 'Danvers', 'Essex', 'Gloucester', 'Hamilton', 'Ipswich', 'Lynnfield', 'Manchester-by-The-Sea', 'Marblehead', 'Middleton', 'Peabody', 'Rockport', 'Salem', 'Swampscott', 'Topsfield', 'Wenham']:
            local_housing_court = "Northeast Housing Court - Salem Session"
        elif address.city in ['Bedford', 'Burlington', 'Concord', 'Everett','Lexington', 'Lincoln', 'Malden', 'Melrose', 'North Reading', 'Reading', 'Stoneham', 'Wakefield', 'Waltham', 'Watertown', 'Weston', 'Wilmington', 'Winchester', 'Woburn']:
            local_housing_court = "Northeast Housing Court - Woburn Session"
        elif address.city in ['Freetown', 'Westport', 'Fall River', 'Somerset','Swansea']:
            local_housing_court = "Southeast Housing Court - Fall River Session"
        elif address.city in ['Acushnet', 'Dartmouth', 'Fairhaven', 'Freetown', 'New Bedford','Westport']:
            local_housing_court = "Southeast Housing Court - New Bedford Session"
        # NOTE(review): the Plymouth list below contains scraped sentence
        # fragments ('and Wareham.Beginning on August 6', ...) rather than
        # clean city names -- verify against the source page before relying
        # on matches for Wareham/Accord/Assinippi etc.
        elif address.city in ['Aquinnah', 'Barnstable', 'Bourne', 'Brewster', 'Carver', 'Chatham', 'Chilmark', 'Dennis', 'Duxbury', 'Edgartown', 'Falmouth', 'Halifax', 'Hanson', 'Harwich', 'Kingston', 'Lakeville', 'Marion', 'Marshfield', 'Mashpee', 'Mattapoisett', 'Middleborough', 'Nantucket', 'Oak Bluffs', 'Pembroke', 'Plymouth', 'Plympton', 'Provincetown', 'Rochester', 'Sandwich', 'and Wareham.Beginning on August 6', 'the Plymouth session of the Southeast Housing Court will also serve Accord', 'Assinippi', 'Hanover', 'Hingham', 'Hull', 'Humarock', 'Norwell', 'Rockland', 'Scituate']:
            local_housing_court = "Southeast Housing Court - Plymouth Session"
        elif address.city in ['Attleboro', 'Berkley', 'Dighton', 'Easton', 'Mansfield', 'North Attleborough', 'Norton', 'Raynham', 'Rehoboth', 'Seekonk','Taunton']:
            local_housing_court = "Southeast Housing Court - Taunton Session"
        else:
            local_housing_court = ""
        return local_housing_court
if __name__ == '__main__':
import pprint
courts = get_courts_from_massgov_url('https://www.mass.gov/orgs/district-court/locations')
pprint.pprint(courts) | from docassemble.base.core import DAObject, DAList, DADict
from docassemble.base.util import path_and_mimetype, Address, LatitudeLongitude, DAStaticFile, text_type
from docassemble.base.legal import Court
import io, json, sys, requests, bs4, re, os #, cbor
# from operator import itemgetter
# from docassemble.base.logger import logmessage
from docassemble.webapp.playground import PlaygroundSection
def get_courts_from_massgov_url(url, shim_ehc_middlesex=True, shim_nhc_woburn=True):
"""Load specified court directory page on Mass.gov and returns an MACourtList
Properties include name, phone, fax, address, description (usually includes cities or county served), latitude, longitude
"""
page = requests.get(url)
soup = bs4.BeautifulSoup(page.text, 'html.parser')
jstring = soup.find_all( attrs={"data-drupal-selector":"drupal-settings-json"} )[0].text # this is the element that has the JSON data as of 6/19/2018
jdata = json.loads(jstring)
markers = jdata['locations']['googleMap']['markers']
courts = []
# The address and description are in a different part of the JSON
for marker in markers:
html_name = marker['infoWindow']['name']
for item in jdata['locations']['imagePromos']['items']:
description = ''
if item['title']['text'] in html_name:
name = item['title']['text']
description = item['description']['richText']['rteElements'][0]['data']['rawHtml']['content']['#context']['value']
break
address = Address()
orig_address = marker['infoWindow']['address'] # The geolocate method does _not_ work with PO Boxes (silently discards)
clean_address = re.sub(r' *PO Box .*?,',"",orig_address)
has_po_box = not clean_address == orig_address # We want to track if there was a PO Box where mail should be delivered
address.address = orig_address
if address.address == '':
address.city = ''
address.state = ''
address.zip = ''
address.county = ''
address.unit = ''
else:
address.geolocate(clean_address)
if not hasattr(address,'address'):
address.address = ''
if not hasattr(address, 'city'):
address.city = ''
if not hasattr(address, 'state'):
address.state = ''
if not hasattr(address, 'zip'):
address.zip = ''
if not hasattr(address, 'county'):
address.county = ''
#if not hasattr(address, 'unit'):
#address.unit = ''
# store the data in a serializable format. maybe could refactor to use object_hooks, but would need to go all the way down to DAObject?
court = {
'name': name,
'description': description,
'has_po_box' : has_po_box,
'phone':marker['infoWindow']['phone'],
'fax':marker['infoWindow']['fax'],
'address': {
'city': address.city,
'address': address.address,
'state': address.state,
'zip': address.zip,
'county': address.county,
'orig_address': orig_address # the one-line original address, which may include a PO Box
},
'location': {
'latitude': marker['position']['lat'],
'longitude': marker['position']['lng']
}
}
if hasattr(address, 'unit'):
court['address']['unit']= address.unit
courts.append(court)
if shim_ehc_middlesex and url == 'https://www.mass.gov/orgs/housing-court/locations':
court = {
'name': "Eastern Housing Court - Middlesex Session",
'description': "The Middlesex Session of the Eastern Housing Court serves Arlington, Belmont, and Cambridge, Medford and Somerville",
'has_po_box' : False,
'phone': "(781) 306-2715",
'fax':"",
'address': {
'city': "Medford",
'address': "4040 Mystic Valley Parkway",
'state': "MA",
'zip': "02155",
'county': "Middlesex",
'orig_address': "4040 Mystic Valley Parkway, Medford, MA 02155"
},
'location': {
'latitude': 42.4048336,
'longitude': -71.0893853
}
}
courts.append(court)
if shim_nhc_woburn and url == 'https://www.mass.gov/orgs/housing-court/locations':
court = {
'name': "Northeast Housing Court - Woburn Session",
'description': "The Woburn session of the Northeast Housing Court serves Bedford, Burlington, Concord, Everett,Lexington, Lincoln, Malden, Melrose, North Reading, Reading, Stoneham, Wakefield, Waltham, Watertown, Weston, Wilmington, Winchester, and Woburn.",
'has_po_box' : False,
'phone': "(978) 689-7833",
'fax':"",
'address': {
'city': "Woburn",
'address': "200 Trade Center",
'unit': "Courtroom 540 - 5th Floor",
'state': "MA",
'zip': "01801",
'county': "Middlesex",
'orig_address': "200 Trade Center, Courtroom 540 - 5th Floor, Woburn, MA 01801"
},
'location': {
'latitude': 42.500543,
'longitude': -71.1656604
}
}
courts.append(court)
courts.sort(key=lambda k: k['name']) # We want to sort within category of court
return courts
def save_courts_to_file():
    '''Scrape every court category from Mass.gov and write each one to a
    .json file in the Playground data sources folder.

    Returns the string "Finished saving courts" on success, or the exception
    class (as before) when any scrape/write fails.
    '''
    # (output file base name, Mass.gov directory URL) pairs.
    courts = [
        ['district_courts', 'https://www.mass.gov/orgs/district-court/locations'],
        ['housing_courts', 'https://www.mass.gov/orgs/housing-court/locations'],
        ['bmc', 'https://www.mass.gov/orgs/boston-municipal-court/locations'],
        ['superior_courts', 'https://www.mass.gov/orgs/superior-court/locations'],
        ['land_courts', 'https://www.mass.gov/orgs/land-court/locations'],
        ['juvenile_courts', 'https://www.mass.gov/orgs/juvenile-court/locations'],
        ['probate_and_family_courts', 'https://www.mass.gov/orgs/probate-and-family-court/locations'],
    ]
    try:
        for court in courts:
            area = PlaygroundSection('sources').get_area()
            fpath = os.path.join(area.directory, court[0] + '.json')
            jdata = text_type(json.dumps(get_courts_from_massgov_url(court[1])))
            # ``with`` guarantees the handle is closed even if a later step raises.
            with open(fpath, 'w') as f:
                f.write(jdata)
            area.finalize()
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); still returns the exception class to
        # preserve the original contract.
        return sys.exc_info()[0]
    else:
        return "Finished saving courts"
def test_write():
    '''Smoke-test write access to the Playground sources folder.

    Writes a small test.json file and returns the path written.
    '''
    area = PlaygroundSection('sources').get_area()
    fpath = os.path.join(area.directory, "test" + '.json')
    # ``with`` replaces the manual open/close so the handle is always released.
    with open(fpath, 'w') as f:
        f.write("test")
    area.finalize()
    return fpath
class MACourt(Court):
    """A single Massachusetts court: name plus address, jurisdiction list and
    latitude/longitude location."""
    def init(self, *pargs, **kwargs):
        super(MACourt, self).init(*pargs, **kwargs)
        # Only create default sub-objects when the caller didn't supply them.
        if 'address' not in kwargs:
            self.initializeAttribute('address', Address)
        if 'jurisdiction' not in kwargs:
            self.jurisdiction = list()
        if 'location' not in kwargs:
            self.initializeAttribute('location', LatitudeLongitude)

    def __unicode__(self):
        return self.name

    def __str__(self):
        # Bug fix: previously returned the bound method object itself
        # (``self.__unicode__``) instead of its result.
        return self.__unicode__()
class MACourtList(DAList):
    """Represents a list of courts in Massachusetts. Package includes a cached list that is scraped from mass.gov"""
    def init(self, *pargs, **kwargs):
        super(MACourtList, self).init(*pargs, **kwargs)
        # The list is pre-populated below, so docassemble must not prompt for items.
        self.auto_gather = False
        self.gathered = True
        self.object_type = MACourt
        # ``courts`` may be passed at construction time: a list of file base
        # names to load, or True for the default set.
        if hasattr(self,'courts'):
            if isinstance(self.courts, list):
                self.load_courts(courts=self.courts)
            elif self.courts is True:
                self.load_courts()
    def load_courts(self, courts=None, data_path='docassemble.MACourts:data/sources/'):
        """Load a set of courts into the MACourtList. Courts should be a list of names of JSON files in the data/sources directory.

        Will fall back on loading from Mass.gov when the cached files are
        missing; the default set of courts is applicable to housing cases.
        Bug fix: the default was previously a mutable list literal in the
        signature; a ``None`` sentinel now stands in for it.
        """
        default_courts = ['housing_courts', 'bmc', 'district_courts', 'superior_courts']
        if courts is None:
            courts = default_courts
        try:
            for court in courts:
                self.load_courts_from_file(court, data_path=data_path)
        except IOError:
            # Cached file missing: scrape Mass.gov directly instead.
            if courts == default_courts:
                self.load_from_massgov()
            else:
                self.load_from_massgov(housing_only=False)
    def load_from_massgov(self, housing_only=True):
        """Load courts directly from Mass.gov: fallback if cached files don't exist. URLs hardcoded."""
        if housing_only:
            urls = ['https://www.mass.gov/orgs/housing-court/locations',
                'https://www.mass.gov/orgs/boston-municipal-court/locations',
                'https://www.mass.gov/orgs/district-court/locations',
                'https://www.mass.gov/orgs/superior-court/locations']
        else:
            urls = ['https://www.mass.gov/orgs/district-court/locations',
                'https://www.mass.gov/orgs/housing-court/locations',
                'https://www.mass.gov/orgs/boston-municipal-court/locations',
                'https://www.mass.gov/orgs/superior-court/locations',
                'https://www.mass.gov/orgs/land-court/locations',
                'https://www.mass.gov/orgs/juvenile-court/locations',
                'https://www.mass.gov/orgs/probate-and-family-court/locations']
        for url in urls:
            courts = get_courts_from_massgov_url(url)
            for item in courts:
                # translate the dictionary data into an MACourtList
                court = self.appendObject()
                court.name = item['name']
                court.phone = item['phone']
                court.fax = item['fax']
                court.location.latitude = item['location']['latitude']
                court.location.longitude = item['location']['longitude']
                court.has_po_box = item.get('has_po_box')
                court.description = item.get('description')
                court.address.address = item['address']['address']
                court.address.city = item['address']['city']
                court.address.state = item['address']['state']
                court.address.zip = item['address']['zip']
                court.address.county = item['address']['county']
                court.address.orig_address = item['address'].get('orig_address')
    def load_courts_from_file(self, json_path, data_path='docassemble.MACourts:data/sources/'):
        """Add the list of courts at the specified JSON file into the current list.

        Raises IOError when the cached file does not exist.
        """
        path = path_and_mimetype(os.path.join(data_path,json_path+'.json'))[0]
        with open(path) as courts_json:
            courts = json.load(courts_json)
        for item in courts:
            # translate the dictionary data into an MACourtList
            court = self.appendObject()
            court.name = item['name']
            court.phone = item['phone']
            court.fax = item['fax']
            court.location.latitude = item['location']['latitude']
            court.location.longitude = item['location']['longitude']
            court.has_po_box = item.get('has_po_box')
            court.description = item.get('description')
            court.address.address = item['address']['address']
            court.address.city = item['address']['city']
            court.address.state = item['address']['state']
            court.address.zip = item['address']['zip']
            court.address.county = item['address']['county']
            court.address.orig_address = item['address'].get('orig_address')
    def matching_housing_court(self, address):
        """Return the MACourt representing the Housing Court serving the given address"""
        court_name = self.matching_housing_court_name(address)
        return next ((court for court in self.elements if court.name == court_name), None)
    def matching_housing_court_name(self,address):
        """Returns the name of the MACourt representing the housing court that covers the specified address.

        Hardcoded and must be updated if court jurisdictions or names change.
        Branch order matters: a few municipalities appear in more than one
        city list and the first matching branch wins. Returns '' when no
        session matches.
        """
        if (address.county == "Suffolk County") or (address.city in ["Newton","Brookline"]):
            local_housing_court = "Eastern Housing Court"
        elif address.city in ["Arlington","Belmont","Cambridge","Medford","Somerville"]:
            local_housing_court = "Eastern Housing Court - Middlesex Session"
        elif address.city in ["Ashfield", "Bernardston", "Buckland", "Charlemont", "Colrain", "Conway", "Deerfield", "Erving", "Gill", "Greenfield", "Hawley", "Heath", "Leverett", "Leyden", "Monroe", "Montague", "New Salem", "Northfield", "Orange", "Rowe", "Shelburne", "Shutesbury", "Sunderland", "Warwick", "Wendell", "Whately"]:
            local_housing_court = "Western Housing Court - Greenfield Session"
        elif address.city in ['Amherst', 'Belchertown', 'Chesterfield', 'Cummington', 'Easthampton', 'Goshen', 'Granby', 'Hadley', 'Hatfield', 'Huntington', 'Middlefield', 'Northampton', 'Pelham', 'Plainfield', 'South Hadley', 'Southampton', 'Ware', 'Westhampton', 'Williamsburg','Worthington']:
            local_housing_court = "Western Housing Court - Hadley Session"
        elif address.county == "Berkshire":
            local_housing_court = "Western Housing Court - Pittsfield Session"
        elif address.city in ['Agawam', 'Blandford', 'Brimfield', 'Chester', 'Chicopee', 'East Longmeadow', 'Granville', 'Hampden', 'Holland', 'Holyoke', 'Longmeadow', 'Ludlow', 'Monson', 'Montgomery', 'Palmer', 'Russell', 'Southwick', 'Springfield', 'Tolland', 'Wales', 'West Springfield', 'Westfield','Wilbraham']:
            local_housing_court = "Western Housing Court - Springfield Session"
        elif address.city in ['Charlton', 'Dudley', 'Oxford', 'Southbridge', 'Sturbridge', 'Webster']:
            local_housing_court ="Central Housing Court - Dudley Session"
        elif address.city in ['Ashburnham', 'Athol', 'Fitchburg', 'Gardner', 'Holden', 'Hubbardston', 'Leominster', 'Lunenberg', 'Petersham', 'Phillipston', 'Princeton', 'Royalston', 'Templeton', 'Westminster', 'Winchendon']:
            local_housing_court = "Central Housing Court - Leominster Session"
        elif address.city in ['Ashland', 'Berlin', 'Bolton', 'Framingham', 'Harvard', 'Holliston', 'Hopkinton', 'Hudson', 'Marlborough', 'Natick', 'Northborough', 'Sherborn', 'Southborough', 'Sudbury', 'Wayland', 'Westborough']:
            local_housing_court = "Central Housing Court - Marlborough Session"
        elif address.city in ['Auburn', 'Barre', 'Bellingham', 'Blackstone', 'Boylston', 'Brookfield', 'Clinton', 'Douglas', 'East Brookfield', 'Grafton', 'Hardwick', 'Hopedale', 'Lancaster', 'Leicester', 'Mendon', 'Milford', 'Millbury', 'Millville', 'New Braintree', 'Northbridge', 'North Brookfield', 'Oakham', 'Oxford', 'Paxton', 'Rutland', 'Shrewsbury', 'Spencer', 'Sterling', 'Sutton', 'Upton', 'Uxbridge', 'Warren', 'West Boylston', 'Worcester']:
            local_housing_court = "Central Housing Court - Worcester Session"
        elif address.city in ['Abington', 'Avon', 'Bellingham', 'Braintree', 'Bridgewater', 'Brockton', 'Canton', 'Cohasset', 'Dedham', 'Dover', 'East Bridgewater', 'Eastham', 'Foxborough', 'Franklin', 'Holbrook', 'Medfield', 'Medway', 'Millis', 'Milton', 'Needham', 'Norfolk', 'Norwood', 'Plainville', 'Quincy', 'Randolph', 'Sharon', 'Stoughton', 'Walpole', 'Wellesley', 'West Bridgewater', 'Westwood', 'Weymouth', 'Whitman', 'Wrentham']:
            local_housing_court = "Metro South Housing Court - Brockton Session"
        elif address.county == "Norfolk County" and not address.city in ["Newton","Brookline"]:
            local_housing_court = "Metro South Housing Court - Canton Session"
        elif address.city in ['Amesbury', 'Andover', 'Boxford', 'Georgetown', 'Groveland', 'Haverhill', 'Lawrence', 'Merrimac', 'Methuen', 'Newbury', 'Newburyport', 'North Andover', 'Rowley', 'Salisbury', 'West Newbury']:
            local_housing_court = "Northeast Housing Court - Lawrence Session"
        elif address.city in ['Acton', 'Ashby', 'Ayer', 'Billerica', 'Boxborough', 'Carlisle', 'Chelmsford', 'Devens', 'Dracut', 'Dunstable', 'Groton', 'Littleton', 'Lowell', 'Maynard', 'Pepperell', 'Shirley', 'Stow', 'Tewksbury', 'Townsend', 'Tyngsborough', 'Westford']:
            local_housing_court = "Northeast Housing Court - Lowell Session"
        elif address.city in ['Lynn', 'Nahant', 'Saugus']:
            local_housing_court = "Northeast Housing Court - Lynn Session"
        elif address.city in ['Beverly', 'Danvers', 'Essex', 'Gloucester', 'Hamilton', 'Ipswich', 'Lynnfield', 'Manchester-by-The-Sea', 'Marblehead', 'Middleton', 'Peabody', 'Rockport', 'Salem', 'Swampscott', 'Topsfield', 'Wenham']:
            local_housing_court = "Northeast Housing Court - Salem Session"
        elif address.city in ['Bedford', 'Burlington', 'Concord', 'Everett','Lexington', 'Lincoln', 'Malden', 'Melrose', 'North Reading', 'Reading', 'Stoneham', 'Wakefield', 'Waltham', 'Watertown', 'Weston', 'Wilmington', 'Winchester', 'Woburn']:
            local_housing_court = "Northeast Housing Court - Woburn Session"
        elif address.city in ['Freetown', 'Westport', 'Fall River', 'Somerset','Swansea']:
            local_housing_court = "Southeast Housing Court - Fall River Session"
        elif address.city in ['Acushnet', 'Dartmouth', 'Fairhaven', 'Freetown', 'New Bedford','Westport']:
            local_housing_court = "Southeast Housing Court - New Bedford Session"
        # NOTE(review): the Plymouth list contains scraped sentence fragments
        # rather than clean city names -- verify against the source page.
        elif address.city in ['Aquinnah', 'Barnstable', 'Bourne', 'Brewster', 'Carver', 'Chatham', 'Chilmark', 'Dennis', 'Duxbury', 'Edgartown', 'Falmouth', 'Halifax', 'Hanson', 'Harwich', 'Kingston', 'Lakeville', 'Marion', 'Marshfield', 'Mashpee', 'Mattapoisett', 'Middleborough', 'Nantucket', 'Oak Bluffs', 'Pembroke', 'Plymouth', 'Plympton', 'Provincetown', 'Rochester', 'Sandwich', 'and Wareham.Beginning on August 6', 'the Plymouth session of the Southeast Housing Court will also serve Accord', 'Assinippi', 'Hanover', 'Hingham', 'Hull', 'Humarock', 'Norwell', 'Rockland', 'Scituate']:
            local_housing_court = "Southeast Housing Court - Plymouth Session"
        elif address.city in ['Attleboro', 'Berkley', 'Dighton', 'Easton', 'Mansfield', 'North Attleborough', 'Norton', 'Raynham', 'Rehoboth', 'Seekonk','Taunton']:
            local_housing_court = "Southeast Housing Court - Taunton Session"
        else:
            local_housing_court = ""
        return local_housing_court
if __name__ == '__main__':
import pprint
courts = get_courts_from_massgov_url('https://www.mass.gov/orgs/district-court/locations')
pprint.pprint(courts) | en | 0.871579 | #, cbor # from operator import itemgetter # from docassemble.base.logger import logmessage Load specified court directory page on Mass.gov and returns an MACourtList Properties include name, phone, fax, address, description (usually includes cities or county served), latitude, longitude # this is the element that has the JSON data as of 6/19/2018 # The address and description are in a different part of the JSON # The geolocate method does _not_ work with PO Boxes (silently discards) # We want to track if there was a PO Box where mail should be delivered #if not hasattr(address, 'unit'): #address.unit = '' # store the data in a serializable format. maybe could refactor to use object_hooks, but would need to go all the way down to DAObject? # the one-line original address, which may include a PO Box # We want to sort within category of court Writes all courts to .json files in Playground data sources folder Represents a list of courts in Massachusetts. Package includes a cached list that is scraped from mass.gov Load a set of courts into the MACourtList. Courts should be a list of names of JSON files in the data/sources directory. Will fall back on loading default set of courts from Mass.gov. Default set of courts is applicable to housing cases Load courts directly from Mass.gov: fallback if cached files don't exist. URLs hardcoded. # translate the dictionary data into an MACourtList Add the list of courts at the specified JSON file into the current list # translate the dictionary data into an MACourtList Return the MACourt representing the Housing Court serving the given address Returns the name of the MACourt representing the housing court that covers the specified address. Harcoded and must be updated if court jurisdictions or names change. | 2.432312 | 2 |
web/config.py | tomasff/wwv | 3 | 6614378 | <gh_stars>1-10
import os
from dotenv import load_dotenv
load_dotenv()
class Config:
    """Flask, session, MongoDB, Twitter-OAuth and Redis settings sourced from
    environment variables (``load_dotenv`` is expected to have populated the
    environment before this module is imported).

    NOTE(review): the ``int(...)`` conversions raise TypeError when the
    corresponding variable is unset -- confirm every deployment defines
    PERMANENT_SESSION_LIFETIME and REDIS_PORT.
    """
    DEBUG = False
    SESSION_TYPE = 'mongodb'
    SERVER_NAME = os.getenv('SERVER_NAME')
    BASE_URL = os.getenv('BASE_URL')
    # Environment stores the flag as the literal string 'true'.
    SESSION_COOKIE_SECURE = (os.getenv('SESSION_COOKIE_SECURE') == 'true')
    PERMANENT_SESSION_LIFETIME = int(os.getenv('PERMANENT_SESSION_LIFETIME'))
    SECRET_KEY = os.getenv('SECRET_KEY')
    MONGO_URI = os.getenv('MONGO_URI')
    CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
    CONSUMER_KEY = os.getenv('CONSUMER_KEY')
    REDIS_HOSTNAME = os.getenv('REDIS_HOSTNAME')
    REDIS_PORT = int(os.getenv('REDIS_PORT'))
    REDIS_PUBSUB_CH = os.getenv('REDIS_PUBSUB_CH')
from dotenv import load_dotenv
load_dotenv()
class Config:
    """Flask, session, MongoDB, Twitter-OAuth and Redis settings sourced from
    environment variables (``load_dotenv`` is expected to have populated the
    environment before this module is imported).

    NOTE(review): the ``int(...)`` conversions raise TypeError when the
    corresponding variable is unset -- confirm every deployment defines
    PERMANENT_SESSION_LIFETIME and REDIS_PORT.
    """
    DEBUG = False
    SESSION_TYPE = 'mongodb'
    SERVER_NAME = os.getenv('SERVER_NAME')
    BASE_URL = os.getenv('BASE_URL')
    # Environment stores the flag as the literal string 'true'.
    SESSION_COOKIE_SECURE = (os.getenv('SESSION_COOKIE_SECURE') == 'true')
    PERMANENT_SESSION_LIFETIME = int(os.getenv('PERMANENT_SESSION_LIFETIME'))
    SECRET_KEY = os.getenv('SECRET_KEY')
    MONGO_URI = os.getenv('MONGO_URI')
    CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
    CONSUMER_KEY = os.getenv('CONSUMER_KEY')
    REDIS_HOSTNAME = os.getenv('REDIS_HOSTNAME')
    REDIS_PORT = int(os.getenv('REDIS_PORT'))
    REDIS_PUBSUB_CH = os.getenv('REDIS_PUBSUB_CH')
src/pygrambank/commands/roundtrip.py | glottobank/pygrambank | 2 | 6614379 | """
"""
from csvw.dsv import UnicodeWriter, reader
from clldutils.clilib import PathType
def register(parser):
    """Register the command's CLI arguments: a single positional ``sheet``
    path (PathType(type='file') -- presumably requires an existing file;
    confirm against clldutils.clilib)."""
    parser.add_argument('sheet', type=PathType(type='file'))
def run(args):
    """Read the tab-separated sheet and write it straight back to the same
    path, letting csvw's reader/writer normalize the file in place."""
    contents = [row for row in reader(args.sheet, delimiter='\t')]
    with UnicodeWriter(args.sheet, delimiter='\t') as writer:
        writer.writerows(contents)
| """
"""
from csvw.dsv import UnicodeWriter, reader
from clldutils.clilib import PathType
def register(parser):
    """Register the command's CLI arguments: a single positional ``sheet``
    path (PathType(type='file') -- presumably requires an existing file;
    confirm against clldutils.clilib)."""
    parser.add_argument('sheet', type=PathType(type='file'))
def run(args):
    """Read the tab-separated sheet and write it straight back to the same
    path, letting csvw's reader/writer normalize the file in place."""
    contents = [row for row in reader(args.sheet, delimiter='\t')]
    with UnicodeWriter(args.sheet, delimiter='\t') as writer:
        writer.writerows(contents)
| none | 1 | 2.812286 | 3 | |
src/entities/course.py | makeri89/ohtu-miniprojekti | 0 | 6614380 | from database import db
from entities.course_to_podcast import courses_to_podcasts
from entities.course_to_book import courses_to_books
from entities.course_to_weblink import courses_to_weblinks
class Course(db.Model):
    """ORM model for a course, linked many-to-many to study resources."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(500), nullable=False)
    # Many-to-many associations; each secondary table lives in its own
    # entities.course_to_* module and the related model declares the
    # mirror-image ``courses`` relationship via back_populates.
    podcasts = db.relationship(
        'Podcast',
        secondary=courses_to_podcasts,
        back_populates='courses'
    )
    books = db.relationship(
        'Book',
        secondary=courses_to_books,
        back_populates='courses'
    )
    weblinks = db.relationship(
        'Weblink',
        secondary=courses_to_weblinks,
        back_populates='courses'
    )
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        # e.g. "3: Intro to Databases"
        return f'{self.id}: {self.name}'
| from database import db
from entities.course_to_podcast import courses_to_podcasts
from entities.course_to_book import courses_to_books
from entities.course_to_weblink import courses_to_weblinks
class Course(db.Model):
    """ORM model for a course, linked many-to-many to study resources."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(500), nullable=False)
    # Many-to-many associations; each secondary table lives in its own
    # entities.course_to_* module and the related model declares the
    # mirror-image ``courses`` relationship via back_populates.
    podcasts = db.relationship(
        'Podcast',
        secondary=courses_to_podcasts,
        back_populates='courses'
    )
    books = db.relationship(
        'Book',
        secondary=courses_to_books,
        back_populates='courses'
    )
    weblinks = db.relationship(
        'Weblink',
        secondary=courses_to_weblinks,
        back_populates='courses'
    )
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        # e.g. "3: Intro to Databases"
        return f'{self.id}: {self.name}'
| none | 1 | 2.489214 | 2 | |
tests/cmdargs.py | tiagoshibata/exrspl | 24 | 6614381 | import collections
# Immutable record bundling the parsed command-line options.
CmdArgs = collections.namedtuple(
    'CmdArgs', 'split_channels merge image prefix list layer')
| import collections
# Immutable record bundling the parsed command-line options.
CmdArgs = collections.namedtuple(
    'CmdArgs', 'split_channels merge image prefix list layer')
| none | 1 | 1.640254 | 2 | |
chainer-cifar10/models/vgg.py | shinh/octconv-chainer | 3 | 6614382 | import chainer
import chainer.functions as F
import chainer.links as L
class VGG(chainer.Chain):
    """VGG-style ConvNet for small images (repository targets CIFAR-10).

    Three convolution stages (2x64, 2x128 and 4x256 3x3 convolutions, each
    followed by batch normalization and ReLU), with 2x2 max pooling and
    dropout after every stage, then three fully-connected layers producing
    ``n_class`` raw logits (no softmax applied here).
    """
    def __init__(self, n_class=10):
        super(VGG, self).__init__()
        with self.init_scope():
            # Stage 1: 64 channels (input channel count inferred via None).
            self.conv1_1 = L.Convolution2D(None, 64, 3, pad=1)
            self.bn1_1 = L.BatchNormalization(64)
            self.conv1_2 = L.Convolution2D(64, 64, 3, pad=1)
            self.bn1_2 = L.BatchNormalization(64)
            # Stage 2: 128 channels.
            self.conv2_1 = L.Convolution2D(64, 128, 3, pad=1)
            self.bn2_1 = L.BatchNormalization(128)
            self.conv2_2 = L.Convolution2D(128, 128, 3, pad=1)
            self.bn2_2 = L.BatchNormalization(128)
            # Stage 3: four 256-channel convolutions.
            self.conv3_1 = L.Convolution2D(128, 256, 3, pad=1)
            self.bn3_1 = L.BatchNormalization(256)
            self.conv3_2 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_2 = L.BatchNormalization(256)
            self.conv3_3 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_3 = L.BatchNormalization(256)
            self.conv3_4 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_4 = L.BatchNormalization(256)
            # Classifier head.
            self.fc4 = L.Linear(None, 1024)
            self.fc5 = L.Linear(1024, 1024)
            self.fc6 = L.Linear(1024, n_class)

    def __call__(self, x):
        # Stage 1 -> pool -> dropout.
        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Stage 2 -> pool -> dropout.
        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Stage 3 -> pool -> dropout.
        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.relu(self.bn3_4(self.conv3_4(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Fully-connected head; heavier dropout (0.5) between dense layers.
        h = F.dropout(F.relu(self.fc4(h)), ratio=0.5)
        h = F.dropout(F.relu(self.fc5(h)), ratio=0.5)
        h = self.fc6(h)
        return h
| import chainer
import chainer.functions as F
import chainer.links as L
class VGG(chainer.Chain):
    """VGG-style ConvNet for small images (repository targets CIFAR-10).

    Three convolution stages (2x64, 2x128 and 4x256 3x3 convolutions, each
    followed by batch normalization and ReLU), with 2x2 max pooling and
    dropout after every stage, then three fully-connected layers producing
    ``n_class`` raw logits (no softmax applied here).
    """
    def __init__(self, n_class=10):
        super(VGG, self).__init__()
        with self.init_scope():
            # Stage 1: 64 channels (input channel count inferred via None).
            self.conv1_1 = L.Convolution2D(None, 64, 3, pad=1)
            self.bn1_1 = L.BatchNormalization(64)
            self.conv1_2 = L.Convolution2D(64, 64, 3, pad=1)
            self.bn1_2 = L.BatchNormalization(64)
            # Stage 2: 128 channels.
            self.conv2_1 = L.Convolution2D(64, 128, 3, pad=1)
            self.bn2_1 = L.BatchNormalization(128)
            self.conv2_2 = L.Convolution2D(128, 128, 3, pad=1)
            self.bn2_2 = L.BatchNormalization(128)
            # Stage 3: four 256-channel convolutions.
            self.conv3_1 = L.Convolution2D(128, 256, 3, pad=1)
            self.bn3_1 = L.BatchNormalization(256)
            self.conv3_2 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_2 = L.BatchNormalization(256)
            self.conv3_3 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_3 = L.BatchNormalization(256)
            self.conv3_4 = L.Convolution2D(256, 256, 3, pad=1)
            self.bn3_4 = L.BatchNormalization(256)
            # Classifier head.
            self.fc4 = L.Linear(None, 1024)
            self.fc5 = L.Linear(1024, 1024)
            self.fc6 = L.Linear(1024, n_class)

    def __call__(self, x):
        # Stage 1 -> pool -> dropout.
        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Stage 2 -> pool -> dropout.
        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Stage 3 -> pool -> dropout.
        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.relu(self.bn3_4(self.conv3_4(h)))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25)
        # Fully-connected head; heavier dropout (0.5) between dense layers.
        h = F.dropout(F.relu(self.fc4(h)), ratio=0.5)
        h = F.dropout(F.relu(self.fc5(h)), ratio=0.5)
        h = self.fc6(h)
        return h
| none | 1 | 2.628128 | 3 | |
autoload/python/coqide/coqtopinstance.py | iandingx/coqide.vim | 0 | 6614383 | #!/usr/bin/env python3
'''Coqtop process handle.'''
import logging
from queue import Queue, Empty
from subprocess import Popen, PIPE, TimeoutExpired
from threading import Thread
import xml.etree.ElementTree as ET
from . import xmlprotocol as xp
# Module-level logger; lower-case constant name is intentional.
logger = logging.getLogger(__name__)  # pylint: disable=C0103

# DOCTYPE prepended to coqtop's output before parsing (see
# _CoqtopReader._thread_entry): it declares HTML-style entities (&nbsp;,
# &quot;) that a plain XML parser would otherwise reject.
_XML_DOCTYPE = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" [
<!ENTITY nbsp ' '>
<!ENTITY quot '"'>
]>'''
class CoqtopQuit(Exception):
    '''The coqtop process quits (its stdout pipe reached end-of-file).'''
class _XMLLogger:  # pylint: disable=R0903
    '''Lazy string adapter for an XML element.

    Wraps an element so that serialization via ``ET.tostring`` only happens
    when ``str()`` is actually invoked — e.g. by ``logger.debug('%s', ...)``
    when the debug level is enabled.
    '''

    def __init__(self, xml):
        self._xml = xml

    def __str__(self):
        serialized = ET.tostring(self._xml)
        return serialized.decode()
class _CoqtopReader:
    '''The output processor for the coqtop process.

    Reads raw bytes from coqtop's stdout on a background thread,
    reassembles them into complete XML elements, and publishes the parsed
    elements on an internal queue for consumers to pull.
    '''

    def __init__(self, pipe):
        # Readable end of coqtop's stdout.
        self._pipe = pipe
        # Set to True by the reader thread when the pipe reaches EOF.
        self._closed = False
        self._thread = Thread(target=self._thread_entry)
        # Queue of parsed XML elements; a None sentinel signals termination.
        self._res_queue = Queue()

    def start(self):
        '''Start the processor thread.'''
        self._thread.start()

    def join(self):
        '''Wait for the thread to quit.'''
        self._thread.join()

    def get_response(self):
        '''Get a response from the coqtop process (blocking).

        Raise `CoqtopQuit` if the process terminates.
        '''
        res = self._res_queue.get()
        if res is None or self._closed:
            raise CoqtopQuit()
        return res

    def get_responses_nowait(self):
        '''Get all the available responses.

        The method is non-blocking.'''
        responses = []
        try:
            while True:
                response = self._res_queue.get_nowait()
                if response is None or self._closed:
                    break
                responses.append(response)
        except Empty:
            pass
        return responses

    def _thread_entry(self):
        # Accumulated raw byte chunks that do not yet parse as complete XML.
        chunks = []
        while True:
            data = self._pipe.read1(1000)
            if not data:
                # EOF: coqtop quit.  Publish the sentinel and stop.
                self._closed = True
                self._res_queue.put(None)
                break
            chunks.append(data)
            # coqtop emits a stream of sibling elements rather than one
            # document, so wrap everything read so far in a dummy <root>
            # (preceded by the entity-declaring DOCTYPE) and try to parse.
            doc = [_XML_DOCTYPE, '<root>'] + chunks + ['</root>']
            try:
                root = ET.fromstringlist(doc)
                for element in root:
                    logger.debug('Coqtop response: %s', _XMLLogger(element))
                    self._res_queue.put(element)
                chunks = []
            except ET.ParseError:
                # Incomplete XML so far; keep accumulating chunks.
                pass
class CoqtopInstance:
    '''Manages the connection with a coqtop process.

    Owns the child process: writes XML requests to its stdin and consumes
    responses through a `_CoqtopReader` background thread.
    '''

    def __init__(self):
        # Popen handle of the coqtop child; None until spawn().
        self._proc = None
        # Background stdout reader; None until spawn().
        self._reader = None

    def spawn(self, exec_args):
        '''Create the coqtop process.

        :param exec_args: argv list handed to `Popen` (executable + options).
        :raise RuntimeError: if a process was already spawned.
        '''
        if self._proc is not None:
            raise RuntimeError('CoqtopInstance already spawned.')
        self._proc = Popen(exec_args, stdin=PIPE, stdout=PIPE)
        self._reader = _CoqtopReader(self._proc.stdout)
        self._reader.start()

    def call(self, rtype, req):
        '''Send the request `req` of request type `rtype` to the coqtop
        process.

        :raise RuntimeError: if the process has not been spawned.
        '''
        if self._proc is None:
            raise RuntimeError('CoqtopInstance not spawned.')
        req_xml = xp.req_to_xml(rtype, req)
        req_bytes = ET.tostring(req_xml)
        logger.debug('Coqtop request: %s', req_bytes)
        self._proc.stdin.write(req_bytes)
        # Flush so coqtop sees the request immediately.
        self._proc.stdin.flush()

    def get_response(self, rtype):
        '''Get a response from coqtop (blocking).

        If the response is a value, return `('value', value_dict)`
        where `value_dict` is decoded from XML according to the
        request type `rtype`.

        If the response is a feedback, return `('feedback',
        fb_dict)` where `fb_dict` is decoded from XML as a
        feedback.

        :raise ValueError: on any other response tag.
        '''
        xml = self._reader.get_response()
        if xml.tag == 'feedback':
            return ('feedback', xp.feedback_from_xml(xml))
        elif xml.tag == 'value':
            return ('value', xp.res_from_xml(rtype, xml))
        else:
            raise ValueError('Bad coqtop response: {}'.format(
                ET.tostring(xml)))

    def close(self):
        '''Terminate the coqtop process (idempotent).'''
        if self._proc is None:
            return
        # Closing stdin lets coqtop exit on its own; escalate to kill if it
        # does not terminate within the timeout.
        self._proc.stdin.close()
        try:
            self._proc.wait(timeout=5)
        except TimeoutExpired:
            self._proc.kill()
            self._proc.wait(timeout=5)
        self._reader.join()
        self._proc = None
        self._reader = None

    def get_feedbacks(self):
        '''Read all the available feedbacks (non-blocking).

        :raise RuntimeError: if the process has not been spawned.
        '''
        if self._proc is None:
            raise RuntimeError('CoqtopInstance not spawned.')
        return list(map(xp.feedback_from_xml, self._reader.get_responses_nowait()))
| #!/usr/bin/env python3
'''Coqtop process handle.'''
import logging
from queue import Queue, Empty
from subprocess import Popen, PIPE, TimeoutExpired
from threading import Thread
import xml.etree.ElementTree as ET
from . import xmlprotocol as xp
logger = logging.getLogger(__name__) # pylint: disable=C0103
_XML_DOCTYPE = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" [
<!ENTITY nbsp ' '>
<!ENTITY quot '"'>
]>'''
class CoqtopQuit(Exception):
'''The coqtop process quits.'''
class _XMLLogger: # pylint: disable=R0903
'''A class that converts the XML document to its string representation.'''
def __init__(self, xml):
self._xml = xml
def __str__(self):
return ET.tostring(self._xml).decode()
class _CoqtopReader:
'''The output processor for the coqtop process.'''
def __init__(self, pipe):
self._pipe = pipe
self._closed = False
self._thread = Thread(target=self._thread_entry)
self._res_queue = Queue()
def start(self):
'''Start the processor thread.'''
self._thread.start()
def join(self):
'''Wait for the thread to quit.'''
self._thread.join()
def get_response(self):
'''Get a response from coqtop process.
Raise `CoqtopQuit` if the process terminates.
'''
res = self._res_queue.get()
if res is None or self._closed:
raise CoqtopQuit()
return res
def get_responses_nowait(self):
'''Get all the available responses.
The method is non-blocking.'''
responses = []
try:
while True:
response = self._res_queue.get_nowait()
if response is None or self._closed:
break
responses.append(response)
except Empty:
pass
return responses
def _thread_entry(self):
chunks = []
while True:
data = self._pipe.read1(1000)
if not data:
self._closed = True
self._res_queue.put(None)
break
chunks.append(data)
doc = [_XML_DOCTYPE, '<root>'] + chunks + ['</root>']
try:
root = ET.fromstringlist(doc)
for element in root:
logger.debug('Coqtop response: %s', _XMLLogger(element))
self._res_queue.put(element)
chunks = []
except ET.ParseError:
pass
class CoqtopInstance:
'''Manages the connection with a coqtop process.'''
def __init__(self):
self._proc = None
self._reader = None
def spawn(self, exec_args):
'''Create the coqtop process.'''
if self._proc is not None:
raise RuntimeError('CoqtopInstance already spawned.')
self._proc = Popen(exec_args, stdin=PIPE, stdout=PIPE)
self._reader = _CoqtopReader(self._proc.stdout)
self._reader.start()
def call(self, rtype, req):
'''Send the request `req` of request type `rtype` to the coqtop
process.
'''
if self._proc is None:
raise RuntimeError('CoqtopInstance not spawned.')
req_xml = xp.req_to_xml(rtype, req)
req_bytes = ET.tostring(req_xml)
logger.debug('Coqtop request: %s', req_bytes)
self._proc.stdin.write(req_bytes)
self._proc.stdin.flush()
def get_response(self, rtype):
'''Get a reponse from coqtop.
If the response is a value, return `('value', value_dict)`
where `value_dict` is decoded from XML according to the
request type `rtype`.
If the response is a feedback, return `('feedback',
fb_dict)` where `fb_dict` is decoded from XML as a
feedback.'''
xml = self._reader.get_response()
if xml.tag == 'feedback':
return ('feedback', xp.feedback_from_xml(xml))
elif xml.tag == 'value':
return ('value', xp.res_from_xml(rtype, xml))
else:
raise ValueError('Bad coqtop response: {}'.format(
ET.tostring(xml)))
def close(self):
'''Terminate the coqtop process.'''
if self._proc is None:
return
self._proc.stdin.close()
try:
self._proc.wait(timeout=5)
except TimeoutExpired:
self._proc.kill()
self._proc.wait(timeout=5)
self._reader.join()
self._proc = None
self._reader = None
def get_feedbacks(self):
'''Read all the available feedbacks.'''
if self._proc is None:
raise RuntimeError('CoqtopInstance not spawned.')
return list(map(xp.feedback_from_xml, self._reader.get_responses_nowait()))
| en | 0.753503 | #!/usr/bin/env python3 Coqtop process handle. # pylint: disable=C0103 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" [ <!ENTITY nbsp ' '> <!ENTITY quot '"'> ]> The coqtop process quits. # pylint: disable=R0903 A class that converts the XML document to its string representation. The output processor for the coqtop process. Start the processor thread. Wait for the thread to quit. Get a response from coqtop process. Raise `CoqtopQuit` if the process terminates. Get all the available responses. The method is non-blocking. Manages the connection with a coqtop process. Create the coqtop process. Send the request `req` of request type `rtype` to the coqtop process. Get a reponse from coqtop. If the response is a value, return `('value', value_dict)` where `value_dict` is decoded from XML according to the request type `rtype`. If the response is a feedback, return `('feedback', fb_dict)` where `fb_dict` is decoded from XML as a feedback. Terminate the coqtop process. Read all the available feedbacks. | 2.728726 | 3 |
query_lang/run_script.py | nikitavlaev/formal-languages | 0 | 6614384 | <reponame>nikitavlaev/formal-languages
from pyformlang.cfg import Variable
from context_free_algos.cyk import CYK
from context_free_algos.cfg import custom_CFG
GRAMMAR_PATH = 'query_lang/grammar.txt'
KEYWORDS_PATH = 'query_lang/keywords.txt'
PUNCTUATION = ['"', '.', ',', ':', ';', '(', ')', '[', ']', '{', '}', '_', '->', '&', '!', '|']
def preprocess_script(raw_script):
    """Tokenize a raw query-script string.

    Punctuation is padded with spaces so each mark becomes a standalone
    token.  Keywords (loaded from ``KEYWORDS_PATH``) and punctuation are
    kept whole; any other word is split into its individual characters
    (presumably the grammar consumes identifiers character by character —
    TODO confirm against the grammar file).

    :param raw_script: the script text to tokenize.
    :return: list of token strings.
    """
    text = raw_script.strip()
    for p in PUNCTUATION:
        text = text.replace(p, f" {p} ")
    with open(KEYWORDS_PATH, 'r') as f:
        # A set makes per-token membership O(1) instead of scanning a list.
        keywords = set(f.read().splitlines())
    script = []
    for token in text.split():
        # Plain if/else instead of a conditional expression used for its
        # side effects (the original idiom obscured the append/extend split).
        if token in keywords or token in PUNCTUATION:
            script.append(token)
        else:
            script.extend(token)
    return script
def run_script(raw_script, gram=None):
    """Parse ``raw_script`` against the query grammar using CYK.

    :param raw_script: the script text to check.
    :param gram: optional pre-built grammar; when omitted, the default
        grammar is read from ``GRAMMAR_PATH`` and converted to Chomsky
        normal form.
    :return: the CYK result for the tokenized script.
    """
    if gram is None:
        with open(GRAMMAR_PATH, 'r') as grammar_file:
            grammar_text = grammar_file.read()
        parsed = custom_CFG.read_cfg(
            grammar_text,
            start_symbol=Variable("SCRIPT"),
            contains_regexes=True,
            track_variables=True,
        )
        gram = parsed.to_normal_form()
    tokens = preprocess_script(raw_script)
    return CYK(gram, tokens)
if __name__ == '__main__':
raw_script = """
select count edges from ("graph" intersect [term(a).term(b)*.(term(c)|term(d))+])
"""
print(run_script(raw_script)) | from pyformlang.cfg import Variable
from context_free_algos.cyk import CYK
from context_free_algos.cfg import custom_CFG
GRAMMAR_PATH = 'query_lang/grammar.txt'
KEYWORDS_PATH = 'query_lang/keywords.txt'
PUNCTUATION = ['"', '.', ',', ':', ';', '(', ')', '[', ']', '{', '}', '_', '->', '&', '!', '|']
def preprocess_script(raw_script):
raw_script = raw_script.strip()
for p in PUNCTUATION:
raw_script = raw_script.replace(p, f" {p} ")
with open(KEYWORDS_PATH, 'r') as f:
keywords = f.read().splitlines()
script = []
for l in raw_script.split():
script.append(l) if (l in keywords or l in PUNCTUATION) else script.extend(l)
return script
def run_script(raw_script, gram=None):
if gram is None:
with open(GRAMMAR_PATH, 'r') as f:
gram = custom_CFG.read_cfg(
f.read(),
start_symbol=Variable("SCRIPT"),
contains_regexes=True,
track_variables=True,
)
gram = gram.to_normal_form()
script = preprocess_script(raw_script)
return CYK(gram, script)
if __name__ == '__main__':
raw_script = """
select count edges from ("graph" intersect [term(a).term(b)*.(term(c)|term(d))+])
"""
print(run_script(raw_script)) | en | 0.653305 | select count edges from ("graph" intersect [term(a).term(b)*.(term(c)|term(d))+]) | 2.566056 | 3 |
py_algo/sorting/counting_sort/finding_pairs.py | Sk0uF/Algorithms | 1 | 6614385 | <reponame>Sk0uF/Algorithms
"""
Codemonk link: https://www.hackerearth.com/practice/algorithms/sorting/counting-sort/practice-problems/algorithm/finding-pairs-4/
Given an array A of N numbers, find the number of distinct pairs (i, j) such that j >=i and A[i] = A[j].
Input - Output:
First line of the input contains number of test cases T.
Each test case has two lines, first line is the number N,
followed by a line consisting of N integers which are the
elements of array A.
For each test case print the number of distinct pairs.
Sample input:
3
4
1 2 3 4
3
1 2 1
5
1 1 1 1 1
Sample Output:
4
4
15
"""
"""
Each individual number counts for 1 in the total amount our answer. So, if the array has N elements, the initial value
of our answer is N. Find how many times each number appears in the array. After doing that, if a number occurs k times,
where k > 0 then we add to our answer [(k-1) * k] // 2. It basically contributes k-1 + k-2 + ... + 1. We know that
k + k-1 + ... + 1 = [(k+1) * k] // 2, so we just substitute k with k - 1.
Considering the amount of input cases insignificant, each
"for" has linear complexity.
Final complexity: O(2*N) => O(N)
"""
from collections import Counter


def count_equal_pairs(values):
    """Return the number of pairs (i, j) with j >= i and values[i] == values[j].

    Every index pairs with itself, contributing ``len(values)``; a value
    occurring ``k`` times adds a further ``k * (k - 1) // 2`` pairs with
    j > i.  Using :class:`collections.Counter` replaces the original
    fixed-size offset array, whose index mapping collided for values at the
    extremes of the range (e.g. 1 and -10**6 shared a slot), giving wrong
    answers.

    :param values: iterable of integers.
    :return: the pair count.
    """
    total = len(values)
    for k in Counter(values).values():
        total += k * (k - 1) // 2
    return total


def main():
    """Read T test cases from stdin and print one answer per case."""
    inp_len = int(input())
    for _ in range(inp_len):
        # N is declared on its own line; the values line is authoritative
        # (the problem guarantees they agree).
        int(input())
        array = list(map(int, input().rstrip().split()))
        print(count_equal_pairs(array))


# Guard so importing this module does not consume stdin.
if __name__ == '__main__':
    main()
| """
Codemonk link: https://www.hackerearth.com/practice/algorithms/sorting/counting-sort/practice-problems/algorithm/finding-pairs-4/
Given an array A of N numbers, find the number of distinct pairs (i, j) such that j >=i and A[i] = A[j].
Input - Output:
First line of the input contains number of test cases T.
Each test case has two lines, first line is the number N,
followed by a line consisting of N integers which are the
elements of array A.
For each test case print the number of distinct pairs.
Sample input:
3
4
1 2 3 4
3
1 2 1
5
1 1 1 1 1
Sample Output:
4
4
15
"""
"""
Each individual number counts for 1 in the total amount our answer. So, if the array has N elements, the initial value
of our answer is N. Find how many times each number appears in the array. After doing that, if a number occurs k times,
where k > 0 then we add to our answer [(k-1) * k] // 2. It basically contributes k-1 + k-2 + ... + 1. We know that
k + k-1 + ... + 1 = [(k+1) * k] // 2, so we just substitute k with k - 1.
Considering the amount of input cases insignificant, each
"for" has linear complexity.
Final complexity: O(2*N) => O(N)
"""
inp_len = int(input())
for _ in range(inp_len):
n = int(input())
array = list(map(int, input().rstrip().split()))
helper_array = [0] * (2*10**6)
total_amount = n
for element in array:
if element >= 0:
helper_array[element + 10**6 - 1] += 1
else:
helper_array[-element] += 1
for hlp in helper_array:
if hlp > 0:
total_amount += (hlp-1)*hlp//2
print(total_amount) | en | 0.836955 | Codemonk link: https://www.hackerearth.com/practice/algorithms/sorting/counting-sort/practice-problems/algorithm/finding-pairs-4/
Given an array A of N numbers, find the number of distinct pairs (i, j) such that j >=i and A[i] = A[j].
Input - Output:
First line of the input contains number of test cases T.
Each test case has two lines, first line is the number N,
followed by a line consisting of N integers which are the
elements of array A.
For each test case print the number of distinct pairs.
Sample input:
3
4
1 2 3 4
3
1 2 1
5
1 1 1 1 1
Sample Output:
4
4
15 Each individual number counts for 1 in the total amount our answer. So, if the array has N elements, the initial value
of our answer is N. Find how many times each number appears in the array. After doing that, if a number occurs k times,
where k > 0 then we add to our answer [(k-1) * k] // 2. It basically contributes k-1 + k-2 + ... + 1. We know that
k + k-1 + ... + 1 = [(k+1) * k] // 2, so we just substitute k with k - 1.
Considering the amount of input cases insignificant, each
"for" has linear complexity.
Final complexity: O(2*N) => O(N) | 3.711674 | 4 |
decitala/path_finding/dijkstra.py | Luke-Poeppel/decitala | 6 | 6614386 | # -*- coding: utf-8 -*-
####################################################################################################
# File: dijkstra.py
# Purpose: Implementation of the Dijkstra algorithm for path-finding.
#
# Author: <NAME>
#
# Location: NYC, 2021
####################################################################################################
import numpy as np
import heapq
from tqdm import tqdm
from . import path_finding_utils
# Useful info here: https://stackoverflow.com/questions/22897209/dijkstras-algorithm-in-python.
def dijkstra(
    data,
    graph,
    source,
    cost_function_class=path_finding_utils.CostFunction3D(),
):
    """
    Dijkstra shortest-path search over ``graph`` using a binary min-heap.

    :param list data: a list of :obj:`decitala.search.Extraction` objects.
    :param dict graph: adjacency mapping ``vertex_id -> [(neighbor_id, weight), ...]``.
    :param source: an :obj:`decitala.search.Extraction` object used as the
            start vertex.
    :param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost
            function used in calculating the weights between vertices.
    :return: ``(dist, pred)`` — tentative distance per vertex id and the
            predecessor map for path reconstruction.

    NOTE(review): ``data`` and ``cost_function_class`` are accepted for
    signature compatibility but are not referenced here — the edge weights
    are already baked into ``graph``.
    """
    start_id = source.id_
    dist = {vertex: np.inf for vertex in graph}
    dist[start_id] = 0
    pred = {}
    heap = [(0, start_id)]
    while heap:
        cost_so_far, vertex = heapq.heappop(heap)
        for neighbor, edge_weight in graph[vertex]:
            candidate = cost_so_far + edge_weight
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                pred[neighbor] = vertex
                heapq.heappush(heap, (candidate, neighbor))
    return dist, pred
def dijkstra_best_source_and_sink(
        data,
        cost_function_class=path_finding_utils.CostFunction3D(),
        enforce_earliest_start=False,
        verbose=False
):
    """
    Function for agnostically choosing the best source and target (and associated predecessor set)
    via Dijkstra. Only requires regular data input.

    :param list data: a list of :obj:`decitala.search.Extraction` objects.
    :param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost
            function that will be used in calculating the weights between vertices.
    :param bool enforce_earliest_start: whether candidate sources are restricted
            to the earliest starting fragments.
    :param bool verbose: whether to print logs.
    :return: 3-tuple ``(best_source, final_target, predecessor_map)``.
    """
    sources, targets = path_finding_utils.sources_and_sinks(
        data=data,
        enforce_earliest_start=enforce_earliest_start
    )
    graph = path_finding_utils.build_graph(
        data=data,
        cost_function_class=cost_function_class,
        verbose=verbose
    )

    # This checks if there exists a fragment in sources/sinks that spans the whole onset range.
    # Alternatively if all extracted fragments are overlapping (see test_povel_essen_dijkstra).
    def _all_overlap(data):
        """
        True when every extraction overlaps every other.
        Relies on the fact that the output data is sorted by onset range.
        """
        return data[0].onset_range[1] > data[-1].onset_range[0]

    min_onset = min(sources, key=lambda x: x.onset_range[0]).onset_range[0]
    max_onset = max(targets, key=lambda x: x.onset_range[1]).onset_range[1]
    if _all_overlap(data):
        # Degenerate case: no chain of non-overlapping fragments exists.
        # Prefer a single fragment spanning the entire onset range...
        for possible_source in sources:
            if possible_source.onset_range == (min_onset, max_onset):
                dist, pred = dijkstra(
                    data,
                    graph,
                    possible_source,
                    cost_function_class
                )
                return possible_source, possible_source, pred
        # otherwise choose the longest source.
        max_source = max(sources, key=lambda x: x.fragment.num_onsets)
        dist, pred = dijkstra(
            data,
            graph,
            max_source,
            cost_function_class
        )
        return max_source, max_source, pred

    # General case: run Dijkstra from every source and keep the cheapest
    # (source, target) pair whose target does not overlap the source.
    best_path_cost = np.inf
    best_source = None
    best_target = None
    best_predecessor_set = None
    for source in tqdm(sources, disable=not(verbose)):
        dist, pred = dijkstra(
            data,
            graph,
            source,
            cost_function_class
        )
        for target in targets:
            if (dist[target.id_] < best_path_cost):
                if source.onset_range[1] <= target.onset_range[0]:
                    best_path_cost = dist[target.id_]
                    best_source = source
                    best_target = target
                    best_predecessor_set = pred
    # This allows for fragments at the end to be missed...
    # Find final non-overlapping target with most onsets.
    final_target = None
    final_target_onsets = 0
    for target in targets:
        if target.onset_range[0] >= best_target.onset_range[1] and \
                target.fragment.num_onsets > final_target_onsets:
            final_target = target
            final_target_onsets = target.fragment.num_onsets
    # If none found, use best_target.
    if not(final_target):
        final_target = best_target
    return best_source, final_target, best_predecessor_set
def generate_path(pred, source, target):
    """
    Reconstruct the optimal path found by Dijkstra, from source to target.

    :param dict pred: the ``pred`` dictionary returned from
            :obj:`decitala.path_finding.dijkstra.dijkstra`.
    :param source: a :obj:`decitala.search.Extraction` object.
    :param target: a :obj:`decitala.search.Extraction` object.
    :return: list of fragment ids from source to target (inclusive).
    """
    start_id = source.id_
    end_id = target.id_
    # Degenerate case: nothing was relaxed and source equals target.
    if not pred and start_id == end_id:
        return [start_id]
    path = [end_id]
    while True:
        predecessor = pred[path[0]]
        path.insert(0, predecessor)
        if predecessor == start_id:
            return path
####################################################################################################
# File: dijkstra.py
# Purpose: Implementation of the Dijkstra algorithm for path-finding.
#
# Author: <NAME>
#
# Location: NYC, 2021
####################################################################################################
import numpy as np
import heapq
from tqdm import tqdm
from . import path_finding_utils
# Useful info here: https://stackoverflow.com/questions/22897209/dijkstras-algorithm-in-python.
def dijkstra(
data,
graph,
source,
cost_function_class=path_finding_utils.CostFunction3D(),
):
"""
Dijkstra path-finding algorithm from dynamic programming. Uses a min-heap
data structure for efficiency.
:param list data: a list of :obj:`decitala.search.Extraction` objects.
:param source: an :obj:`decitala.search.Extraction` object.
:param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost
function that will be used in calculating the weights between vertices.
"""
source = source.id_
q = []
dist = {x: np.inf for x in graph.keys()}
pred = {}
dist[source] = 0
heapq.heappush(q, (0, source))
while q:
last_w, curr_v = heapq.heappop(q)
for n, n_w in graph[curr_v]:
alt = last_w + n_w
if alt < dist[n]:
dist[n] = alt
pred[n] = curr_v
heapq.heappush(q, (alt, n))
return dist, pred
def dijkstra_best_source_and_sink(
data,
cost_function_class=path_finding_utils.CostFunction3D(),
enforce_earliest_start=False,
verbose=False
):
"""
Function for agnostically choosing the best source and target (and associated predecessor set)
via Dijkstra. Only requires regular data input.
:param list data: a list of :obj:`decitala.search.Extraction` objects.
:param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost
function that will be used in calculating the weights between vertices.
:param bool verbose: whether to print logs.
"""
sources, targets = path_finding_utils.sources_and_sinks(
data=data,
enforce_earliest_start=enforce_earliest_start
)
graph = path_finding_utils.build_graph(
data=data,
cost_function_class=cost_function_class,
verbose=verbose
)
# This checks if there exists a fragment in sources/sinks that spans the whole onset range.
# Alternatively if all extracted fragments are overlapping (see test_povel_essen_dijkstra).
def _all_overlap(data):
"""
Relies on the fact that the output data is sorted by onset range.
"""
return data[0].onset_range[1] > data[-1].onset_range[0]
min_onset = min(sources, key=lambda x: x.onset_range[0]).onset_range[0]
max_onset = max(targets, key=lambda x: x.onset_range[1]).onset_range[1]
if _all_overlap(data):
for possible_source in sources:
if possible_source.onset_range == (min_onset, max_onset):
dist, pred = dijkstra(
data,
graph,
possible_source,
cost_function_class
)
return possible_source, possible_source, pred
# otherwise choose the longest source.
max_source = max(sources, key=lambda x: x.fragment.num_onsets)
dist, pred = dijkstra(
data,
graph,
max_source,
cost_function_class
)
return max_source, max_source, pred
best_path_cost = np.inf
best_source = None
best_target = None
best_predecessor_set = None
for source in tqdm(sources, disable=not(verbose)):
dist, pred = dijkstra(
data,
graph,
source,
cost_function_class
)
for target in targets:
if (dist[target.id_] < best_path_cost):
if source.onset_range[1] <= target.onset_range[0]:
best_path_cost = dist[target.id_]
best_source = source
best_target = target
best_predecessor_set = pred
# This allows for fragments at the end to be missed...
# Find final non-overlapping target with most onsets.
final_target = None
final_target_onsets = 0
for target in targets:
if target.onset_range[0] >= best_target.onset_range[1] and \
target.fragment.num_onsets > final_target_onsets:
final_target = target
final_target_onsets = target.fragment.num_onsets
# If none found, use best_target.
if not(final_target):
final_target = best_target
return best_source, final_target, best_predecessor_set
def generate_path(pred, source, target):
"""
Returns the optimal path extracted from Dijkstra.
:param dict pred: the ``pred`` dictionary returned from
:obj:`decitala.path_finding.dijkstra.dijkstra`.
:param dict source: a :obj:`decitala.search.Extraction` object.
:param dict target: a :obj:`decitala.search.Extraction` object.
"""
source_fragment_id = source.id_
target_fragment_id = target.id_
if not pred and source_fragment_id == target_fragment_id: # Second condition is just a guardrail.
return [source_fragment_id]
path = [target_fragment_id]
while True:
key = pred[path[0]]
path.insert(0, key)
if key == source_fragment_id:
break
return path | en | 0.614906 | # -*- coding: utf-8 -*- #################################################################################################### # File: dijkstra.py # Purpose: Implementation of the Dijkstra algorithm for path-finding. # # Author: <NAME> # # Location: NYC, 2021 #################################################################################################### # Useful info here: https://stackoverflow.com/questions/22897209/dijkstras-algorithm-in-python. Dijkstra path-finding algorithm from dynamic programming. Uses a min-heap data structure for efficiency. :param list data: a list of :obj:`decitala.search.Extraction` objects. :param source: an :obj:`decitala.search.Extraction` object. :param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost function that will be used in calculating the weights between vertices. Function for agnostically choosing the best source and target (and associated predecessor set) via Dijkstra. Only requires regular data input. :param list data: a list of :obj:`decitala.search.Extraction` objects. :param `decitala.path_finding.path_finding_utils.CostFunction` cost_function_class: a cost function that will be used in calculating the weights between vertices. :param bool verbose: whether to print logs. # This checks if there exists a fragment in sources/sinks that spans the whole onset range. # Alternatively if all extracted fragments are overlapping (see test_povel_essen_dijkstra). Relies on the fact that the output data is sorted by onset range. # otherwise choose the longest source. # This allows for fragments at the end to be missed... # Find final non-overlapping target with most onsets. # If none found, use best_target. Returns the optimal path extracted from Dijkstra. :param dict pred: the ``pred`` dictionary returned from :obj:`decitala.path_finding.dijkstra.dijkstra`. :param dict source: a :obj:`decitala.search.Extraction` object. 
:param dict target: a :obj:`decitala.search.Extraction` object. # Second condition is just a guardrail. | 3.33099 | 3 |
app.py | tduongas/plotply-challenge | 0 | 6614387 | from flask import Flask, render_template
# Create an instance of Flask
app = Flask(__name__)
# Route: home page.
@app.route("/")
def main():
    """Render the dashboard home page (templates/index.html)."""
    return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True) | from flask import Flask, render_template
# Create an instance of Flask
app = Flask(__name__)
# Route
@app.route("/")
def main():
# Redirect back to home page
return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True) | en | 0.872083 | # Create an instance of Flask # Route # Redirect back to home page | 2.646996 | 3 |
models/ussd.py | zogxray/sms-gateway | 1 | 6614388 | <reponame>zogxray/sms-gateway<filename>models/ussd.py
from orator import Model
from orator.orm import belongs_to
class Ussd(Model):
    """Orator ORM model for the ``ussd`` table (USSD requests and answers)."""

    __table__ = 'ussd'
    # Mass-assignable columns.
    __fillable__ = ['ussd', 'answer', 'send_at', 'channel_id', 'received_at']
    # Columns treated as datetime instances.
    __dates__ = ['send_at', 'received_at']

    @belongs_to
    def channel(self):
        """Owning channel (many-to-one relation).

        Imported locally to avoid a circular import with models.channel.
        """
        from models.channel import Channel
        return Channel
from orator.orm import belongs_to
class Ussd(Model):
__table__ = 'ussd'
__fillable__ = ['ussd', 'answer', 'send_at', 'channel_id', 'received_at']
__dates__ = ['send_at', 'received_at']
@belongs_to
def channel(self):
from models.channel import Channel
return Channel | none | 1 | 2.591716 | 3 | |
hospital/migrations/0019_dailymonitor.py | challa-hemanth-github/b2ehrdemo | 0 | 6614389 | # Generated by Django 3.0.5 on 2021-07-08 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the DailyMonitor model."""

    dependencies = [
        ('hospital', '0018_auto_20201015_2036'),
    ]

    operations = [
        migrations.CreateModel(
            name='DailyMonitor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Doctor stored denormalized (id + name), not as a ForeignKey.
                ('doctorId', models.PositiveIntegerField(null=True)),
                ('doctorName', models.CharField(max_length=40, null=True)),
                # auto_now: refreshed to the current date on every save.
                ('appointmentDate', models.DateField(auto_now=True)),
                ('description', models.TextField(max_length=500)),
                # Vital signs (all optional).
                ('pulse', models.PositiveIntegerField(null=True)),
                ('saturation', models.PositiveIntegerField(null=True)),
                ('temperature', models.PositiveIntegerField(null=True)),
                ('status', models.BooleanField(default=False)),
            ],
        ),
    ]
| # Generated by Django 3.0.5 on 2021-07-08 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hospital', '0018_auto_20201015_2036'),
]
operations = [
migrations.CreateModel(
name='DailyMonitor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doctorId', models.PositiveIntegerField(null=True)),
('doctorName', models.CharField(max_length=40, null=True)),
('appointmentDate', models.DateField(auto_now=True)),
('description', models.TextField(max_length=500)),
('pulse', models.PositiveIntegerField(null=True)),
('saturation', models.PositiveIntegerField(null=True)),
('temperature', models.PositiveIntegerField(null=True)),
('status', models.BooleanField(default=False)),
],
),
]
| en | 0.824938 | # Generated by Django 3.0.5 on 2021-07-08 11:34 | 1.876769 | 2 |
Zeras/zoo_nn.py | Li-Ming-Fan/Zeras | 2 | 6614390 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 1 17:14:19 2018
@author: limingfan
"""
import tensorflow as tf
#
def get_shape_list(tensor):
    """Return the shape of ``tensor`` as a list.

    Statically-known dimensions come back as Python ints; dynamic (None)
    dimensions are replaced with the corresponding scalar from
    ``tf.shape``, so the result can be used for reshaping at graph time.
    """
    shape = tensor.shape.as_list()
    non_static_indexes = []
    for (index, dim) in enumerate(shape):
        if dim is None: non_static_indexes.append(index)
    # Fast path: all dimensions are static.
    if not non_static_indexes: return shape
    dyn_shape = tf.shape(tensor)
    for index in non_static_indexes:
        shape[index] = dyn_shape[index]
    return shape
#
def get_emb_positioned(x, token_emb, position_emb):
    """ x: [None, None]

    Look up token embeddings for the ids in ``x`` and add absolute position
    embeddings (positions 0..seq_len-1 along the last axis of ``x``).
    """
    posi = tf.range(tf.shape(x)[-1])
    seq_emb_t = tf.nn.embedding_lookup(token_emb, x)
    seq_emb_p = tf.nn.embedding_lookup(position_emb, posi)
    # Broadcast position embeddings over the batch dimension.
    return seq_emb_t + seq_emb_p
#
def get_mask_mat_subsequent(size, name="mask_subsequent"):
    """ subsequent mask

    Lower-triangular float mask of shape (1, size, size): entry (i, j) is
    1.0 when j <= i and 0.0 otherwise (band_part keeps all sub-diagonals,
    no super-diagonals), used to hide future positions.
    """
    mask_tensor = tf.constant(1.0, shape = (1, size, size), dtype=tf.float32)
    mask_tensor = tf.linalg.band_part(mask_tensor,
                                      num_lower = -1,
                                      num_upper = 0,
                                      name = name)
    return mask_tensor
#
def get_tensor_expanded(x, dim, dtype=None):
    """Insert a length-1 axis into ``x`` at ``dim``; optionally cast to ``dtype``."""
    x = tf.expand_dims(x, dim)
    if dtype is not None:
        x = tf.cast(x, dtype=dtype)
    #
    return x
#
def gelu(x):
    # GELU activation, tanh approximation:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)));
    # 0.79788456 ~= sqrt(2/pi).
    cdf = 0.5 * (1.0 + tf.tanh((0.79788456 * (x + 0.044715 * tf.pow(x, 3)) )))
    return x * cdf
#
def dropout(inputs, keep_prob, feature_stick=True, mode="recurrent"):
    # Dropout that can share one noise mask along an axis ("feature stick"):
    # - embedding mode, 2-D input: one mask entry per row; the extra
    #   `* keep_prob` appears to compensate tf.nn.dropout's 1/keep_prob
    #   rescaling — NOTE(review): confirm this scaling is intended.
    # - recurrent mode, 3-D batch-major input: one mask shared across the
    #   middle (time) axis.
    # - anything else: plain element-wise dropout.
    if feature_stick is False: return tf.nn.dropout(inputs, keep_prob)
    #
    shape = tf.shape(inputs)
    if mode == "embedding" and len(inputs.get_shape().as_list()) == 2:
        noise_shape = [shape[0], 1]
        scale = keep_prob
        out = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape) * scale
    elif mode == "recurrent" and len(inputs.get_shape().as_list()) == 3:
        noise_shape = [shape[0], 1, shape[-1]] # batch_major
        out = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape)
    else:
        out = tf.nn.dropout(inputs, keep_prob, noise_shape=None)
    return out
#
def get_label_smoothened(onehot_label, num_classes, delta):
    """ Apply label smoothing to a one-hot label.

    Mixes the one-hot distribution with the uniform distribution:
    ``(1 - delta) * onehot_label + delta / num_classes``.
    """
    uniform_mass = delta / num_classes
    return (1.0 - delta) * onehot_label + uniform_mass
| # -*- coding: utf-8 -*-
"""
Created on Sat Sep 1 17:14:19 2018
@author: limingfan
"""
import tensorflow as tf
#
def get_shape_list(tensor):
"""
"""
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None: non_static_indexes.append(index)
if not non_static_indexes: return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
#
def get_emb_positioned(x, token_emb, position_emb):
""" x: [None, None]
"""
posi = tf.range(tf.shape(x)[-1])
seq_emb_t = tf.nn.embedding_lookup(token_emb, x)
seq_emb_p = tf.nn.embedding_lookup(position_emb, posi)
return seq_emb_t + seq_emb_p
#
def get_mask_mat_subsequent(size, name="mask_subsequent"):
""" subsequent mask
"""
mask_tensor = tf.constant(1.0, shape = (1, size, size), dtype=tf.float32)
mask_tensor = tf.linalg.band_part(mask_tensor,
num_lower = -1,
num_upper = 0,
name = name)
return mask_tensor
#
def get_tensor_expanded(x, dim, dtype=None):
"""
"""
x = tf.expand_dims(x, dim)
if dtype is not None:
x = tf.cast(x, dtype=dtype)
#
return x
#
def gelu(x):
cdf = 0.5 * (1.0 + tf.tanh((0.79788456 * (x + 0.044715 * tf.pow(x, 3)) )))
return x * cdf
#
def dropout(inputs, keep_prob, feature_stick=True, mode="recurrent"):
#
if feature_stick is False: return tf.nn.dropout(inputs, keep_prob)
#
shape = tf.shape(inputs)
if mode == "embedding" and len(inputs.get_shape().as_list()) == 2:
noise_shape = [shape[0], 1]
scale = keep_prob
out = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape) * scale
elif mode == "recurrent" and len(inputs.get_shape().as_list()) == 3:
noise_shape = [shape[0], 1, shape[-1]] # batch_major
out = tf.nn.dropout(inputs, keep_prob, noise_shape=noise_shape)
else:
out = tf.nn.dropout(inputs, keep_prob, noise_shape=None)
return out
#
def get_label_smoothened(onehot_label, num_classes, delta):
"""
"""
new_label = (1.0 - delta) * onehot_label + delta / num_classes
return new_label
| en | 0.823735 | # -*- coding: utf-8 -*- Created on Sat Sep 1 17:14:19 2018
@author: limingfan # # x: [None, None] # subsequent mask # # # # # # # batch_major # | 2.499326 | 2 |
comparar.py | TJRR/compara | 0 | 6614391 | from jiwer import wer
import nltk
import distance
import re, math
from collections import Counter
from nltk.corpus import stopwords
from nltk import tokenize
import argparse
import os
from pathlib import Path
# Global variables
WORD = re.compile(r'\w+')
sws = stopwords.words('portuguese')
# Stopwords removal
def text_normalized(text):
    """Return ``text`` with Portuguese stopwords removed.

    Tokenises with NLTK's Portuguese tokenizer, drops tokens whose
    lowercase form is in the module-level ``sws`` stopword list, and joins
    the survivors with single spaces.
    """
    palavras_tokenize = tokenize.word_tokenize(text, language='portuguese')
    filtered_sentence = list(filter(lambda x: x.lower() not in sws, palavras_tokenize))
    return " ".join(filtered_sentence)
# Cosine
def get_cosine_result(vec1, vec2):
    """Cosine similarity between two sparse word-count vectors (mappings
    from word to count).

    Returns 0.0 when either vector has zero magnitude.
    """
    shared_words = set(vec1.keys()) & set(vec2.keys())
    dot_product = sum(vec1[w] * vec2[w] for w in shared_words)
    norm1 = math.sqrt(sum(c ** 2 for c in vec1.values()))
    norm2 = math.sqrt(sum(c ** 2 for c in vec2.values()))
    magnitude = norm1 * norm2
    if not magnitude:
        return 0.0
    return float(dot_product) / magnitude
def text_to_vector(text):
    """Tokenise ``text`` with the module-level ``WORD`` regex (\\w+ runs)
    and return a word -> count ``Counter``."""
    words = WORD.findall(text)
    return Counter(words)
def get_cosine(text1, text2):
    """Cosine similarity between two texts via their word-count vectors."""
    vector1 = text_to_vector(text1)
    vector2 = text_to_vector(text2)
    cosine = get_cosine_result(vector1, vector2)
    return cosine
# Jaccard
def get_jaccard(text1, text2):
    """Jaccard *distance* (0.0 = identical) between the two texts.

    Note: ``set(text)`` on a string yields a set of *characters*, so this
    compares character sets, not word sets.
    """
    jaccard = nltk.jaccard_distance(set(text1), set(text2))
    return jaccard
# Levenshtein
def get_levenshtein(text1, text2):
    """Levenshtein edit distance between the two raw strings
    (via the ``distance`` package)."""
    levenshtein = distance.levenshtein(text1, text2)
    return levenshtein
# Word Error Rate
def get_wer(text1, text2):
    """Word error rate of ``text2`` against reference ``text1`` (jiwer)."""
    return wer(text1, text2)
def get_wrr(text1, text2):
    """Word recognition rate: complement of the word error rate."""
    return 1 - wer(text1, text2)
# Characters
def get_numberTotalCharacters(text1):
    """Total number of characters in ``text1``, whitespace included."""
    return len(text1)
def get_numberTotalCharactersSemEspaco(text1):
    """Return the number of non-whitespace characters in ``text1``.

    Replaces the manual ``c.isspace() != True`` counter loop with the
    idiomatic generator-sum; behavior is unchanged.
    """
    return sum(1 for c in text1 if not c.isspace())
# Punctuations
def get_pontuation(text):
    """Count punctuation marks in ``text``.

    ``text`` may be a single string (iterated character by character) or an
    iterable of lines; either way totals over the whole input are returned
    as the tuple
    (full stops, commas, question marks, exclamation marks, colons, total).
    """
    numberOfFullStops = 0
    numberOfQuestionMarks = 0
    numberOfExclamationMarks = 0
    numberOfCommaMarks = 0
    numberOfColonMarks = 0
    for line in text:
        numberOfFullStops += line.count(".")
        numberOfQuestionMarks += line.count("?")
        numberOfExclamationMarks += line.count("!")
        numberOfCommaMarks += line.count(",")
        numberOfColonMarks += line.count(":")
    # Compute the grand total once, after the loop (the original recomputed
    # it on every iteration).
    numberTotalPunctuation = (numberOfFullStops + numberOfCommaMarks
                              + numberOfQuestionMarks + numberOfExclamationMarks
                              + numberOfColonMarks)
    return numberOfFullStops, numberOfCommaMarks, numberOfQuestionMarks, numberOfExclamationMarks, numberOfColonMarks, numberTotalPunctuation
if __name__ == '__main__':
    # CLI: compare two .txt documents; by convention the first file is the
    # reference ("base") text and the second the sample to evaluate.
    lines = []
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=str, nargs=2, metavar='dir', help='directories of documents .txt. OBS.: First .txt might be the text recovered by API')
    args = parser.parse_args()
    filename = args.d
    for line in filename:
        lines.append(Path(line).read_text())
    # Documents or Texts
    # Collapse newlines and runs of whitespace into single spaces so the
    # metrics compare content, not layout.
    test1 = re.sub(r'\n{1,}|\s{2,}', " ", lines[0])
    test2 = re.sub(r'\n{1,}|\s{2,}', " ", lines[1])
    print('\n'"TEXTO BASE")
    print(test1)
    print('\n'"TEXTO AMOSTRAL ANALISADO")
    print(test2)
    # Get punctuation
    numberOfPunctuation = get_pontuation(test1)
    numberOfPunctuation2 = get_pontuation(test2)
    # Stopwords removal
    # test1 = text_normalized(test1)
    # test2 = text_normalized(test2)
    # Punctuation results: per-mark counts side by side, then the ratio of
    # sample punctuation to base punctuation as a percentage.
    print('\n'"PRECISÃO NA PONTUAÇÃO:",'\t'," TEXTO BASE",'\t',"AMOSTRA")
    print('- Quantidade de Pontos:  ','\t', numberOfPunctuation[0],'\t', numberOfPunctuation2[0])
    print('- Quantidade de Virgulas:','\t', numberOfPunctuation[1],'\t', numberOfPunctuation2[1])
    print('- Quantidade de Interrogações:','\t', numberOfPunctuation[2],'\t', numberOfPunctuation2[2])
    print('- Quantidade de Exclamações:','\t', numberOfPunctuation[3],'\t', numberOfPunctuation2[3])
    print('- Quantidade de Dois Pontos:','\t', numberOfPunctuation[4],'\t', numberOfPunctuation2[4])
    print('- Quantidade de Pontuações:','\t', numberOfPunctuation[5],'\t', numberOfPunctuation2[5])
    print('- Precisão na pontuação:','\t', round((numberOfPunctuation2[5] / numberOfPunctuation[5])*100,2) , '%')
    # Levenshtein results: distance metrics (higher = more different).
    print('\n'"DISTÂNCIA ENTRE OS TEXTOS:")
    print("- Distância entre Caracteres (Levenshtein):",'\t', round((((get_levenshtein(test1, test2) - get_numberTotalCharactersSemEspaco(test1)) / get_numberTotalCharactersSemEspaco(test1))+1)*100,2), '%', '\t', get_levenshtein(test1, test2), 'de', get_numberTotalCharactersSemEspaco(test1))
    print("- Distância entre Palavras (Word Error Rate):",'\t', round(get_wer(test1, test2)*100,2), '%','\t', "%.2f" % get_wer(test1, test2))
    print('- Distância entre os textos (dissimilaridade):','\t', round((((get_wer(test1, test2))+(((get_levenshtein(test1, test2) - get_numberTotalCharactersSemEspaco(test1)) / get_numberTotalCharactersSemEspaco(test1))+1 ))/2)*100,2) , '%')
    # Similatities results: similarity metrics (higher = more alike) and
    # their average as an overall proximity score.
    print('\n'"PROXIMIDADE ENTRE OS TEXTOS:")
    print("- Taxa Reconhecimento de palavras (WRR):",'\t', round(get_wrr(test1, test2)*100,2), '%','\t', "%.2f" % get_wrr(test1, test2))
    print("- Coeficiente de similaridade (Jaccard):",'\t', round((1 - get_jaccard(test1, test2))*100,2), '%','\t', "%.2f" % get_jaccard(test1, test2))
    print('- Coeficiente de similaridade (Cosseno):','\t', round(get_cosine(test1, test2)*100,2), '%','\t', "%.2f" % get_cosine(test1, test2))
    print('- Proximidade entre os textos (similaridade):','\t', round((( get_wrr(test1, test2) + get_cosine(test1, test2) + (1-get_jaccard(test1, test2)))/3)*100,2) , '%')
    print('')
| from jiwer import wer
import nltk
import distance
import re, math
from collections import Counter
from nltk.corpus import stopwords
from nltk import tokenize
import argparse
import os
from pathlib import Path
# Global variables
WORD = re.compile(r'\w+')
sws = stopwords.words('portuguese')
# Stopwords removal
def text_normalized(text):
palavras_tokenize = tokenize.word_tokenize(text, language='portuguese')
filtered_sentence = list(filter(lambda x: x.lower() not in sws, palavras_tokenize))
return " ".join(filtered_sentence)
# Cosine
def get_cosine_result(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
def get_cosine(text1, text2):
vector1 = text_to_vector(text1)
vector2 = text_to_vector(text2)
cosine = get_cosine_result(vector1, vector2)
return cosine
# Jaccard
def get_jaccard(text1, text2):
jaccard = nltk.jaccard_distance(set(text1), set(text2))
return jaccard
# Levenshtein
def get_levenshtein(text1, text2):
levenshtein = distance.levenshtein(text1, text2)
return levenshtein
# Word Error Rate
def get_wer(text1, text2):
return wer(text1, text2)
def get_wrr(text1, text2):
return 1 - wer(text1, text2)
# Characters
def get_numberTotalCharacters(text1):
return len(text1)
def get_numberTotalCharactersSemEspaco(text1):
count = 0
for c in text1:
if c.isspace() != True:
count = count+1
return count
# Punctuations
def get_pontuation(text):
numberOfFullStops = 0
numberOfQuestionMarks = 0
numberOfExclamationMarks = 0
numberOfCommaMarks = 0
numberOfColonMarks = 0
numberTotalPunctuation = 0
for line in text:
numberOfFullStops += line.count(".")
numberOfQuestionMarks += line.count("?")
numberOfExclamationMarks += line.count("!")
numberOfCommaMarks += line.count(",")
numberOfColonMarks += line.count(":")
numberTotalPunctuation = numberOfFullStops + numberOfCommaMarks + numberOfQuestionMarks + numberOfExclamationMarks + numberOfColonMarks
return numberOfFullStops, numberOfCommaMarks, numberOfQuestionMarks, numberOfExclamationMarks, numberOfColonMarks, numberTotalPunctuation
if __name__ == '__main__':
lines = []
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=str, nargs=2, metavar='dir', help='directories of documents .txt. OBS.: First .txt might be the text recovered by API')
args = parser.parse_args()
filename = args.d
for line in filename:
lines.append(Path(line).read_text())
# Documents or Texts
test1 = re.sub(r'\n{1,}|\s{2,}', " ", lines[0])
test2 = re.sub(r'\n{1,}|\s{2,}', " ", lines[1])
print('\n'"TEXTO BASE")
print(test1)
print('\n'"TEXTO AMOSTRAL ANALISADO")
print(test2)
# Get punctuation
numberOfPunctuation = get_pontuation(test1)
numberOfPunctuation2 = get_pontuation(test2)
# Stopwords removal
# test1 = text_normalized(test1)
# test2 = text_normalized(test2)
# Punctuation results
print('\n'"PRECISÃO NA PONTUAÇÃO:",'\t'," TEXTO BASE",'\t',"AMOSTRA")
print('- Quantidade de Pontos: ','\t', numberOfPunctuation[0],'\t', numberOfPunctuation2[0])
print('- Quantidade de Virgulas:','\t', numberOfPunctuation[1],'\t', numberOfPunctuation2[1])
print('- Quantidade de Interrogações:','\t', numberOfPunctuation[2],'\t', numberOfPunctuation2[2])
print('- Quantidade de Exclamações:','\t', numberOfPunctuation[3],'\t', numberOfPunctuation2[3])
print('- Quantidade de Dois Pontos:','\t', numberOfPunctuation[4],'\t', numberOfPunctuation2[4])
print('- Quantidade de Pontuações:','\t', numberOfPunctuation[5],'\t', numberOfPunctuation2[5])
print('- Precisão na pontuação:','\t', round((numberOfPunctuation2[5] / numberOfPunctuation[5])*100,2) , '%')
# Levenshtein results
print('\n'"DISTÂNCIA ENTRE OS TEXTOS:")
print("- Distância entre Caracteres (Levenshtein):",'\t', round((((get_levenshtein(test1, test2) - get_numberTotalCharactersSemEspaco(test1)) / get_numberTotalCharactersSemEspaco(test1))+1)*100,2), '%', '\t', get_levenshtein(test1, test2), 'de', get_numberTotalCharactersSemEspaco(test1))
print("- Distância entre Palavras (Word Error Rate):",'\t', round(get_wer(test1, test2)*100,2), '%','\t', "%.2f" % get_wer(test1, test2))
print('- Distância entre os textos (dissimilaridade):','\t', round((((get_wer(test1, test2))+(((get_levenshtein(test1, test2) - get_numberTotalCharactersSemEspaco(test1)) / get_numberTotalCharactersSemEspaco(test1))+1 ))/2)*100,2) , '%')
# Similatities results
print('\n'"PROXIMIDADE ENTRE OS TEXTOS:")
print("- Taxa Reconhecimento de palavras (WRR):",'\t', round(get_wrr(test1, test2)*100,2), '%','\t', "%.2f" % get_wrr(test1, test2))
print("- Coeficiente de similaridade (Jaccard):",'\t', round((1 - get_jaccard(test1, test2))*100,2), '%','\t', "%.2f" % get_jaccard(test1, test2))
print('- Coeficiente de similaridade (Cosseno):','\t', round(get_cosine(test1, test2)*100,2), '%','\t', "%.2f" % get_cosine(test1, test2))
print('- Proximidade entre os textos (similaridade):','\t', round((( get_wrr(test1, test2) + get_cosine(test1, test2) + (1-get_jaccard(test1, test2)))/3)*100,2) , '%')
print('')
| en | 0.61347 | # Global variables # Stopwords removal # Cosine # Jaccard # Levenshtein # Word Error Rate # Characters # Punctuations # Documents or Texts # Get punctuation # Stopwords removal # test1 = text_normalized(test1) # test2 = text_normalized(test2) # Punctuation results # Levenshtein results # Similatities results | 2.907057 | 3 |
FakeCompiler/parsetab.py | Ducan-Ally/Automata-sTest | 0 | 6614392 |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
# PLY (Python Lex-Yacc) LALR parse tables for the FakeCompiler grammar:
# _lr_action maps (terminal, state) pairs to shift/reduce actions, _lr_goto
# maps (nonterminal, state) pairs to successor states, and _lr_productions
# lists the grammar rules with their p_* handlers in parser.py.
_tabversion = '3.10'

_lr_method = 'LALR'

_lr_signature = 'COMMA EQUAL INT LEFTPARENTHESIS METODO NAME NEWLINE PRINT RIGHTPARENTHESISFakeProgram : names EQUAL METODO LEFTPARENTHESIS INT RIGHTPARENTHESIS continuationFakeProgram : PRINT NAME continuationnames : NAME COMMA namesnames : NAMEcontinuation : NEWLINE FakeProgramcontinuation : emptyempty : '
    
_lr_action_items = {'PRINT':([0,10,],[3,3,]),'NAME':([0,3,7,10,],[4,6,4,4,]),'$end':([1,6,9,11,14,16,17,],[0,-7,-2,-6,-5,-7,-1,]),'EQUAL':([2,4,12,],[5,-4,-3,]),'COMMA':([4,],[7,]),'METODO':([5,],[8,]),'NEWLINE':([6,16,],[10,10,]),'LEFTPARENTHESIS':([8,],[13,]),'INT':([13,],[15,]),'RIGHTPARENTHESIS':([15,],[16,]),}

# Expand the compact (token -> states/actions) items into the per-state
# action table PLY consumes at parse time.
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items

_lr_goto_items = {'FakeProgram':([0,10,],[1,14,]),'names':([0,7,10,],[2,12,2,]),'continuation':([6,16,],[9,17,]),'empty':([6,16,],[11,11,]),}

# Same expansion for the goto (nonterminal transition) table.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = {}
       _lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
  ("S' -> FakeProgram","S'",1,None,None,None),
  ('FakeProgram -> names EQUAL METODO LEFTPARENTHESIS INT RIGHTPARENTHESIS continuation','FakeProgram',7,'p_Start1','parser.py',15),
  ('FakeProgram -> PRINT NAME continuation','FakeProgram',3,'p_Start2','parser.py',19),
  ('names -> NAME COMMA names','names',3,'p_names1','parser.py',23),
  ('names -> NAME','names',1,'p_names2','parser.py',26),
  ('continuation -> NEWLINE FakeProgram','continuation',2,'p_continuation1','parser.py',29),
  ('continuation -> empty','continuation',1,'p_continuation2','parser.py',32),
  ('empty -> <empty>','empty',0,'p_empty','parser.py',41),
]
|
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'COMMA EQUAL INT LEFTPARENTHESIS METODO NAME NEWLINE PRINT RIGHTPARENTHESISFakeProgram : names EQUAL METODO LEFTPARENTHESIS INT RIGHTPARENTHESIS continuationFakeProgram : PRINT NAME continuationnames : NAME COMMA namesnames : NAMEcontinuation : NEWLINE FakeProgramcontinuation : emptyempty : '
_lr_action_items = {'PRINT':([0,10,],[3,3,]),'NAME':([0,3,7,10,],[4,6,4,4,]),'$end':([1,6,9,11,14,16,17,],[0,-7,-2,-6,-5,-7,-1,]),'EQUAL':([2,4,12,],[5,-4,-3,]),'COMMA':([4,],[7,]),'METODO':([5,],[8,]),'NEWLINE':([6,16,],[10,10,]),'LEFTPARENTHESIS':([8,],[13,]),'INT':([13,],[15,]),'RIGHTPARENTHESIS':([15,],[16,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'FakeProgram':([0,10,],[1,14,]),'names':([0,7,10,],[2,12,2,]),'continuation':([6,16,],[9,17,]),'empty':([6,16,],[11,11,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> FakeProgram","S'",1,None,None,None),
('FakeProgram -> names EQUAL METODO LEFTPARENTHESIS INT RIGHTPARENTHESIS continuation','FakeProgram',7,'p_Start1','parser.py',15),
('FakeProgram -> PRINT NAME continuation','FakeProgram',3,'p_Start2','parser.py',19),
('names -> NAME COMMA names','names',3,'p_names1','parser.py',23),
('names -> NAME','names',1,'p_names2','parser.py',26),
('continuation -> NEWLINE FakeProgram','continuation',2,'p_continuation1','parser.py',29),
('continuation -> empty','continuation',1,'p_continuation2','parser.py',32),
('empty -> <empty>','empty',0,'p_empty','parser.py',41),
]
| en | 0.554472 | # parsetab.py # This file is automatically generated. Do not edit. # pylint: disable=W,C,R | 2.346512 | 2 |
train.py | priyamDalmia/marl-PZoo | 0 | 6614393 | import numpy as np
import os
import sys
from pettingzoo.magent import tiger_deer_v3
from agents.agent import RandomAgent
# create a loop to initailize the agents.
def initialize_agents(agent_ids, env):
    """Build one agent object per environment agent id.

    Deer and tiger ids currently both get a RandomAgent; the branch is kept
    so each species can later be given its own policy class.
    Fix: removed a stray ``breakpoint()`` that halted every run.
    """
    action_type = "discrete"
    agents_list = {}
    for _id in agent_ids:
        action_space = env.action_space(_id)
        observation_space = env.observation_space(_id)
        if _id.startswith("deer"):
            # creating deer agents
            agents_list[_id] = RandomAgent(_id, action_space, observation_space, action_type)
        else:
            # creating tiger agents
            agents_list[_id] = RandomAgent(_id, action_space, observation_space, action_type)
    return agents_list
if __name__=="__main__":
    episodes = 1000
    learning_rate = 0.01
    max_iter = 1000

    # Initialize the environment.
    env = tiger_deer_v3.env(map_size=10, minimap_mode=False, extra_features=False)

    # game variables
    agents = env.possible_agents
    game_agents = initialize_agents(agents, env)

    # Fixes: removed a stray breakpoint(); iterate range(episodes) — an int
    # is not iterable; env.step() argument was misspelled ("aciton").
    for ep in range(episodes):
        env.reset()
        for _id in env.agent_iter():
            observation, reward, done, info = env.last()
            action = game_agents[_id].get_action(observation)
            # step forward
            env.step(action)
        # an episode ends when the iter loop exits
| import numpy as np
import os
import sys
from pettingzoo.magent import tiger_deer_v3
from agents.agent import RandomAgent
# create a loop to initailize the agents.
def initialize_agents(agent_ids, env):
action_type = "discrete"
agents_list = {}
breakpoint()
for _id in agent_ids:
action_space = env.action_space(_id)
observation_space = env.observation_space(_id)
if _id.startswith("deer"):
# creating deer agents
agents_list[_id] = RandomAgent(_id, action_space, observation_space, action_type)
else:
# creating tiger agents
agents_list[_id] = RandomAgent(_id, action_space, observation_space, action_type)
return agents_list
if __name__=="__main__":
episodes = 1000
learning_rate = 0.01
max_iter = 1000
# Initialize the environment.
env = tiger_deer_v3.env(map_size=10, minimap_mode=False, extra_features=False)
breakpoint()
# game variables
agents = env.possible_agents
game_agents = initialize_agents(agents, env)
for ep in episodes:
env.reset()
for _id in env.agent_iter():
observation, reward, done , info = env.last()
action = game_agents[_id].get_action(observation)
# step forward
env.step(aciton)
# an episode ends when the iter loop exits
| en | 0.751676 | # create a loop to initailize the agents. # creating deer agents # creating tiger agents # Initialize the environment. # game variables # step forward # an episode ends when the iter loop exits | 2.746575 | 3 |
Day8/aoc_2021_day_8.py | rahulvenugopal/AoC_2021_Python | 0 | 6614394 | <reponame>rahulvenugopal/AoC_2021_Python
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 10:38:45 2021
Advent of Code 2021 is here
My goal is to attempt all challenges before the onset of 2022
@author: <NAME>
"""
#%% --- Day 7: The Treachery of Whales ---Part 1
# Load data which is in text format
# NOTE(review): the file handle is never closed — a `with` block would be safer.
file = open('input.txt','r')
data = file.readlines()
data = [line.rstrip() for line in data]
# creating integer list (the puzzle input is one comma-separated line of positions)
crabs_horizontal_pos = list(data[0].split(","))
crabs_horizontal_pos = [int(i) for i in crabs_horizontal_pos]
import numpy as np
crabs_horizontal_pos = np.array(crabs_horizontal_pos)
from statistics import median
# The median minimises the sum of absolute distances, which is exactly the
# part-1 fuel cost (1 fuel per step moved).
median_distances = round(median(crabs_horizontal_pos))
cheapest_route_fuel = sum(abs(crabs_horizontal_pos - median_distances))
#%% --- Day 7: The Treachery of Whales ---Part 2
# understanding crab engineering, mean might be the mid point for fuel
# (the mean minimises the sum of *squared* distances, so with the
# triangular cost below it only lands near the true optimum)
mean_point = round(np.mean(crabs_horizontal_pos))
cheapest_route_fuel_crabs = abs(crabs_horizontal_pos - mean_point)
total_fuel = 0
for crab_submarines in cheapest_route_fuel_crabs:
    # part-2 cost of moving n steps is 1+2+...+n = n*(n+1)/2
    total_fuel += (crab_submarines * (crab_submarines+1)) /2
# mean gets us to a value very close to answer. BUT
# we are actually minimising n*(n+1) /2
#%% Part 2 minimisation
# Try every candidate alignment position and keep the cheapest.
# Fix: the search previously iterated range(len(crabs_horizontal_pos)) —
# the *number of crabs* — but the optimum lies somewhere in the range of
# occupied positions [0, max position], which can exceed the crab count.
total_fuel_estimates = []
for value in range(max(crabs_horizontal_pos) + 1):
    cheapest_route_fuel_crabs = abs(crabs_horizontal_pos - value)
    total_fuel = 0
    for crab_submarines in cheapest_route_fuel_crabs:
        # triangular fuel cost: 1+2+...+n = n*(n+1)/2
        total_fuel += (crab_submarines * (crab_submarines+1)) /2
    total_fuel_estimates.append(total_fuel)

print(min(total_fuel_estimates))
| # -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 10:38:45 2021
Advent of Code 2021 is here
My goal is to attempt all challenges before the onset of 2022
@author: <NAME>
"""
#%% --- Day 7: The Treachery of Whales ---Part 1
# Load data which is in text format
file = open('input.txt','r')
data = file.readlines()
data = [line.rstrip() for line in data]
# creating integer list
crabs_horizontal_pos = list(data[0].split(","))
crabs_horizontal_pos = [int(i) for i in crabs_horizontal_pos]
import numpy as np
crabs_horizontal_pos = np.array(crabs_horizontal_pos)
from statistics import median
median_distances = round(median(crabs_horizontal_pos))
cheapest_route_fuel = sum(abs(crabs_horizontal_pos - median_distances))
#%% --- Day 7: The Treachery of Whales ---Part 2
# understanding crab engineering, mean might be the mid point for fuel
mean_point = round(np.mean(crabs_horizontal_pos))
cheapest_route_fuel_crabs = abs(crabs_horizontal_pos - mean_point)
total_fuel = 0
for crab_submarines in cheapest_route_fuel_crabs:
total_fuel += (crab_submarines * (crab_submarines+1)) /2
# mean gets us to a value very close to answer. BUT
# we are actually minimising n*(n+1) /2
#%% Part 2 minimisation
total_fuel_estimates = []
for value in range(len(crabs_horizontal_pos)):
cheapest_route_fuel_crabs = abs(crabs_horizontal_pos - value)
total_fuel = 0
for crab_submarines in cheapest_route_fuel_crabs:
total_fuel += (crab_submarines * (crab_submarines+1)) /2
total_fuel_estimates.append(total_fuel)
print(min(total_fuel_estimates))
# I am still thinking about the range of values to iterate for optimisation | en | 0.820184 | # -*- coding: utf-8 -*- Created on Sat Dec 7 10:38:45 2021 Advent of Code 2021 is here My goal is to attempt all challenges before the onset of 2022 @author: <NAME> #%% --- Day 7: The Treachery of Whales ---Part 1 # Load data which is in text format # creating integer list #%% --- Day 7: The Treachery of Whales ---Part 2 # understanding crab engineering, mean might be the mid point for fuel # mean gets us to a value very close to answer. BUT # we are actually minimising n*(n+1) /2 #%% Part 2 minimisation # I am still thinking about the range of values to iterate for optimisation | 3.50812 | 4 |
functions/correlations.py | fellipegm/Projeto-CESAR | 0 | 6614395 | # Based on https://stackoverflow.com/questions/33171413/cross-correlation-time-lag-correlation-with-pandas
import numpy as np
def crosscorr(datax, datay, lag=0):
    """ Lag-N cross correlation.

    Parameters
    ----------
    lag : int, default 0
        Number of positions ``datax`` is shifted (forward for positive lag);
        vacated positions are filled with 0.
    datax, datay : pandas Series
        If the lengths differ, both are truncated to the shorter one.

    Returns
    ----------
    crosscorr : float
        Pearson correlation between the shifted ``datax`` and ``datay``.
    """
    shortest = min(datax.shape[0], datay.shape[0])
    shifted_x = np.array(datax[0:shortest].shift(-lag, fill_value=0))
    aligned_y = np.array(datay[0:shortest])
    return np.corrcoef(shifted_x, aligned_y)[0, 1]
import numpy as np
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas series objects of equal length
Returns
----------
crosscorr : float
"""
minlen = min(datax.shape[0], datay.shape[0])
# if datax.shape != datay.shape:
# raise IndexError('The arrays should have the same length')
return np.corrcoef(np.array(datax[0:minlen].shift(-lag, fill_value=0)), np.array(datay[0:minlen]))[0,1] | en | 0.470147 | # Based on https://stackoverflow.com/questions/33171413/cross-correlation-time-lag-correlation-with-pandas Lag-N cross correlation. Parameters ---------- lag : int, default 0 datax, datay : pandas series objects of equal length Returns ---------- crosscorr : float # if datax.shape != datay.shape: # raise IndexError('The arrays should have the same length') | 3.290343 | 3 |
oticas/forms.py | Guilehm/E-commerce | 2 | 6614396 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import EnderecoUser
class RegistroForm(UserCreationForm):
    """Sign-up form: Django's ``UserCreationForm`` plus e-mail, phone and CPF."""
    email = forms.EmailField(label='E-mail', required=True)
    telefone = forms.CharField()
    cpf = forms.CharField(label='CPF')

    class Meta:
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            # The standard UserCreationForm password pair. (The file carried
            # corrupted '<PASSWORD>' placeholders here, which are not valid
            # field names.)
            'password1',
            'password2',
            'telefone',
            'cpf',
        )
        widgets = {
            'username': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
            'first_name': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
            'last_name': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
            'email': forms.EmailInput(attrs={'class': 'form-control form-control-sm'}),
        }
        labels = {
            'first_name': 'Nome',
            'last_name': 'Sobrenome',
            'cpf': 'CPF',
        }

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the user before saving.

        NOTE(review): ``telefone`` and ``cpf`` are assigned as plain
        attributes; they are not fields on the stock ``User`` model and will
        not persist unless the project uses a custom user model — verify
        against the project's models.
        """
        user = super(RegistroForm, self).save(commit=False)
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.email = self.cleaned_data['email']
        user.telefone = self.cleaned_data['telefone']
        user.cpf = self.cleaned_data['cpf']
        if commit:
            user.save()
        return user
class EnderecoForm(forms.ModelForm):
    """Brazilian address form backed by the ``EnderecoUser`` model."""
    # NOTE(review): these class attributes are bare widget instances, not
    # form fields — the real fields come from the ModelForm Meta below, so
    # these assignments have no effect on the rendered form; confirm whether
    # they can be removed.
    cep = forms.TextInput()
    rua = forms.TextInput()
    numero = forms.TextInput()
    complemento = forms.TextInput()
    bairro = forms.TextInput()
    cidade = forms.TextInput()
    estado = forms.TextInput()
    class Meta:
        model = EnderecoUser
        fields = (
            'cep',
            'rua',
            'numero',
            'complemento',
            'bairro',
            'cidade',
            'estado',
        )
        labels = {
            'cep': 'CEP',
            'rua': 'Rua',
            'numero': 'Número',
            'complemento': 'Complemento',
            'bairro': 'Bairro',
            'cidade': 'Cidade',
            'estado': 'Estado',
        }
        widgets = {
            # Client-side CEP lookup: the onblur handler is defined in the
            # page's JavaScript.
            'cep': forms.TextInput(attrs={'onblur': "pesquisacep(this.value);"}),
        }
    def save(self, commit=True):
        """Save the address instance.

        NOTE(review): re-assigning each cleaned_data value duplicates what
        ``ModelForm.save`` already does; kept as-is to preserve behavior.
        """
        user = super(EnderecoForm, self).save(commit=False)
        user.cep = self.cleaned_data['cep']
        user.rua = self.cleaned_data['rua']
        user.numero = self.cleaned_data['numero']
        user.complemento = self.cleaned_data['complemento']
        user.bairro = self.cleaned_data['bairro']
        user.cidade = self.cleaned_data['cidade']
        user.estado = self.cleaned_data['estado']
        if commit:
            user.save()
        return user
class ContatoForm(forms.Form):
    """Site contact form: name, e-mail and a free-text message."""
    nome = forms.CharField(label='Nome', required=True)
    email = forms.EmailField(label='Email')
    mensagem = forms.CharField(label='Mensagem', widget=forms.Textarea(), required=True)

    def __init__(self, *args, **kwargs):
        super(ContatoForm, self).__init__(*args, **kwargs)
        # Apply Bootstrap classes and Portuguese placeholders in one pass.
        common_class = 'form-control form-control-sm'
        widget_attrs = {
            'nome': {'class': common_class, 'id': 'id_nome_contato',
                     'placeholder': 'Digite seu nome completo'},
            'email': {'class': common_class,
                      'placeholder': 'Digite seu email'},
            'mensagem': {'class': common_class,
                         'placeholder': 'Escreva aqui sua mensagem...'},
        }
        for field_name, attrs in widget_attrs.items():
            self.fields[field_name].widget.attrs.update(attrs)
class CepForm(forms.Form):
    """Single-field form for looking up a Brazilian postal code (CEP)."""
    cep = forms.CharField(label='', required=True)

    def __init__(self, *args, **kwargs):
        super(CepForm, self).__init__(*args, **kwargs)
        widget_attrs = self.fields['cep'].widget.attrs
        widget_attrs['class'] = 'form-control mr-0'
        widget_attrs['placeholder'] = '_____-___'
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import EnderecoUser
class RegistroForm(UserCreationForm):
email = forms.EmailField(label='E-mail', required=True)
telefone = forms.CharField()
cpf = forms.CharField(label='CPF')
class Meta:
model = User
fields = (
'username',
'first_name',
'last_name',
'email',
'<PASSWORD>',
'<PASSWORD>',
'telefone',
'cpf',
)
widgets = {
'username': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
'first_name': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
'last_name': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
'email': forms.EmailInput(attrs={'class': 'form-control form-control-sm'}),
}
labels = {
'first_name': 'Nome',
'last_name': 'Sobrenome',
'cpf': 'CPF',
}
def save(self, commit=True):
user = super(RegistroForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.email = self.cleaned_data['email']
user.telefone = self.cleaned_data['telefone']
user.cpf = self.cleaned_data['cpf']
if commit:
user.save()
return user
class EnderecoForm(forms.ModelForm):
cep = forms.TextInput()
rua = forms.TextInput()
numero = forms.TextInput()
complemento = forms.TextInput()
bairro = forms.TextInput()
cidade = forms.TextInput()
estado = forms.TextInput()
class Meta:
model = EnderecoUser
fields = (
'cep',
'rua',
'numero',
'complemento',
'bairro',
'cidade',
'estado',
)
labels = {
'cep': 'CEP',
'rua': 'Rua',
'numero': 'Número',
'complemento': 'Complemento',
'bairro': 'Bairro',
'cidade': 'Cidade',
'estado': 'Estado',
}
widgets = {
'cep': forms.TextInput(attrs={'onblur': "pesquisacep(this.value);"}),
}
def save(self, commit=True):
user = super(EnderecoForm, self).save(commit=False)
user.cep = self.cleaned_data['cep']
user.rua = self.cleaned_data['rua']
user.numero = self.cleaned_data['numero']
user.complemento = self.cleaned_data['complemento']
user.bairro = self.cleaned_data['bairro']
user.cidade = self.cleaned_data['cidade']
user.estado = self.cleaned_data['estado']
if commit:
user.save()
return user
class ContatoForm(forms.Form):
nome = forms.CharField(label='Nome', required=True)
email = forms.EmailField(label='Email')
mensagem = forms.CharField(label='Mensagem', widget=forms.Textarea(), required=True)
def __init__(self, *args, **kwargs):
super(ContatoForm, self).__init__(*args, **kwargs)
self.fields['nome'].widget.attrs['class'] = 'form-control form-control-sm'
self.fields['nome'].widget.attrs['id'] = 'id_nome_contato'
self.fields['nome'].widget.attrs['placeholder'] = 'Digite seu nome completo'
self.fields['email'].widget.attrs['class'] = 'form-control form-control-sm'
self.fields['email'].widget.attrs['placeholder'] = 'Digite seu email'
self.fields['mensagem'].widget.attrs['class'] = 'form-control form-control-sm'
self.fields['mensagem'].widget.attrs['placeholder'] = 'Escreva aqui sua mensagem...'
class CepForm(forms.Form):
    """Single-field form for looking up a CEP (Brazilian postal code)."""
    cep = forms.CharField(label='', required=True)
    def __init__(self, *args, **kwargs):
        super(CepForm, self).__init__(*args, **kwargs)
        self.fields['cep'].widget.attrs['class'] = 'form-control mr-0'
        # Placeholder shows the CEP input mask (#####-###).
        self.fields['cep'].widget.attrs['placeholder'] = '_____-___'
Day 01 - Beginner - Working with Variables in Python to Manage Data/05_PROJECT_band_name_generator.py | not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022 | 0 | 6614397 | # 1. Create a greeting for your program.
# Band Name Generator: greet the user, collect a hometown and a pet name,
# then print the combined "band name".
# (Alternative greeting kept for reference.)
# print("Welcome!! I hope you enjoy your stay!!!")
print("wewcome! >_< me hope u enjoy youw stay! >_<")

# Prompt for the city the user grew up in; the trailing '\n' keeps the input
# cursor on a new line (see
# https://replit.com/@appbrewery/band-name-generator-end).
hometown = input('What city did you grow up in?\n')
# Prompt for a pet's name the same way.
pet_name = input('Write a name of a pet.\n')
# The band name is simply "<city> <pet>".
print(f"Your band name is {hometown} {pet_name}")
| # 1. Create a greeting for your program.
# print("Welcome!! I hope you enjoy your stay!!!")
print("wewcome! >_< me hope u enjoy youw stay! >_<")
# 2. Ask the user for the city that they grew up in.
city = input('What city did you grow up in?\n')
# 3. Ask the user for the name of a pet.
pet = input('Write a name of a pet.\n')
# 4. Combine the name of their city and pet and show them their band name.
print(f"Your band name is {city} {pet}")
# 5. Make sure the input cursor shows on a new line, see the example at:
# https://replit.com/@appbrewery/band-name-generator-end
| en | 0.900745 | # 1. Create a greeting for your program. # print("Welcome!! I hope you enjoy your stay!!!") # 2. Ask the user for the city that they grew up in. # 3. Ask the user for the name of a pet. # 4. Combine the name of their city and pet and show them their band name. # 5. Make sure the input cursor shows on a new line, see the example at: # https://replit.com/@appbrewery/band-name-generator-end | 4.38592 | 4 |
python/temp.py | dorfingerjonas/temperature-measuring | 0 | 6614398 | <reponame>dorfingerjonas/temperature-measuring<gh_stars>0
import os
import glob
import time
import json
from firebase import firebase
# Load the Firebase URL (and any other settings) from config.json.
with open('config.json', 'r') as c:
    config = json.load(c)
# NOTE(review): this rebinds the imported ``firebase`` module name to the
# application instance — works, but shadows the module from here on.
firebase = firebase.FirebaseApplication(config['url'], None)
# Load the 1-wire kernel modules (presumably for a DS18B20-style sensor
# given the '28*' device family below — TODO confirm).
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
# Picks the first matching sensor directory; raises IndexError if none found.
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
    """Return all lines of the 1-wire device file as a list of strings.

    Reads the module-level ``device_file`` path discovered at import time.
    """
    # Context manager closes the handle even if the read raises; the
    # original open()/readlines()/close() sequence leaked on error.
    with open(device_file, 'r') as f:
        return f.readlines()
def read_temp():
    """Poll the sensor until a valid reading appears, then return the
    temperature in degrees Celsius (implicitly None if the 't=' marker
    is missing from the device output)."""
    raw = read_temp_raw()
    # The first line ends in 'YES' once the device's CRC check passed;
    # retry every 200 ms until then.
    while not raw[0].strip().endswith('YES'):
        time.sleep(0.2)
        raw = read_temp_raw()
    # The second line carries the reading as '... t=<millidegrees>'.
    _, marker, millidegrees = raw[1].partition('t=')
    if marker:
        return float(millidegrees) / 1000
def insert_into_db(value):
    """Push one temperature reading to Firebase.

    Appends the reading to /temperature/history/ and overwrites
    /temperature/currentTemperature with the same payload.

    :param value: temperature reading (degrees Celsius, per read_temp()).
    """
    data = {
        'temp': value,
        # Millisecond epoch timestamp, stored as a string.
        'timestamp': str(int(time.time() * 1000))
    }
    firebase.post('/temperature/history/', data)
    firebase.put('/temperature', 'currentTemperature', data)
# Announce the configuration, then sample and upload forever. The interval
# is re-read from Firebase every cycle so it can be tuned remotely at runtime.
print('Started with url ' + str(config['url']) + ' and interval of ' +
      str(firebase.get('temperature/interval', '')) + ' second(s).')
while True:
    insert_into_db(read_temp())
    # NOTE(review): int(...) raises if the interval key is missing/non-numeric.
    time.sleep(int(firebase.get('temperature/interval', '')))
| import os
import glob
import time
import json
from firebase import firebase
with open('config.json', 'r') as c:
config = json.load(c)
firebase = firebase.FirebaseApplication(config['url'], None)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
temp_c = float(temp_string) / 1000
return temp_c
def insert_into_db(value):
data = {
'temp': value,
'timestamp': str(int(time.time() * 1000))
}
firebase.post('/temperature/history/', data)
firebase.put('/temperature', 'currentTemperature', data)
print('Started with url ' + str(config['url']) + ' and interval of ' +
str(firebase.get('temperature/interval', '')) + ' second(s).')
while True:
insert_into_db(read_temp())
time.sleep(int(firebase.get('temperature/interval', ''))) | none | 1 | 2.629623 | 3 | |
main.py | Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest | 4 | 6614399 | import cv2
from func import main_detect, main_class, GUI_init
# Load the test videos (vc feeds classification, cap feeds detection below).
vc = cv2.VideoCapture('video/dxandcar.mp4')
cap = cv2.VideoCapture('video/dxha.mp4')
if __name__ == '__main__':
    GUI_init()  # Initialize the GUI
    # Alternate detection and classification passes forever.
    while 1:
        main_detect(cap)
        main_class(vc)
| import cv2
from func import main_detect, main_class, GUI_init
# 载入测试视频
vc = cv2.VideoCapture('video/dxandcar.mp4')
cap = cv2.VideoCapture('video/dxha.mp4')
if __name__ == '__main__':
GUI_init() # 初始化GUI界面
while 1:
main_detect(cap)
main_class(vc)
| zh | 0.713566 | # 载入测试视频 # 初始化GUI界面 | 2.432025 | 2 |
setup.py | mcnakhaee/palmerpenguins | 27 | 6614400 | from setuptools import setup
import setuptools
# Read the long description up front so the file handle is closed promptly
# (the original left an unclosed open() inside the setup() call).
with open('DESCRIPTION.rst') as description_file:
    long_description = description_file.read()

setup(name='palmerpenguins',
      version='0.1.4',
      url="https://github.com/mcnakhaee/palmerpenguins",
      description="A python package for the palmer penguins dataset ",
      long_description=long_description,
      author='<NAME>',
      # Fixed typo: the original passed 'author_emai', an unknown keyword,
      # so the author e-mail never reached the package metadata.
      author_email='<EMAIL>',
      packages=['palmerpenguins'],
      install_requires=['pandas', 'numpy'],
      include_package_data=True,
      package_data={'': ['data/*.csv']},
      )
| from setuptools import setup
import setuptools
setup(name='palmerpenguins',
version='0.1.4',
url="https://github.com/mcnakhaee/palmerpenguins",
description="A python package for the palmer penguins dataset ",
long_description=open('DESCRIPTION.rst').read(),
author='<NAME>',
author_emai='<EMAIL>',
packages = ['palmerpenguins'],
install_requires=['pandas', 'numpy'],
include_package_data=True,
package_data={'': ['data/*.csv']},
)
| none | 1 | 1.375364 | 1 | |
src/chime_dash/app/pages/__init__.py | covidcaremap/chime | 222 | 6614401 | <filename>src/chime_dash/app/pages/__init__.py
"""app/pages
page layout is controlled here - nothing should be defined here, only initialized
for component definitions see -> app/components
for core logic see --> app/services
for utility classes and functions see --> utils
modules desc. route
---- ---- ----
index homepage /
""" | <filename>src/chime_dash/app/pages/__init__.py
"""app/pages
page layout controlled here - nothing should be defined here only initialized
for component definitions see -> app/components
for core logic see --> app/services
for utility classes and functions see --> utils
modules desc. route
---- ---- ----
index homepage /
""" | en | 0.741943 | app/pages page layout controlled here - nothing should be defined here only initialized for component definitions see -> app/components for core logic see --> app/services for utility classes and functions see --> utils modules desc. route ---- ---- ---- index homepage / | 1.411234 | 1 |
layoutx/app.py | 8or5q/LayoutX | 61 | 6614402 | <reponame>8or5q/LayoutX
import tkinter as tk
import tkinter.font as tkFont
from .store import Store
from .view import View, ResizeOption
from .utils import Singleton, is_windows
from ._registry import RegistryNode
from .tkDnD import TkinterDnD
import logging
import asyncio
__all__ = ["Application"]
@Singleton
class Application(RegistryNode):
    """Root of the widget registry tree.

    Owns the TkinterDnD root window, the asyncio loop that pumps Tk
    events, the active ttk style and the registry of widget classes.
    """
    def __init__(self):
        super().__init__(widget = self, name = "app")
        # Import Widgets: register every widget class exported by layoutx.widgets.
        import layoutx.widgets
        self._widgets = {}
        for name in layoutx.widgets.__all__:
            self._widgets.update({name : getattr(layoutx.widgets, name)})
        self._tk = None         # TkinterDnD root window, created lazily in setup()
        self._loop = None       # asyncio loop driving _updater()
        self._root_node = None  # registry node of the currently mounted root view
        self._style = None      # ttk (or ttkthemes) style object
        self._config = {}       # shared app config (currently just "font")

    def setup(self, store: Store, rootView: View, font=None, style: str=None, interval=1/120, loop=None):
        """Create the Tk root (first call only), choose the default font,
        apply the optional ttk theme and mount ``rootView`` as root view.

        :param store: state store handed to the root view.
        :param rootView: View subclass to instantiate as the root.
        :param font: optional {"family", "size"} dict; defaults to a
            monospace system font when omitted.
        :param style: optional ttkthemes theme name.
        :param interval: UI pump period in seconds (default ~1/120 s).
        :param loop: asyncio loop to use; defaults to asyncio.get_event_loop().
        """
        if not self._tk:
            self._tk = TkinterDnD.Tk()
            self._loop = loop if loop else asyncio.get_event_loop()
            # Route window-close through our cleanup instead of killing Tk directly.
            self._tk.protocol("WM_DELETE_WINDOW", self.close)
            self._ui_task = self._loop.create_task(self._updater(interval))
            # Pick first system font as default if none given
            if font:
                self._config["font"] = font
            else:
                if is_windows():
                    self._config["font"] = {"family": "Courier New", "size": 12} if "Courier New" in tkFont.families() else {"family":tkFont.families()[1], "size": 12}
                else:
                    self._config["font"] = {"family": "DejaVu Sans Mono", "size": 12} if "DejaVu Sans Mono" in tkFont.families() else {"family":tkFont.families()[1], "size": 12}
        if style and not self._style:
            try:
                from ttkthemes import ThemedStyle
                self._style = ThemedStyle(self._tk)
                self._style.set_theme(style)
            except ImportError:
                # ttkthemes not installed; fall back to the stock ttk style.
                self._style = tk.ttk.Style()
        else:
            self._style = tk.ttk.Style()
        # Replace any previously mounted root view.
        if self._root_node:
            self.remove_node(self._root_node)
        self._root_node = self.add_view(
            rootView(
                tkinter=self._tk,
                store=store
            )
        )
        self._root_node.widget.redraw()

    @property
    def loop(self):
        """The asyncio loop driving the Tk event pump."""
        return self._loop

    def close(self):
        """Cancel the UI pump; final teardown runs when the task finishes."""
        self._ui_task.add_done_callback(lambda *_: self._cleanup())
        self._ui_task.cancel()

    @property
    def config(self):
        """Mutable application-wide configuration dict."""
        return self._config

    @property
    def style(self):
        """The active ttk style object (set during setup())."""
        return self._style

    def run( self ):
        """Block on the asyncio loop until close() stops it, then close it."""
        self._loop.run_forever()
        self._loop.close()

    def get_root_node(self) -> RegistryNode:
        """Return the registry node of the mounted root view."""
        return self._root_node

    def get_view(self, name: str) -> RegistryNode:
        """Look up a registered view node by class name.

        Raises ValueError when no single match exists.
        NOTE(review): also raised when more than one child matches, and the
        message contains a typo ("registed").
        """
        filter_view = self.filter_children(name=name)
        if len(filter_view) == 1:
            return filter_view[0]
        else:
            raise ValueError(f"View {name} not registed")

    def add_view(self, view: View) -> RegistryNode:
        """Register ``view``, replacing any same-named view; every view
        after the first is added hidden."""
        name = view.__class__.__name__
        old_view = self.filter_children(name=name)
        if len(old_view) > 0:
            self.remove_node(old_view[0])
        if len(self.children) > 0:
            view.hide()
        return self._add_node(widget=view, name=view.__class__.__name__)

    def add_custom_widget(self, name, cls):
        """Register an extra widget class under ``name`` (must be unique)."""
        if name in self._widgets:
            raise ValueError(f"Widget name: {name} already exists")
        self._widgets[name] = cls

    def update(self):
        """Process one batch of pending Tk events."""
        self._tk.update()

    def get_widget_cls(self, name):
        """Resolve a widget class by registry name (KeyError if unknown)."""
        if name not in self._widgets:
            raise KeyError(f"Widget: {name}, does not exist or was never added to the registry")
        return self._widgets[name]

    async def _updater(self, interval):
        # Cooperative Tk event pump: yields to asyncio between updates.
        while True:
            self.update()
            await asyncio.sleep(interval)

    def _cleanup(self):
        # Stop the loop and tear down the Tk root (runs after the pump task ends).
        self._loop.stop()
        self._tk.destroy()
| import tkinter as tk
import tkinter.font as tkFont
from .store import Store
from .view import View, ResizeOption
from .utils import Singleton, is_windows
from ._registry import RegistryNode
from .tkDnD import TkinterDnD
import logging
import asyncio
__all__ = ["Application"]
@Singleton
class Application(RegistryNode):
def __init__(self):
super().__init__(widget = self, name = "app")
#Import Widgets
import layoutx.widgets
self._widgets = {}
for name in layoutx.widgets.__all__:
self._widgets.update({name : getattr(layoutx.widgets, name)})
self._tk = None
self._loop = None
self._root_node = None
self._style = None
self._config = {}
def setup(self, store: Store, rootView: View, font=None, style: str=None, interval=1/120, loop=None):
if not self._tk:
self._tk = TkinterDnD.Tk()
self._loop = loop if loop else asyncio.get_event_loop()
self._tk.protocol("WM_DELETE_WINDOW", self.close)
self._ui_task = self._loop.create_task(self._updater(interval))
# Pick first system font as default if none given
if font:
self._config["font"] = font
else:
if is_windows():
self._config["font"] = {"family": "Courier New", "size": 12} if "Courier New" in tkFont.families() else {"family":tkFont.families()[1], "size": 12}
else:
self._config["font"] = {"family": "DejaVu Sans Mono", "size": 12} if "DejaVu Sans Mono" in tkFont.families() else {"family":tkFont.families()[1], "size": 12}
if style and not self._style:
try:
from ttkthemes import ThemedStyle
self._style = ThemedStyle(self._tk)
self._style.set_theme(style)
except ImportError:
# ttkstyles not installed
self._style = tk.ttk.Style()
else:
self._style = tk.ttk.Style()
if self._root_node:
self.remove_node(self._root_node)
self._root_node = self.add_view(
rootView(
tkinter=self._tk,
store=store
)
)
self._root_node.widget.redraw()
@property
def loop(self):
return self._loop
def close(self):
self._ui_task.add_done_callback(lambda *_: self._cleanup())
self._ui_task.cancel()
@property
def config(self):
return self._config
@property
def style(self):
return self._style
def run( self ):
self._loop.run_forever()
self._loop.close()
def get_root_node(self) -> RegistryNode:
return self._root_node
def get_view(self, name: str) -> RegistryNode:
filter_view = self.filter_children(name=name)
if len(filter_view) == 1:
return filter_view[0]
else:
raise ValueError(f"View {name} not registed")
def add_view(self, view: View) -> RegistryNode:
name = view.__class__.__name__
old_view = self.filter_children(name=name)
if len(old_view) > 0:
self.remove_node(old_view[0])
if len(self.children) > 0:
view.hide()
return self._add_node(widget=view, name=view.__class__.__name__)
def add_custom_widget(self, name, cls):
if name in self._widgets:
raise ValueError(f"Widget name: {name} already exists")
self._widgets[name] = cls
def update(self):
self._tk.update()
def get_widget_cls(self, name):
if name not in self._widgets:
raise KeyError(f"Widget: {name}, does not exist or was never added to the registry")
return self._widgets[name]
async def _updater(self, interval):
while True:
self.update()
await asyncio.sleep(interval)
def _cleanup(self):
self._loop.stop()
self._tk.destroy() | en | 0.58202 | #Import Widgets # Pick first system font as default if none given # ttkstyles not installed | 2.079773 | 2 |
run.py | peterger8y/Airbnb-rental-price-predictor | 0 | 6614403 | <gh_stars>0
from flask_login import login_user, logout_user, current_user, LoginManager
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
import sqlite3
from sqlalchemy import Table, create_engine
import dash_bootstrap_components as dbc
import dash
import os
import warnings
import configparser
from neighbors_model import bathroom_text_encoder, pipeline_model
import pandas as pd
from data_loading import load_listing
# Stylesheets and meta tags for the Dash page.
external_stylesheets = [
    dbc.themes.UNITED,  # Bootswatch theme
    'https://use.fontawesome.com/releases/v5.9.0/css/all.css',  # for social media icons
]
meta_tags=[
    {'name': 'viewport', 'content': 'width=device-width, initial-scale=1'}
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=meta_tags)
server = app.server
app.config.suppress_callback_exceptions = True  # see https://dash.plot.ly/urls
server.config.update(
    # NOTE(review): a fresh random SECRET_KEY on every start invalidates all
    # existing sessions/logins at each restart — confirm that is intended.
    SECRET_KEY=os.urandom(12),
    SQLALCHEMY_DATABASE_URI='sqlite:///data.sqlite',
    SQLALCHEMY_TRACK_MODIFICATIONS=False
)
app.title = 'Airbnb Price Predictor'  # appears in browser title bar
def get_layout(center_lat, center_long):
    """Build the plotly layout dict for the scattermapbox figure.

    :param center_lat: latitude the map is centered on.
    :param center_long: longitude the map is centered on.
    :return: plotly ``layout`` dict (open-street-map style, zoom 10).
    """
    key = '<KEY>'  # mapbox access token passed through to the figure
    # Renamed the local from ``map`` so it no longer shadows the builtin.
    layout = dict(
        autosize=True,
        height=500,
        # NOTE(review): 'weidth' is a typo for 'width'; plotly ignores the
        # unknown key, so "fixing" it would suddenly force a 100px-wide map.
        # Left as-is pending a decision on the intended width.
        weidth=100,
        font=dict(color="#191A1A"),
        titlefont=dict(color="#191A1A", size='14'),
        margin=dict(
            l=0,
            r=0,
            b=0,
            t=0
        ),
        hovermode="closest",
        plot_bgcolor='#fffcfc',
        paper_bgcolor='#fffcfc',
        legend=dict(font=dict(size=2), orientation='h'),
        mapbox=dict(
            accesstoken=key,
            style="open-street-map",
            center=dict(
                lon=center_long,
                lat=center_lat,
            ),
            zoom=10,
        )
    )
    return layout
def create_figure(df, city):
    """Build a scattermapbox figure dict for the listings in ``df``,
    centered on their mean coordinate and with the trace named ``city``."""
    mean_lat = sum(df.latitude) / len(df.latitude)
    mean_lon = sum(df.longitude) / len(df.longitude)

    # One hover label per listing; each is wrapped in a single-element list,
    # matching the original payload shape.
    hover_labels = []
    for hood, price, rating, beds, bath in zip(
            df['neighbourhood'], df['price'], df['review_scores_rating'],
            df['bedrooms'], df['bathrooms_text']):
        hover_labels.append(
            ["Neighborhood: {} Price: {} Rating: {} Beds: {} Bath:{}".format(
                hood, price, rating, beds, bath)])

    trace = {
        "type": "scattermapbox",
        "lat": list(df.latitude),
        "lon": list(df.longitude),
        "hoverinfo": "text",
        "hovertext": hover_labels,
        "mode": "markers",
        "name": city,
        "marker": {
            "size": df['size'],
            "opacity": 0.7,
            "color": df['color'],
            "color_discrete_map": {'yes': 'red', 'no': 'blue'},
            "color_discrete_sequence": ['blue', 'red']
        }
    }
    return {"data": [trace], "layout": get_layout(mean_lat, mean_lon)}
# --- One-time data bootstrap at import time --------------------------------
dir_value = 'united-states, tx, austin'
# Default city's listings plus the list of all available city keys.
city_df, keys = load_listing(dir_value=dir_value, list_names=True)
for column in city_df.columns:
    city_df[column] = city_df[column].fillna("Missing")
lat = city_df['latitude']
long = city_df['longitude']
n = len(lat)
center_lat = sum(lat) / n
center_long = sum(long) / n
# Persisted click counter (written to clicks.pkl for use elsewhere).
clicks = {'clicks': [0]}
count_btn_press = pd.DataFrame(data=clicks)
count_btn_press.to_pickle('clicks.pkl')
cities = [x for x in keys]
# server = flask.Flask(__name__)
# app = Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=meta_tags, server=server)
# app.title = "Airbnb Rental Price Predictor"
# app.config.suppress_callback_exceptions = True
# Dropdown option sources derived from the default city's data.
room_type = city_df['room_type'].unique()
bath_options = city_df['bathrooms_text'].unique()
bed_options = city_df['beds'].unique()
# Default marker styling for the map.
city_df['color'] = 'red'
city_df['size'] = 5
warnings.filterwarnings("ignore")
# NOTE(review): the connection returned here is discarded; the line only
# touches/creates data.sqlite as a side effect — confirm it is needed.
sqlite3.connect('data.sqlite')
engine = create_engine('sqlite:///data.sqlite')
db = SQLAlchemy()
config = configparser.ConfigParser()
class Users(db.Model):
    """SQLAlchemy model backing the ``users`` table."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(15), unique=True, nullable = False)
    email = db.Column(db.String(50), unique=True)
    # Werkzeug password hash, never the plain-text password.
    password = db.Column(db.String(80))
# Core-level handle on the same table, used for raw inserts in insert_users().
Users_tbl = Table('users', Users.metadata)
def create_users_table():
    """Create the users table (no-op if it already exists)."""
    Users.metadata.create_all(engine)
create_users_table()
db.init_app(server)
# Top navigation bar shared by every page.
navbar = dbc.NavbarSimple(
    brand='Airbnb Price Predictor',
    brand_href='/',
    children=[
        dbc.NavItem(dcc.Link('Predictions', href='/predictions', className='nav-link')),
    ],
    sticky='top',
    color='primary',
    light=False,
    dark=True
)
# Single-page-app shell: display_page() swaps page bodies into #page-content.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    navbar,
    dbc.Container(id='page-content', className='mt-4'),
    html.Hr(),
])
# Flask-Login wiring; unauthenticated users are redirected to /login.
login_manager = LoginManager()
login_manager.init_app(server)
login_manager.login_view = '/login'
# NOTE(review): rebinds ``Users`` to a subclass mixing in Flask-Login's
# UserMixin (is_authenticated, get_id, ...). All code below uses this
# subclass; the shadowing of the model above looks deliberate but is worth
# confirming.
class Users(UserMixin, Users):
    pass
column1 = dbc.Col(
[
dcc.Markdown(
"""
## This app will enable Airbnb Hosts decide what is the ideal price of their listings dependant on location,type of home, rooms and number of bathrooms.
"""
),
dcc.Link(dbc.Button('Check it out!', color='primary'), href='/create')
],
md=4,
)
column2 = dbc.Col(
[
html.Img(src='assets/airbnb-host.jpeg', className='img-fluid', style={'height': '350px'})
]
)
index = dbc.Row([column1, column2])
# Sign-up page: username / password / email inputs handled by insert_users().
create = html.Div([html.H1('Create User Account')
                   , dcc.Location(id='create_user', refresh=True)
                   , dcc.Input(id="username"
                               , type="text"
                               , placeholder="<NAME>"
                               , maxLength =15)
                   , dcc.Input(id="password"
                               , type="password"
                               , placeholder="password")
                   , dcc.Input(id="email"
                               , type="email"
                               , placeholder="email"
                               , maxLength = 50)
                   , html.Button('Create User', id='submit-val', n_clicks=0)
                   , html.Div(id='container-button-basic')
                   ])
# Login page; successful()/update_output() react to the login button.
login = html.Div([dcc.Location(id='url_login', refresh=True)
                  , html.H2('''Please log in to continue:''', id='h1')
                  , dcc.Input(placeholder='Enter your username',
                              type='text',
                              id='uname-box')
                  , dcc.Input(placeholder='Enter your password',
                              type='password',
                              id='pwd-box')
                  , html.Button(children='Login',
                                n_clicks=0,
                                type='submit',
                                id='login-button')
                  , html.Div(children='', id='output-state')
                  ])
# Shown after a successful login; links through to the predictor.
success = html.Div([dcc.Location(id='url_login_success', refresh=True)
                    , html.Div([html.H2('Login successful.')
                                , html.Br()
                                , html.P('Go to Predictor')
                                , dcc.Link(dbc.Button('Predict', color='primary'), href = '/predictions')
                                ])  # end div
                    , html.Div([html.Br()
                                , html.Button(id='back-button', children='Go back', n_clicks=0)
                                ])  # end div
                    ])  # end div
# Shown when /success is hit without an authenticated session;
# embeds the login form again.
failed = html.Div([dcc.Location(id='url_login_df', refresh=True)
                   , html.Div([html.H2('Log in Failed. Please try again.')
                               , html.Br()
                               , html.Div([login])
                               , html.Br()
                               , html.Button(id='back-button', children='Go back', n_clicks=0)
                               ])  # end div
                   ])  # end div
# Shown after logout; embeds the login form for re-authentication.
logout = html.Div([dcc.Location(id='logout', refresh=True)
                   , html.Br()
                   , html.Div(html.H2('You have been logged out - Please login'))
                   , html.Br()
                   , html.Div([login])
                   , html.Button(id='back-button', children='Go back', n_clicks=0)
                   ])  # end div
# Predictor page: city / listing-type / bathrooms / beds selectors,
# latitude-longitude inputs, the map, and the prediction output box.
# All ids below are wired to predict_price().
predictions = html.Div(children=[
    html.Div(
        html.H4(children="Select City:")
    ),
    html.Div(children=[
        dcc.Dropdown(id='city_dd',
                     options=[{'label': i, 'value': i} for i in cities],
                     value='united-states, tx, austin', placeholder='united-states, tx, austin',
                     style={'height': 50, 'width': 500, }),
        dcc.Store(id='current_city', storage_type='session', data='Austin, TX'),
    ]),
    html.Div(className='row',
             children=[
                 html.Div(
                     dcc.Textarea(id='Static_listing_type_text',
                                  value='Select Listing Type:',
                                  className="three columns",
                                  style={'height': 50, 'width': 200, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='listing_dd',
                                  options=[{'label': i, 'value': i} for i in room_type],
                                  value=room_type[0], placeholder=room_type[0],
                                  className="three columns",
                                  style={'height': 50, 'width': 200, 'color': 'black'},
                                  )
                 ),
                 html.Div(
                     dcc.Textarea(id='Static_num_bathrooms_text',
                                  value='Select # of bathrooms:',
                                  className="twelve columns",
                                  style={'height': 50, 'width': 175, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='num_bathrooms_dd',
                                  options=[{'label': i, 'value': i} for i in bath_options],
                                  value='1 bath', placeholder='1 bath',
                                  className="three columns",
                                  style={'height': 50, 'width': 150, 'color': 'black'},
                                  )
                 ),
                 html.Div(
                     dcc.Textarea(id='Static_num_bedrooms_text',
                                  value='Select # of Beds:',
                                  className="three columns",
                                  style={'height': 50, 'width': 175, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='num_bedrooms_dd',
                                  options=[{'label': i, 'value': i} for i in bed_options],
                                  value='1', placeholder='1',
                                  className="three columns",
                                  style={'height': 50, 'width': 150, 'color': 'black'},
                                  )
                 ),
             ]
             ),
    html.Div(className='row', children=[
        html.Div(children=[
            html.Button('Filter Listings for Selected Options', id='filter_button', n_clicks=0),
            dcc.Store(id='session', storage_type='session', data=clicks),
        ])]
    ),
    html.Div(className='row',
             children=[
                 html.Div(
                     html.H6(children="Latitude:")
                 ),
                 dcc.Input(id="lat_dd", placeholder=center_lat, type="number"),
                 html.Br(),
                 # NOTE(review): id="output" appears twice in this layout
                 # (here and in the Longitude row); duplicate component ids
                 # are invalid in Dash — confirm and rename one of them.
                 html.P(id="output")
             ]),
    html.Div(className='row',
             children=[
                 html.Div(
                     html.H6(children="Longitude:")
                 ),
                 dcc.Input(id="long_dd", placeholder=center_long, type="number"),
                 html.Br(),
                 html.P(id="output")
             ]),
    html.Div(className='row', children=[
        html.Div(children=[
            dcc.Graph(
                id='MapPlot', figure=create_figure(city_df, 'Austin, TX')
            )
        ]
        ),
        html.Div(
            dcc.Textarea(id='prediction-output',
                         value='Output',
                         className="two columns",
                         style={'height': 100, 'width': 300, "margin-left": "15px"},
                         disabled=True))
    ])
]
)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: reload a user from the session-stored id."""
    return Users.query.get(int(user_id))
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the requested pathname to the matching page layout."""
    # Pages that need no authentication check.
    static_pages = {
        '/': index,
        '/create': create,
        '/login': login,
        '/predictions': predictions,
    }
    if pathname in static_pages:
        return static_pages[pathname]
    if pathname == '/success':
        # Only an authenticated session may see the success page.
        return success if current_user.is_authenticated else failed
    if pathname == '/logout':
        # End the session if there is one; the layout is the same either way.
        if current_user.is_authenticated:
            logout_user()
        return logout
    return '404'
@app.callback(
    [Output('container-button-basic', "children")]
    , [Input('submit-val', 'n_clicks')]
    , [State('username', 'value'), State('password', 'value'), State('email', 'value')])
def insert_users(n_clicks, un, pw, em):
    """Create a new user row from the sign-up form.

    Returns the login layout on success, or a "log in instead" prompt when
    any field is missing (including the callback's initial fire, where all
    three State values are None).

    Fixes over the original:
    * the password is hashed only after the None-check — the original called
      generate_password_hash(None) on the initial callback and raised;
    * the connection is managed with ``with`` so it is closed even if the
      INSERT fails (the original leaked it on error).
    """
    if un is not None and pw is not None and em is not None:
        hashed_password = generate_password_hash(pw, method='sha256')
        ins = Users_tbl.insert().values(username=un, password=hashed_password, email=em,)
        with engine.connect() as conn:
            conn.execute(ins)
        return [login]
    else:
        return [html.Div([html.H2('Already have a user account?'), dcc.Link('Click here to Log In', href='/login')])]
@app.callback(
    Output('url_login', 'pathname')
    , [Input('login-button', 'n_clicks')]
    , [State('uname-box', 'value'), State('pwd-box', 'value')])
def successful(n_clicks, input1, input2):
    """Redirect to /success when the credentials check out; otherwise
    return None implicitly so the URL stays unchanged."""
    user = Users.query.filter_by(username=input1).first()
    if user and check_password_hash(user.password, input2):
        login_user(user)
        return '/success'
@app.callback(
    Output('output-state', 'children')
    , [Input('login-button', 'n_clicks')]
    , [State('uname-box', 'value'), State('pwd-box', 'value')])
def update_output(n_clicks, input1, input2):
    """Show an error message under the login form after a failed attempt."""
    if n_clicks <= 0:
        # Initial render: no attempt yet, nothing to report.
        return ''
    user = Users.query.filter_by(username=input1).first()
    if user and check_password_hash(user.password, input2):
        return ''
    # Same message for unknown user and wrong password (no user enumeration).
    return 'Incorrect username or password'
@app.callback(
    Output('prediction-output', 'value'),
    Output('MapPlot', 'figure'),
    [Input('city_dd', 'value'),
     Input('num_bedrooms_dd', 'value'),
     Input('num_bathrooms_dd', 'value'),
     Input('listing_dd', 'value'),
     Input('lat_dd', 'value'),
     Input('long_dd', 'value'),
     ]
)
def predict_price(city_dd, num_bedrooms_dd, num_bathrooms_dd, listing_dd, lat_dd, long_dd):
    """Predict a nightly price for the selected options and rebuild the map.

    Returns a text summary for the output box and a figure showing the
    listings matching the filters (red) plus the model's nearest
    neighbors (blue, larger markers).
    """
    # Single-row frame describing the hypothetical listing to price.
    df_predict = pd.DataFrame(
        columns=['bedrooms', 'bathrooms_text', 'room_type', 'latitude', 'longitude'],
        data=[[num_bedrooms_dd, num_bathrooms_dd, listing_dd, lat_dd, long_dd]])
    # Fresh listings for the chosen city, with default marker styling.
    new = load_listing(dir_value=city_dd)
    new['color'] = 'red'
    new['size'] = 5
    new1 = new[['bedrooms', 'bathrooms_text', 'room_type', 'price', 'latitude', 'longitude']]
    # Encode the free-text bathroom description into shared/private counts.
    shared, private = bathroom_text_encoder(df_predict)
    df_predict['shared_bathrooms'] = shared
    df_predict['private_bathrooms'] = private
    df_predict.drop(columns=['bathrooms_text'], inplace=True)
    new1 = new1.replace("Missing", None)
    # Fit the pricing pipeline on the city's listings; the individual stages
    # are kept so the query point can be pushed through them for kNN lookup.
    pipe, oh, stand, simp, kneigh = pipeline_model(new1,
                                                   cols_to_keep=['bathrooms_text', 'bedrooms', 'room_type',
                                                                 'price', 'latitude', 'longitude'])
    one = oh.transform(df_predict)
    two = stand.transform(one)
    three = simp.transform(two)
    # kneighbors returns (distances, indices); with one query row the index
    # array has a single entry holding all 20 neighbor positions.
    four = kneigh.kneighbors(three, n_neighbors=20)
    y_pred = pipe.predict(df_predict)[0]
    near_neighbors = four[1]
    value = f'${y_pred} is the optimal rental price for the property'
    # Listings matching the user's filters exactly (shown in red).
    filter_df = new.copy()
    filter_df['bedrooms'] = filter_df['bedrooms'].astype('float')
    filter_df = filter_df.loc[filter_df['bathrooms_text'] == num_bathrooms_dd]
    filter_df = filter_df.loc[filter_df['bedrooms'] >= float(num_bedrooms_dd)]
    filter_df = filter_df.loc[filter_df['room_type'] == listing_dd]
    # Highlight the nearest neighbors (blue, larger markers).
    # NOTE(review): ``beta`` from the last loop pass is used below; with one
    # query row the loop runs exactly once and covers all 20 neighbors.
    for x in near_neighbors:
        beta = new.loc[x]
        beta['color'] = 'blue'
        beta['size'] = 20
    final_df = pd.concat([filter_df, beta])
    figure = create_figure(final_df, city_dd)
    return value, figure
if __name__ == "__main__":
    # Dev entry point: debug reloader on, single-threaded, two worker
    # processes. NOTE(review): debug=True combined with processes=2 can
    # conflict in some Werkzeug versions — verify.
    app.run_server(debug=True, threaded=False, processes=2)
| from flask_login import login_user, logout_user, current_user, LoginManager
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
import sqlite3
from sqlalchemy import Table, create_engine
import dash_bootstrap_components as dbc
import dash
import os
import warnings
import configparser
from neighbors_model import bathroom_text_encoder, pipeline_model
import pandas as pd
from data_loading import load_listing
external_stylesheets = [
dbc.themes.UNITED, # Bootswatch theme
'https://use.fontawesome.com/releases/v5.9.0/css/all.css', # for social media icons
]
meta_tags=[
{'name': 'viewport', 'content': 'width=device-width, initial-scale=1'}
]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=meta_tags)
server = app.server
app.config.suppress_callback_exceptions = True # see https://dash.plot.ly/urls
server.config.update(
SECRET_KEY=os.urandom(12),
SQLALCHEMY_DATABASE_URI='sqlite:///data.sqlite',
SQLALCHEMY_TRACK_MODIFICATIONS=False
)
app.title = 'Airbnb Price Predictor' # appears in browser title bar
def get_layout(center_lat, center_long):
key = '<KEY>'
map = dict(
autosize=True,
height=500,
weidth=100,
font=dict(color="#191A1A"),
titlefont=dict(color="#191A1A", size='14'),
margin=dict(
l=0,
r=0,
b=0,
t=0
),
hovermode="closest",
plot_bgcolor='#fffcfc',
paper_bgcolor='#fffcfc',
legend=dict(font=dict(size=2), orientation='h'),
mapbox=dict(
accesstoken=key,
style="open-street-map",
center=dict(
lon=center_long,
lat=center_lat,
),
zoom=10,
)
)
return map
def create_figure(df, city):
center_lat = sum(df.latitude) / len(df.latitude)
center_long = sum(df.longitude) / len(df.longitude)
layout_map = get_layout(center_lat, center_long)
figure = {
"data": [{
"type": "scattermapbox",
"lat": list(df.latitude),
"lon": list(df.longitude),
"hoverinfo": "text",
"hovertext": [["Neighborhood: {} Price: {} Rating: {} Beds: {} Bath:{}".format(i, j, k, n, m)]
for i, j, k, n, m in zip(df['neighbourhood'], df['price'], df['review_scores_rating'],
df['bedrooms'], df['bathrooms_text'],
)],
"mode": "markers",
"name": city,
"marker": {
"size": df['size'],
"opacity": 0.7,
"color": df['color'],
"color_discrete_map": {'yes': 'red', 'no': 'blue'},
"color_discrete_sequence": ['blue', 'red']
}
}],
"layout": layout_map
}
return figure
dir_value = 'united-states, tx, austin'
city_df, keys = load_listing(dir_value=dir_value, list_names=True)
for column in city_df.columns:
city_df[column] = city_df[column].fillna("Missing")
lat = city_df['latitude']
long = city_df['longitude']
n = len(lat)
center_lat = sum(lat) / n
center_long = sum(long) / n
clicks = {'clicks': [0]}
count_btn_press = pd.DataFrame(data=clicks)
count_btn_press.to_pickle('clicks.pkl')
cities = [x for x in keys]
# server = flask.Flask(__name__)
# app = Dash(__name__, external_stylesheets=external_stylesheets, meta_tags=meta_tags, server=server)
# app.title = "Airbnb Rental Price Predictor"
# app.config.suppress_callback_exceptions = True
room_type = city_df['room_type'].unique()
bath_options = city_df['bathrooms_text'].unique()
bed_options = city_df['beds'].unique()
city_df['color'] = 'red'
city_df['size'] = 5
warnings.filterwarnings("ignore")
sqlite3.connect('data.sqlite')
engine = create_engine('sqlite:///data.sqlite')
db = SQLAlchemy()
config = configparser.ConfigParser()
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True, nullable = False)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
Users_tbl = Table('users', Users.metadata)
def create_users_table():
Users.metadata.create_all(engine)
create_users_table()
db.init_app(server)
navbar = dbc.NavbarSimple(
brand='Airbnb Price Predictor',
brand_href='/',
children=[
dbc.NavItem(dcc.Link('Predictions', href='/predictions', className='nav-link')),
],
sticky='top',
color='primary',
light=False,
dark=True
)
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
navbar,
dbc.Container(id='page-content', className='mt-4'),
html.Hr(),
])
login_manager = LoginManager()
login_manager.init_app(server)
login_manager.login_view = '/login'
class Users(UserMixin, Users):
    # Intentionally shadows the ORM model above: mixes in Flask-Login's
    # UserMixin (is_authenticated, get_id, ...) without redefining columns.
    pass
# Landing page: pitch copy on the left, hero image on the right.
column1 = dbc.Col(
    children=[
        dcc.Markdown(
            """
            ## This app will enable Airbnb Hosts decide what is the ideal price of their listings dependant on location,type of home, rooms and number of bathrooms.
            """
        ),
        dcc.Link(dbc.Button('Check it out!', color='primary'), href='/create'),
    ],
    md=4,
)
column2 = dbc.Col(
    children=[
        html.Img(src='assets/airbnb-host.jpeg', className='img-fluid', style={'height': '350px'}),
    ]
)
index = dbc.Row(children=[column1, column2])
# Account-creation page: username / password / email inputs plus submit.
create = html.Div([
    html.H1('Create User Account'),
    dcc.Location(id='create_user', refresh=True),
    dcc.Input(id="username", type="text", placeholder="<NAME>", maxLength=15),
    dcc.Input(id="password", type="password", placeholder="password"),
    dcc.Input(id="email", type="email", placeholder="email", maxLength=50),
    html.Button('Create User', id='submit-val', n_clicks=0),
    html.Div(id='container-button-basic'),
])
# Login form; the `successful` and `update_output` callbacks read these ids.
login = html.Div([
    dcc.Location(id='url_login', refresh=True),
    html.H2('''Please log in to continue:''', id='h1'),
    dcc.Input(placeholder='Enter your username', type='text', id='uname-box'),
    dcc.Input(placeholder='Enter your password', type='password', id='pwd-box'),
    html.Button(children='Login', n_clicks=0, type='submit', id='login-button'),
    html.Div(children='', id='output-state'),
])
# Shown after a successful login, with a shortcut to the predictor.
success = html.Div([
    dcc.Location(id='url_login_success', refresh=True),
    html.Div([
        html.H2('Login successful.'),
        html.Br(),
        html.P('Go to Predictor'),
        dcc.Link(dbc.Button('Predict', color='primary'), href='/predictions'),
    ]),
    html.Div([
        html.Br(),
        html.Button(id='back-button', children='Go back', n_clicks=0),
    ]),
])
# Shown when credentials do not check out; embeds the login form again.
failed = html.Div([
    dcc.Location(id='url_login_df', refresh=True),
    html.Div([
        html.H2('Log in Failed. Please try again.'),
        html.Br(),
        html.Div([login]),
        html.Br(),
        html.Button(id='back-button', children='Go back', n_clicks=0),
    ]),
])
# Post-logout page; prompts the user to log back in.
logout = html.Div([
    dcc.Location(id='logout', refresh=True),
    html.Br(),
    html.Div(html.H2('You have been logged out - Please login')),
    html.Br(),
    html.Div([login]),
    html.Button(id='back-button', children='Go back', n_clicks=0),
])
# Predictor page layout: city / listing-type / bathroom / bed selectors,
# optional latitude-longitude overrides, the listings map, and a read-only
# textarea showing the predicted price.
# NOTE(review): the two coordinate paragraphs below originally shared
# id="output", which Dash forbids (duplicate component ids). They are
# renamed to unique ids here; no callback in this file targeted "output",
# but confirm none elsewhere does either.
predictions = html.Div(children=[
    html.Div(
        html.H4(children="Select City:")
    ),
    html.Div(children=[
        dcc.Dropdown(id='city_dd',
                     options=[{'label': i, 'value': i} for i in cities],
                     value='united-states, tx, austin', placeholder='united-states, tx, austin',
                     style={'height': 50, 'width': 500, }),
        dcc.Store(id='current_city', storage_type='session', data='Austin, TX'),
    ]),
    # Row of labelled dropdowns for listing type, bathrooms and beds.
    html.Div(className='row',
             children=[
                 html.Div(
                     dcc.Textarea(id='Static_listing_type_text',
                                  value='Select Listing Type:',
                                  className="three columns",
                                  style={'height': 50, 'width': 200, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='listing_dd',
                                  options=[{'label': i, 'value': i} for i in room_type],
                                  value=room_type[0], placeholder=room_type[0],
                                  className="three columns",
                                  style={'height': 50, 'width': 200, 'color': 'black'},
                                  )
                 ),
                 html.Div(
                     dcc.Textarea(id='Static_num_bathrooms_text',
                                  value='Select # of bathrooms:',
                                  className="twelve columns",
                                  style={'height': 50, 'width': 175, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='num_bathrooms_dd',
                                  options=[{'label': i, 'value': i} for i in bath_options],
                                  value='1 bath', placeholder='1 bath',
                                  className="three columns",
                                  style={'height': 50, 'width': 150, 'color': 'black'},
                                  )
                 ),
                 html.Div(
                     dcc.Textarea(id='Static_num_bedrooms_text',
                                  value='Select # of Beds:',
                                  className="three columns",
                                  style={'height': 50, 'width': 175, "margin-left": "15px"},
                                  disabled=True)
                 ),
                 html.Div(
                     dcc.Dropdown(id='num_bedrooms_dd',
                                  options=[{'label': i, 'value': i} for i in bed_options],
                                  value='1', placeholder='1',
                                  className="three columns",
                                  style={'height': 50, 'width': 150, 'color': 'black'},
                                  )
                 ),
             ]
             ),
    # Filter button plus the session-scoped click counter store.
    html.Div(className='row', children=[
        html.Div(children=[
            html.Button('Filter Listings for Selected Options', id='filter_button', n_clicks=0),
            dcc.Store(id='session', storage_type='session', data=clicks),
        ])]
             ),
    html.Div(className='row',
             children=[
                 html.Div(
                     html.H6(children="Latitude:")
                 ),
                 dcc.Input(id="lat_dd", placeholder=center_lat, type="number"),
                 html.Br(),
                 # Renamed from the duplicated id="output".
                 html.P(id="lat-output")
             ]),
    html.Div(className='row',
             children=[
                 html.Div(
                     html.H6(children="Longitude:")
                 ),
                 dcc.Input(id="long_dd", placeholder=center_long, type="number"),
                 html.Br(),
                 # Renamed from the duplicated id="output".
                 html.P(id="long-output")
             ]),
    # Map of listings plus the read-only prediction textarea.
    html.Div(className='row', children=[
        html.Div(children=[
            dcc.Graph(
                id='MapPlot', figure=create_figure(city_df, 'Austin, TX')
            )
        ]
        ),
        html.Div(
            dcc.Textarea(id='prediction-output',
                         value='Output',
                         className="two columns",
                         style={'height': 100, 'width': 300, "margin-left": "15px"},
                         disabled=True))
    ])
]
)
@login_manager.user_loader
def load_user(user_id):
    """Rehydrate the session user from the id Flask-Login stored."""
    uid = int(user_id)
    return Users.query.get(uid)
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the URL path to the matching page layout ('404' if unknown)."""
    simple_routes = {
        '/': index,
        '/create': create,
        '/login': login,
        '/predictions': predictions,
    }
    if pathname in simple_routes:
        return simple_routes[pathname]
    if pathname == '/success':
        # Only authenticated sessions may see the success page.
        return success if current_user.is_authenticated else failed
    if pathname == '/logout':
        if current_user.is_authenticated:
            logout_user()
        return logout
    return '404'
@app.callback(
    [Output('container-button-basic', "children")]
    , [Input('submit-val', 'n_clicks')]
    , [State('username', 'value'), State('password', 'value'), State('email', 'value')])
def insert_users(n_clicks, un, pw, em):
    """Create a user row from the sign-up form, or show the log-in hint.

    Returns the login layout after a successful insert; otherwise a prompt
    linking to the log-in page. The password is stored as a salted hash,
    never in plaintext.
    """
    if un is not None and pw is not None and em is not None:
        # Hash only once all fields are present; the original hashed first
        # and raised TypeError on the initial render (pw is None).
        hashed_password = generate_password_hash(pw, method='sha256')
        ins = Users_tbl.insert().values(username=un, password=hashed_password, email=em,)
        # Context manager returns the connection to the pool even if the
        # INSERT raises (the original leaked it on error).
        with engine.connect() as conn:
            conn.execute(ins)
        return [login]
    else:
        return [html.Div([html.H2('Already have a user account?'), dcc.Link('Click here to Log In', href='/login')])]
@app.callback(
    Output('url_login', 'pathname')
    , [Input('login-button', 'n_clicks')]
    , [State('uname-box', 'value'), State('pwd-box', 'value')])
def successful(n_clicks, input1, input2):
    """Redirect to /success when the submitted credentials check out.

    Returns None (no navigation) on the initial render, for unknown users,
    and for wrong passwords.
    """
    # Skip the DB lookup until the button has been pressed; the original
    # queried with username=None on every page load.
    if not n_clicks:
        return None
    user = Users.query.filter_by(username=input1).first()
    if user and check_password_hash(user.password, input2):
        login_user(user)
        return '/success'
    return None
@app.callback(
    Output('output-state', 'children')
    , [Input('login-button', 'n_clicks')]
    , [State('uname-box', 'value'), State('pwd-box', 'value')])
def update_output(n_clicks, input1, input2):
    """Return the inline error text for the login form ('' when OK)."""
    if n_clicks > 0:
        user = Users.query.filter_by(username=input1).first()
        # One combined check covers both "no such user" and "bad password".
        if user and check_password_hash(user.password, input2):
            return ''
        return 'Incorrect username or password'
    # Nothing submitted yet: show no message.
    return ''
@app.callback(
    Output('prediction-output', 'value'),
    Output('MapPlot', 'figure'),
    [Input('city_dd', 'value'),
    Input('num_bedrooms_dd', 'value'),
    Input('num_bathrooms_dd', 'value'),
    Input('listing_dd', 'value'),
    Input('lat_dd', 'value'),
    Input('long_dd', 'value'),
    ]
)
def predict_price(city_dd, num_bedrooms_dd, num_bathrooms_dd, listing_dd, lat_dd, long_dd):
    """Predict a nightly price for the selected options and rebuild the map.

    Returns a (message, figure) pair: a human-readable price string for the
    read-only textarea and a map figure showing the filtered listings plus
    the 20 nearest training listings.
    """
    # Single-row frame describing the hypothetical listing to price.
    df_predict = pd.DataFrame(
        columns=['bedrooms', 'bathrooms_text', 'room_type', 'latitude', 'longitude'],
        data=[[num_bedrooms_dd, num_bathrooms_dd, listing_dd, lat_dd, long_dd]])
    # Reload the full listings table for the chosen city.
    new = load_listing(dir_value=city_dd)
    new['color'] = 'red'
    new['size'] = 5
    new1 = new[['bedrooms', 'bathrooms_text', 'room_type', 'price', 'latitude', 'longitude']]
    # Split the free-text bathroom field into shared/private counts.
    # bathroom_text_encoder is defined elsewhere -- presumably returns two
    # numeric column-like values; TODO confirm.
    shared, private = bathroom_text_encoder(df_predict)
    df_predict['shared_bathrooms'] = shared
    df_predict['private_bathrooms'] = private
    df_predict.drop(columns=['bathrooms_text'], inplace=True)
    # Undo the module-level "Missing" fill before fitting the pipeline.
    new1 = new1.replace("Missing", None)
    # pipeline_model (project helper) returns the fitted pipeline plus its
    # individual stages so the KNN stage can be queried for neighbors.
    pipe, oh, stand, simp, kneigh = pipeline_model(new1,
                                                   cols_to_keep=['bathrooms_text', 'bedrooms', 'room_type',
                                                                 'price', 'latitude', 'longitude'])
    # Push the prediction row through each fitted transformer in order.
    one = oh.transform(df_predict)
    two = stand.transform(one)
    three = simp.transform(two)
    # kneighbors returns (distances, indices); index [1] keeps the indices.
    four = kneigh.kneighbors(three, n_neighbors=20)
    y_pred = pipe.predict(df_predict)[0]
    near_neighbors = four[1]
    # NOTE(review): y_pred is interpolated unrounded; consider formatting
    # (e.g. f'${y_pred:.2f}').
    value = f'${y_pred} is the optimal rental price for the property'
    # Narrow the map to listings matching the selected options.
    filter_df = new.copy()
    filter_df['bedrooms'] = filter_df['bedrooms'].astype('float')
    filter_df = filter_df.loc[filter_df['bathrooms_text'] == num_bathrooms_dd]
    filter_df = filter_df.loc[filter_df['bedrooms'] >= float(num_bedrooms_dd)]
    filter_df = filter_df.loc[filter_df['room_type'] == listing_dd]
    # near_neighbors has one row per query (a single query here), so this
    # loop runs once. NOTE(review): final_df would be undefined if the
    # array were empty -- confirm kneighbors always returns a row.
    for x in near_neighbors:
        beta = new.loc[x]
        beta['color'] = 'blue'
        beta['size'] = 20
        final_df = pd.concat([filter_df, beta])
    figure = create_figure(final_df, city_dd)
    return value, figure
if __name__ == "__main__":
    # Dev-server entry point; debug=True enables the reloader/debugger.
    # NOTE(review): processes=2 forks the Werkzeug dev server per request
    # (threaded must be False for that) -- confirm this is intended and not
    # used in production.
    app.run_server(debug=True, threaded=False, processes=2)
character/__init__.py | ahollyer/unicorn-rpg | 0 | 6614404 | <gh_stars>0
__all__ = ["base", "enemies", "hero"]
| __all__ = ["base", "enemies", "hero"] | none | 1 | 1.106818 | 1 | |
docs/ext/autodoc_skip_protocols.py | ScreenPyHQ/screenpy_requests | 1 | 6614405 | PROTOCOL_METHODS = [
"act",
"add_to_chain",
"answered_by",
"aside",
"attach",
"beat",
"describe",
"error",
"forget",
"perform_as",
"scene",
]
def autodoc_skip_member(_, what, name, ____, skip, options):
if what != "class":
return skip
return skip or (name in PROTOCOL_METHODS and not options.undoc_members)
def setup(app):
app.connect(event="autodoc-skip-member", callback=autodoc_skip_member)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| PROTOCOL_METHODS = [
"act",
"add_to_chain",
"answered_by",
"aside",
"attach",
"beat",
"describe",
"error",
"forget",
"perform_as",
"scene",
]
def autodoc_skip_member(_, what, name, ____, skip, options):
if what != "class":
return skip
return skip or (name in PROTOCOL_METHODS and not options.undoc_members)
def setup(app):
app.connect(event="autodoc-skip-member", callback=autodoc_skip_member)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| none | 1 | 2.121988 | 2 | |
test/test_cloudwatch_writer.py | yimuniao/collectd-cloudwatch | 220 | 6614406 | import unittest
import cloudwatch_writer as plugin
from mock import patch, Mock, MagicMock
class CloudWatchWriter(unittest.TestCase):
def setUp(self):
plugin._LOGGER = MagicMock()
plugin._LOGGER.error = Mock()
plugin._LOGGER.info = Mock()
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_without_exceptions(self, config_helper, flusher):
config_helper.return_value = MagicMock()
flusher.return_value = MagicMock()
plugin.aws_init()
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_flusher_exception(self, config_helper, flusher):
config_helper.return_value = MagicMock()
flusher.side_effect = Exception("Cannot initialize flusher.")
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_config_IO_error(self, config_helper, flusher):
config_helper.side_effect = IOError("Cannot load configuration file.")
flusher.side_effect = MagicMock()
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_config_value_error(self, config_helper, flusher):
config_helper.side_effect = ValueError("Inconsistent configuration detected.")
flusher.side_effect = MagicMock()
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_write_passes_vl_to_flusher(self, config_helper, flusher_class):
config_helper.return_value = MagicMock()
flusher = MagicMock()
flusher.add_metric = Mock()
flusher_class.return_value = flusher
plugin.aws_init()
vl = MagicMock()
plugin.aws_write(vl, flusher)
flusher.add_metric.assert_called_with(vl) | import unittest
import cloudwatch_writer as plugin
from mock import patch, Mock, MagicMock
class CloudWatchWriter(unittest.TestCase):
def setUp(self):
plugin._LOGGER = MagicMock()
plugin._LOGGER.error = Mock()
plugin._LOGGER.info = Mock()
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_without_exceptions(self, config_helper, flusher):
config_helper.return_value = MagicMock()
flusher.return_value = MagicMock()
plugin.aws_init()
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_flusher_exception(self, config_helper, flusher):
config_helper.return_value = MagicMock()
flusher.side_effect = Exception("Cannot initialize flusher.")
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_config_IO_error(self, config_helper, flusher):
config_helper.side_effect = IOError("Cannot load configuration file.")
flusher.side_effect = MagicMock()
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_initialize_plugin_with_config_value_error(self, config_helper, flusher):
config_helper.side_effect = ValueError("Inconsistent configuration detected.")
flusher.side_effect = MagicMock()
plugin.aws_init()
self.assertTrue(plugin._LOGGER.error.called)
@patch("cloudwatch_writer.ConfigHelper")
@patch("cloudwatch_writer.Flusher")
def test_write_passes_vl_to_flusher(self, config_helper, flusher_class):
config_helper.return_value = MagicMock()
flusher = MagicMock()
flusher.add_metric = Mock()
flusher_class.return_value = flusher
plugin.aws_init()
vl = MagicMock()
plugin.aws_write(vl, flusher)
flusher.add_metric.assert_called_with(vl) | none | 1 | 2.724988 | 3 | |
scripts/lda.py | pasta41/decameron | 0 | 6614407 | <gh_stars>0
from collections import defaultdict
from datetime import datetime
import math
from operator import itemgetter
import os
import random
import re
import numpy as np
import pandas as pd
import little_mallet_wrapper as lmw
import pdb
import constants
import sys
path_to_mallet = "~/mallet-2.0.8/bin/mallet"
decameron_path = "/home/cooper/src/decameron/data/csv/decameron.csv"
decameron_df = pd.read_csv(decameron_path)
training_data = []
story_ids = decameron_df["ID"].tolist()
chunk_size = 100
# TODO refactor
def chunk_story(story_id, n):
story = decameron_df[decameron_df['ID']==story_id]
text = story['Text'].item()
# there has to be a better way to do this, but whatever
words = text.split()
# rejoin words every n words, put into list
story_chunks = [" ".join(words[i:i+n]) for i in range(0, len(words), n)]
return story_chunks
if len(sys.argv) == 2 and sys.argv[1] == 'chunk':
training_data = []
for story_id in story_ids:
training_data.extend(chunk_story(story_id, chunk_size))
training_data = [lmw.process_string(t,
stop_words=constants.stop_words) for t in training_data]
training_data = [d for d in training_data if d.strip()]
else:
training_data = [lmw.process_string(t,
stop_words=constants.stop_words) for t in decameron_df['Text'].tolist()]
training_data = [d for d in training_data if d.strip()]
lmw.print_dataset_stats(training_data)
num_topics = 20
output_directory_path = '/home/cooper/src/decameron/output'
topic_keys, topic_distributions = lmw.quick_train_topic_model(path_to_mallet,
output_directory_path,
num_topics,
training_data)
assert(len(topic_distributions) == len(training_data))
for i, t in enumerate(topic_keys):
print(i, '\t', ' '.join(t[:10]))
| from collections import defaultdict
from datetime import datetime
import math
from operator import itemgetter
import os
import random
import re
import numpy as np
import pandas as pd
import little_mallet_wrapper as lmw
import pdb
import constants
import sys
path_to_mallet = "~/mallet-2.0.8/bin/mallet"
decameron_path = "/home/cooper/src/decameron/data/csv/decameron.csv"
decameron_df = pd.read_csv(decameron_path)
training_data = []
story_ids = decameron_df["ID"].tolist()
chunk_size = 100
# TODO refactor
def chunk_story(story_id, n):
story = decameron_df[decameron_df['ID']==story_id]
text = story['Text'].item()
# there has to be a better way to do this, but whatever
words = text.split()
# rejoin words every n words, put into list
story_chunks = [" ".join(words[i:i+n]) for i in range(0, len(words), n)]
return story_chunks
if len(sys.argv) == 2 and sys.argv[1] == 'chunk':
training_data = []
for story_id in story_ids:
training_data.extend(chunk_story(story_id, chunk_size))
training_data = [lmw.process_string(t,
stop_words=constants.stop_words) for t in training_data]
training_data = [d for d in training_data if d.strip()]
else:
training_data = [lmw.process_string(t,
stop_words=constants.stop_words) for t in decameron_df['Text'].tolist()]
training_data = [d for d in training_data if d.strip()]
lmw.print_dataset_stats(training_data)
num_topics = 20
output_directory_path = '/home/cooper/src/decameron/output'
topic_keys, topic_distributions = lmw.quick_train_topic_model(path_to_mallet,
output_directory_path,
num_topics,
training_data)
assert(len(topic_distributions) == len(training_data))
for i, t in enumerate(topic_keys):
print(i, '\t', ' '.join(t[:10])) | en | 0.959004 | # TODO refactor # there has to be a better way to do this, but whatever # rejoin words every n words, put into list | 2.235518 | 2 |
tests/cli/test_submit.py | Monia234/NCI-GwasQc | 0 | 6614408 | import pytest
from pytest_mock import MockerFixture
from cgr_gwas_qc.testing import chdir
from cgr_gwas_qc.testing.data import FakeData
@pytest.mark.parametrize(
"cgems,biowulf,cluster_profile",
[(True, False, None), (False, True, None), (False, False, "./test")],
)
def test_check_exclusive_options_no_error(cgems, biowulf, cluster_profile):
from cgr_gwas_qc.cli.submit import check_exclusive_options
check_exclusive_options(cgems, biowulf, cluster_profile)
@pytest.mark.parametrize(
"cgems,biowulf,cluster_profile",
[(True, True, "./test"), (True, True, None), (True, False, "./test"), (False, True, "./test")],
)
def test_check_exclusive_options_raises_error(cgems, biowulf, cluster_profile):
from click.exceptions import Exit as ClickExit
from cgr_gwas_qc.cli.submit import check_exclusive_options
with pytest.raises(ClickExit):
check_exclusive_options(cgems, biowulf, cluster_profile)
@pytest.mark.parametrize("cluster", ["cgems", "biowulf"])
def test_get_profile(cluster):
from pathlib import Path
from cgr_gwas_qc.cli.submit import get_profile
profile = Path(get_profile(cluster))
assert profile.exists() & profile.is_dir()
def test_check_custom_cluster_profile(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue = "all"
submission_cmd = "qsub"
assert cluster_profile.resolve().as_posix() == check_custom_cluster_profile(
cluster_profile, queue, submission_cmd
)
def test_check_custom_cluster_profile_no_profile(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
queue, cmd = "all", "qsub"
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_check_custom_cluster_profile_no_queue(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue, cmd = None, "qsub"
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_check_custom_cluster_profile_no_cmd(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue, cmd = "all", None
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_create_submission_script_cgems(tmp_path):
import os
from cgr_gwas_qc.cli.submit import create_submission_script
with chdir(tmp_path):
payload = {
"python_executable": "python",
"working_dir": os.getcwd(),
"cgems": True,
"biowulf": False,
"time_h": 12,
"queue": "all.q",
"profile": "test_profile",
"local_tasks": 1,
"local_mem_mb": 500,
"group_options": "",
}
create_submission_script(payload)
assert (
"#$ -N GwasQcPipeline" in (tmp_path / ".snakemake/GwasQcPipeline_submission.sh").read_text()
)
def test_create_submission_script_biowulf(tmp_path):
import os
from cgr_gwas_qc.cli.submit import create_submission_script
with chdir(tmp_path):
payload = {
"python_executable": "python",
"working_dir": os.getcwd(),
"cgems": False,
"biowulf": True,
"time_h": 12,
"queue": "all.q",
"profile": "test_profile",
"group_options": "",
}
create_submission_script(payload)
assert (
'#SBATCH --job-name="GwasQcPipeline"'
in (tmp_path / ".snakemake/GwasQcPipeline_submission.sh").read_text()
)
@pytest.mark.parametrize(
"cluster,cmd", [("cgems", "qsub"), ("biowulf", "sbatch"), ("custom", "pbs")]
)
def test_run_submit_with_right_command(cluster, cmd, tmp_path, mocker: MockerFixture):
from cgr_gwas_qc.cli import submit
if cluster == "cgems":
cgems, biowulf, cluster_profile, queue, submission_cmd = True, False, None, None, None
elif cluster == "biowulf": # Biowulf
cgems, biowulf, cluster_profile, queue, submission_cmd = False, True, None, None, None
else:
profile_dir = tmp_path / "test_profile"
profile_dir.mkdir()
cgems, biowulf, cluster_profile, queue, submission_cmd = (
False,
False,
profile_dir,
"all",
cmd,
)
spy = mocker.patch("cgr_gwas_qc.cli.submit.sp.check_output")
FakeData(tmp_path).make_cgr_sample_sheet().make_config()
with chdir(tmp_path):
submit.main(
cgems=cgems,
biowulf=biowulf,
cluster_profile=cluster_profile,
subworkflow=None,
time_hr=12,
queue=queue,
submission_cmd=submission_cmd,
dry_run=False,
notemp=False,
local_mem_mb=1024,
local_tasks=1,
)
spy.assert_called_once_with([cmd, ".snakemake/GwasQcPipeline_submission.sh"])
| import pytest
from pytest_mock import MockerFixture
from cgr_gwas_qc.testing import chdir
from cgr_gwas_qc.testing.data import FakeData
@pytest.mark.parametrize(
"cgems,biowulf,cluster_profile",
[(True, False, None), (False, True, None), (False, False, "./test")],
)
def test_check_exclusive_options_no_error(cgems, biowulf, cluster_profile):
from cgr_gwas_qc.cli.submit import check_exclusive_options
check_exclusive_options(cgems, biowulf, cluster_profile)
@pytest.mark.parametrize(
"cgems,biowulf,cluster_profile",
[(True, True, "./test"), (True, True, None), (True, False, "./test"), (False, True, "./test")],
)
def test_check_exclusive_options_raises_error(cgems, biowulf, cluster_profile):
from click.exceptions import Exit as ClickExit
from cgr_gwas_qc.cli.submit import check_exclusive_options
with pytest.raises(ClickExit):
check_exclusive_options(cgems, biowulf, cluster_profile)
@pytest.mark.parametrize("cluster", ["cgems", "biowulf"])
def test_get_profile(cluster):
from pathlib import Path
from cgr_gwas_qc.cli.submit import get_profile
profile = Path(get_profile(cluster))
assert profile.exists() & profile.is_dir()
def test_check_custom_cluster_profile(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue = "all"
submission_cmd = "qsub"
assert cluster_profile.resolve().as_posix() == check_custom_cluster_profile(
cluster_profile, queue, submission_cmd
)
def test_check_custom_cluster_profile_no_profile(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
queue, cmd = "all", "qsub"
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_check_custom_cluster_profile_no_queue(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue, cmd = None, "qsub"
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_check_custom_cluster_profile_no_cmd(tmp_path):
from cgr_gwas_qc.cli.submit import check_custom_cluster_profile
# Missing profile directory
with pytest.raises(ValueError):
cluster_profile = tmp_path / "test_present"
cluster_profile.mkdir()
queue, cmd = "all", None
check_custom_cluster_profile(cluster_profile, queue, cmd)
def test_create_submission_script_cgems(tmp_path):
import os
from cgr_gwas_qc.cli.submit import create_submission_script
with chdir(tmp_path):
payload = {
"python_executable": "python",
"working_dir": os.getcwd(),
"cgems": True,
"biowulf": False,
"time_h": 12,
"queue": "all.q",
"profile": "test_profile",
"local_tasks": 1,
"local_mem_mb": 500,
"group_options": "",
}
create_submission_script(payload)
assert (
"#$ -N GwasQcPipeline" in (tmp_path / ".snakemake/GwasQcPipeline_submission.sh").read_text()
)
def test_create_submission_script_biowulf(tmp_path):
import os
from cgr_gwas_qc.cli.submit import create_submission_script
with chdir(tmp_path):
payload = {
"python_executable": "python",
"working_dir": os.getcwd(),
"cgems": False,
"biowulf": True,
"time_h": 12,
"queue": "all.q",
"profile": "test_profile",
"group_options": "",
}
create_submission_script(payload)
assert (
'#SBATCH --job-name="GwasQcPipeline"'
in (tmp_path / ".snakemake/GwasQcPipeline_submission.sh").read_text()
)
@pytest.mark.parametrize(
"cluster,cmd", [("cgems", "qsub"), ("biowulf", "sbatch"), ("custom", "pbs")]
)
def test_run_submit_with_right_command(cluster, cmd, tmp_path, mocker: MockerFixture):
from cgr_gwas_qc.cli import submit
if cluster == "cgems":
cgems, biowulf, cluster_profile, queue, submission_cmd = True, False, None, None, None
elif cluster == "biowulf": # Biowulf
cgems, biowulf, cluster_profile, queue, submission_cmd = False, True, None, None, None
else:
profile_dir = tmp_path / "test_profile"
profile_dir.mkdir()
cgems, biowulf, cluster_profile, queue, submission_cmd = (
False,
False,
profile_dir,
"all",
cmd,
)
spy = mocker.patch("cgr_gwas_qc.cli.submit.sp.check_output")
FakeData(tmp_path).make_cgr_sample_sheet().make_config()
with chdir(tmp_path):
submit.main(
cgems=cgems,
biowulf=biowulf,
cluster_profile=cluster_profile,
subworkflow=None,
time_hr=12,
queue=queue,
submission_cmd=submission_cmd,
dry_run=False,
notemp=False,
local_mem_mb=1024,
local_tasks=1,
)
spy.assert_called_once_with([cmd, ".snakemake/GwasQcPipeline_submission.sh"])
| en | 0.550776 | # Missing profile directory # Missing profile directory # Missing profile directory # Biowulf | 2.02904 | 2 |
migrations/versions/0713a93f9308_.py | Zepp333333/Cyclist-Performance | 0 | 6614409 | <gh_stars>0
"""empty message
Revision ID: 0713a93f9308
Revises: 1382008cc690
Create Date: 2021-08-06 16:09:10.385285
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0713a93f9308'
down_revision = '1382008cc690'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| """empty message
Revision ID: 0713a93f9308
Revises: 1382008cc690
Create Date: 2021-08-06 16:09:10.385285
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0713a93f9308'
down_revision = '1382008cc690'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass | en | 0.409914 | empty message Revision ID: 0713a93f9308 Revises: 1382008cc690 Create Date: 2021-08-06 16:09:10.385285 # revision identifiers, used by Alembic. | 1.044225 | 1 |
rlscope/scripts/rlscope_plot_index.py | UofT-EcoSystem/rlscope | 35 | 6614410 | """
Import index of ``*.venn_js.json`` files needed for multi-process visualization.
rlscope_plot_index_data.py is generated by
:py:mod:`rlscope.scripts.generate_rlscope_plot_index`.`
"""
import rlscope_plot_index_data
from rlscope.parser.plot_index import _DataIndex
DataIndex = _DataIndex(rlscope_plot_index_data.INDEX, rlscope_plot_index_data.DIRECTORY)
| """
Import index of ``*.venn_js.json`` files needed for multi-process visualization.
rlscope_plot_index_data.py is generated by
:py:mod:`rlscope.scripts.generate_rlscope_plot_index`.`
"""
import rlscope_plot_index_data
from rlscope.parser.plot_index import _DataIndex
DataIndex = _DataIndex(rlscope_plot_index_data.INDEX, rlscope_plot_index_data.DIRECTORY)
| en | 0.567441 | Import index of ``*.venn_js.json`` files needed for multi-process visualization. rlscope_plot_index_data.py is generated by :py:mod:`rlscope.scripts.generate_rlscope_plot_index`.` | 1.333584 | 1 |
scripts/dynamo_import.py | jaytmiller/calcloud | 7 | 6614411 | <filename>scripts/dynamo_import.py
import json
import boto3
import argparse
import csv
from decimal import Decimal
import sys
s3 = boto3.resource("s3")
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
def format_row_item(row):
row["timestamp"] = int(row["timestamp"])
row["x_files"] = float(row["x_files"])
row["x_size"] = float(row["x_size"])
row["drizcorr"] = int(row["drizcorr"])
row["pctecorr"] = int(row["pctecorr"])
row["crsplit"] = int(row["crsplit"])
row["subarray"] = int(row["subarray"])
row["detector"] = int(row["detector"])
row["dtype"] = int(row["dtype"])
row["instr"] = int(row["instr"])
row["wallclock"] = float(row["wallclock"])
row["memory"] = float(row["memory"])
row["mem_bin"] = float(row["mem_bin"])
row["n_files"] = float(row["n_files"])
row["total_mb"] = float(row["total_mb"])
row["bin_pred"] = float(row["bin_pred"])
row["mem_pred"] = float(row["mem_pred"])
row["wall_pred"] = float(row["wall_pred"])
row["wc_mean"] = float(row["wc_mean"])
row["wc_std"] = float(row["wc_std"])
row["wc_err"] = float(row["wc_err"])
return json.loads(json.dumps(row, allow_nan=True), parse_int=Decimal, parse_float=Decimal)
def write_to_dynamo(rows, table_name):
try:
table = dynamodb.Table(table_name)
except Exception as e:
print("Error loading DynamoDB table. Check if table was created correctly and environment variable.")
print(e)
try:
with table.batch_writer() as batch:
for i in range(len(rows)):
batch.put_item(Item=rows[i])
except:
import traceback
traceback.print_exc()
sys.exit()
def main(key, table_name):
input_file = csv.DictReader(open(key))
batch_size = 100
batch = []
for row in input_file:
try:
item = format_row_item(row)
except Exception as e:
import traceback
traceback.print_exc()
print(e)
print(row)
import sys
sys.exit()
if len(batch) >= batch_size:
write_to_dynamo(batch, table_name)
batch.clear()
print("Batch uploaded.")
batch.append(item)
if batch:
write_to_dynamo(batch, table_name)
return {"statusCode": 200, "body": json.dumps("Uploaded to DynamoDB Table")}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--table", type=str, default="calcloud-model-sb", help="ddb table")
parser.add_argument("-k", "--key", type=str, default="latest.csv", help="local csv filepath")
args = parser.parse_args()
table_name = args.table
key = args.key
main(key, table_name)
| <filename>scripts/dynamo_import.py
import json
import boto3
import argparse
import csv
from decimal import Decimal
import sys
s3 = boto3.resource("s3")
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
# Columns cast to int / float before the Decimal round-trip below.
_INT_FIELDS = ("timestamp", "drizcorr", "pctecorr", "crsplit", "subarray",
               "detector", "dtype", "instr")
_FLOAT_FIELDS = ("x_files", "x_size", "wallclock", "memory", "mem_bin",
                 "n_files", "total_mb", "bin_pred", "mem_pred", "wall_pred",
                 "wc_mean", "wc_std", "wc_err")


def format_row_item(row):
    """Convert one CSV row (all string values) into DynamoDB-compatible types.

    Numeric columns are first cast to int/float, then the whole row is
    round-tripped through JSON so every number becomes a Decimal — the only
    numeric type boto3 accepts for DynamoDB items.

    Parameters
    ----------
    row : dict
        One row from csv.DictReader; mutated in place during conversion.

    Returns
    -------
    dict
        The row with all numeric values as Decimal.
    """
    for field in _INT_FIELDS:
        row[field] = int(row[field])
    for field in _FLOAT_FIELDS:
        row[field] = float(row[field])
    return json.loads(json.dumps(row, allow_nan=True), parse_int=Decimal, parse_float=Decimal)
def write_to_dynamo(rows, table_name):
    """Batch-write a list of item dicts to a DynamoDB table.

    Parameters
    ----------
    rows : list[dict]
        Items already converted to DynamoDB-compatible types
        (see format_row_item).
    table_name : str
        Name of an existing DynamoDB table.

    Exits the process if the batch write fails, preserving the script's
    original fail-fast behaviour.
    """
    try:
        table = dynamodb.Table(table_name)
    except Exception as e:
        print("Error loading DynamoDB table. Check if table was created correctly and environment variable.")
        print(e)
        # Bail out: the original fell through with `table` unbound, which
        # produced a confusing NameError below.
        return
    try:
        # batch_writer() buffers puts and retries unprocessed items.
        with table.batch_writer() as batch:
            for row in rows:
                batch.put_item(Item=row)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed before the deliberate exit.
        import traceback
        traceback.print_exc()
        sys.exit()
def main(key, table_name):
    """Read rows from a local CSV file and upload them to DynamoDB in batches.

    Parameters
    ----------
    key : str
        Path to the local CSV file (first row is the header).
    table_name : str
        Destination DynamoDB table name.

    Returns
    -------
    dict
        Lambda-style response payload on success.
    """
    batch_size = 100
    batch = []
    # `with` guarantees the CSV handle is closed (the original leaked it).
    with open(key) as csv_file:
        for row in csv.DictReader(csv_file):
            try:
                item = format_row_item(row)
            except Exception as e:
                import traceback
                traceback.print_exc()
                print(e)
                print(row)
                sys.exit()
            # Flush a full batch before appending the next item, matching
            # the original ordering (batches of exactly batch_size).
            if len(batch) >= batch_size:
                write_to_dynamo(batch, table_name)
                batch.clear()
                print("Batch uploaded.")
            batch.append(item)
    if batch:
        # Flush the final, partially filled batch.
        write_to_dynamo(batch, table_name)
    return {"statusCode": 200, "body": json.dumps("Uploaded to DynamoDB Table")}
if __name__ == "__main__":
    # Command-line entry point: upload a local CSV file into DynamoDB.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--table", type=str, default="calcloud-model-sb", help="ddb table")
    parser.add_argument("-k", "--key", type=str, default="latest.csv", help="local csv filepath")
    args = parser.parse_args()
    table_name = args.table
    key = args.key
    main(key, table_name)
| none | 1 | 2.162166 | 2 | |
MIDI Remote Scripts/Push2/chain_selection_component.py | aarkwright/ableton_devices | 0 | 6614412 | <gh_stars>0
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\chain_selection_component.py
# Compiled at: 2019-04-23 16:19:13
from __future__ import absolute_import, print_function, unicode_literals
from itertools import count
from ableton.v2.base import listens, listens_group, liveobj_valid
from ableton.v2.control_surface.components import ItemProvider
from .colors import DISPLAY_BUTTON_SHADE_LEVEL, IndexedColor
from .item_lister import ItemListerComponent
class ChainProvider(ItemProvider):
    """ItemProvider that exposes the chains of a Live rack device and
    forwards Live's change notifications.

    NOTE(review): decompiled source (uncompyle6); comments describe only
    what is visible in this file.
    """

    def __init__(self, *a, **k):
        super(ChainProvider, self).__init__(*a, **k)
        self._rack = None  # rack device currently being observed, if any
        return

    def set_rack(self, rack):
        # Point this provider (and its listeners) at a new rack device;
        # no-op when the rack is unchanged.
        if rack != self._rack:
            rack_view = rack.view if rack else None
            self._rack = rack
            # Assigning `.subject` re-targets the @listens callbacks below.
            self.__on_chains_changed.subject = rack
            self.__on_selected_chain_changed.subject = rack_view
            self.notify_items()
            self.notify_selected_item()
        return

    @property
    def items(self):
        # (chain, nesting-level) pairs; nesting level is always 0 here.
        chains = self._rack.chains if liveobj_valid(self._rack) else []
        return [ (chain, 0) for chain in chains ]

    @property
    def chains(self):
        # Raw chain list of the rack, or [] when no valid rack is set.
        if liveobj_valid(self._rack):
            return self._rack.chains
        return []

    @property
    def selected_item(self):
        # Currently selected chain in the rack's view (None without a rack).
        if liveobj_valid(self._rack):
            return self._rack.view.selected_chain
        else:
            return

    def select_chain(self, chain):
        # Make `chain` the selected chain in Live's UI.
        self._rack.view.selected_chain = chain

    @listens(b'chains')
    def __on_chains_changed(self):
        self.notify_items()

    @listens(b'selected_chain')
    def __on_selected_chain_changed(self):
        self.notify_selected_item()
class ChainSelectionComponent(ItemListerComponent):
    """Item-lister component that lists the chains of a rack device and
    lets the select buttons choose the rack's selected chain.

    NOTE(review): decompiled source (uncompyle6); comments describe only
    what is visible in this file. The only code change is removing the
    dataset-concatenation residue ("| # uncompyle6 ...") that was fused
    onto the last line and made it a syntax error.
    """

    def __init__(self, *a, **k):
        self._chain_parent = ChainProvider()
        super(ChainSelectionComponent, self).__init__(item_provider=self._chain_parent, *a, **k)
        self.register_disconnectable(self._chain_parent)
        self.__on_items_changed.subject = self
        self.__on_items_changed()

    def _on_select_button_pressed(self, button):
        # Select the chain corresponding to the pressed button.
        self._chain_parent.select_chain(self.items[button.index].item)

    def _color_for_button(self, button_index, is_selected):
        if is_selected:
            return self.color_class_name + b'.ItemSelected'
        else:
            # Shade the chain's own Live color for unselected buttons.
            chain_color = self._chain_parent.chains[button_index].color_index
            return IndexedColor.from_live_index(chain_color, DISPLAY_BUTTON_SHADE_LEVEL)

    def set_parent(self, parent):
        assert parent is None or parent.can_have_chains
        self._chain_parent.set_rack(parent)
        return

    @listens(b'items')
    def __on_items_changed(self):
        # Track per-chain color changes whenever the item list changes.
        self.__on_chain_color_index_changed.replace_subjects(self._chain_parent.chains, identifiers=count())

    @listens_group(b'color_index')
    def __on_chain_color_index_changed(self, chain_index):
        self.select_buttons[chain_index].color = self._color_for_button(chain_index, self._items_equal(self.items[chain_index], self._item_provider.selected_item))
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\chain_selection_component.py
# Compiled at: 2019-04-23 16:19:13
from __future__ import absolute_import, print_function, unicode_literals
from itertools import count
from ableton.v2.base import listens, listens_group, liveobj_valid
from ableton.v2.control_surface.components import ItemProvider
from .colors import DISPLAY_BUTTON_SHADE_LEVEL, IndexedColor
from .item_lister import ItemListerComponent
class ChainProvider(ItemProvider):
def __init__(self, *a, **k):
super(ChainProvider, self).__init__(*a, **k)
self._rack = None
return
def set_rack(self, rack):
if rack != self._rack:
rack_view = rack.view if rack else None
self._rack = rack
self.__on_chains_changed.subject = rack
self.__on_selected_chain_changed.subject = rack_view
self.notify_items()
self.notify_selected_item()
return
@property
def items(self):
chains = self._rack.chains if liveobj_valid(self._rack) else []
return [ (chain, 0) for chain in chains ]
@property
def chains(self):
if liveobj_valid(self._rack):
return self._rack.chains
return []
@property
def selected_item(self):
if liveobj_valid(self._rack):
return self._rack.view.selected_chain
else:
return
def select_chain(self, chain):
self._rack.view.selected_chain = chain
@listens(b'chains')
def __on_chains_changed(self):
self.notify_items()
@listens(b'selected_chain')
def __on_selected_chain_changed(self):
self.notify_selected_item()
class ChainSelectionComponent(ItemListerComponent):
def __init__(self, *a, **k):
self._chain_parent = ChainProvider()
super(ChainSelectionComponent, self).__init__(item_provider=self._chain_parent, *a, **k)
self.register_disconnectable(self._chain_parent)
self.__on_items_changed.subject = self
self.__on_items_changed()
def _on_select_button_pressed(self, button):
self._chain_parent.select_chain(self.items[button.index].item)
def _color_for_button(self, button_index, is_selected):
if is_selected:
return self.color_class_name + b'.ItemSelected'
else:
chain_color = self._chain_parent.chains[button_index].color_index
return IndexedColor.from_live_index(chain_color, DISPLAY_BUTTON_SHADE_LEVEL)
def set_parent(self, parent):
assert parent is None or parent.can_have_chains
self._chain_parent.set_rack(parent)
return
@listens(b'items')
def __on_items_changed(self):
self.__on_chain_color_index_changed.replace_subjects(self._chain_parent.chains, identifiers=count())
@listens_group(b'color_index')
def __on_chain_color_index_changed(self, chain_index):
self.select_buttons[chain_index].color = self._color_for_button(chain_index, self._items_equal(self.items[chain_index], self._item_provider.selected_item)) | en | 0.433679 | # uncompyle6 version 3.3.5 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)] # Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\chain_selection_component.py # Compiled at: 2019-04-23 16:19:13 | 2.01295 | 2 |
ingest/datasets/sample/DataReader.py | ivmfnal/striped | 1 | 6614413 | <filename>ingest/datasets/sample/DataReader.py<gh_stars>1-10
import numpy as np, re, yaml
from striped.ingestion import UprootArray, UprootArrayBase, BaseDataReader
from striped.common import Tracer
def stripeAttr(groups, array):
    """Yield consecutive slices of *array*, one per group size in *groups*."""
    offset = 0
    for size in groups:
        end = offset + size
        yield array[offset:end]
        offset = end
def stripeBranchAttr(groups, counts, array):
    """Yield slices of *array*, each covering the total branch counts of
    one group of events.

    groups : per-stripe number of events
    counts : per-event number of branch entries
    array  : flat branch-level data
    """
    count_pos = 0
    data_pos = 0
    for group_size in groups:
        span = sum(counts[count_pos:count_pos + group_size])
        yield array[data_pos:data_pos + span]
        count_pos += group_size
        data_pos += span
class DataReader(BaseDataReader):
    """Synthetic data reader: serves random stripes shaped by a YAML
    config instead of reading a real input file (sample/test dataset).

    NOTE(review): Python 2 source (`print` statement below).
    """

    def __init__(self, file_path, schema):
        self.Schema = schema
        self.T = Tracer()
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML >= 5.1; the file handle
        # is also never closed explicitly.
        self.Config = yaml.load(open(file_path, "r"))
        self.NEvents = self.Config["NEvents"]        # total events to emit
        self.NBPerEvent = self.Config["NBPerEvent"]  # per-branch entry counts per event

    def profile(self):
        # No profiling information for the synthetic reader.
        return None

    def reopen(self):
        # Nothing to reopen: data is generated on the fly.
        pass

    def nevents(self):
        """Total number of events provided by this reader."""
        return self.NEvents

    def branchSizeArray(self, bname):
        """Per-event entry counts for branch *bname* (constant, from config)."""
        return np.repeat(self.NBPerEvent[bname], self.NEvents)

    def stripesAndSizes(self, groups, bname, attr_name, attr_desc):
        """Yield (stripe, size) pairs of random data for one attribute.

        groups    : iterable of events-per-stripe counts
        bname     : branch name, or None for event-level attributes
        attr_name : attribute name (used for logging only)
        attr_desc : dict with at least a "dtype" entry
        """
        dtype = attr_desc["dtype"]
        n_per_event = 1 if bname is None else self.NBPerEvent[bname]
        for g in groups:
            # Random values in [0, 10) cast to the attribute's dtype.
            arr = np.asarray(np.random.random((n_per_event*g,))*10.0, dtype=dtype)
            print bname, attr_name #, arr
            yield arr, None
| <filename>ingest/datasets/sample/DataReader.py<gh_stars>1-10
import numpy as np, re, yaml
from striped.ingestion import UprootArray, UprootArrayBase, BaseDataReader
from striped.common import Tracer
def stripeAttr(groups, array):
    """Yield consecutive slices of *array*, one per group size in *groups*."""
    offset = 0
    for size in groups:
        end = offset + size
        yield array[offset:end]
        offset = end
def stripeBranchAttr(groups, counts, array):
    """Yield slices of *array*, each covering the total branch counts of
    one group of events.

    groups : per-stripe number of events
    counts : per-event number of branch entries
    array  : flat branch-level data
    """
    count_pos = 0
    data_pos = 0
    for group_size in groups:
        span = sum(counts[count_pos:count_pos + group_size])
        yield array[data_pos:data_pos + span]
        count_pos += group_size
        data_pos += span
class DataReader(BaseDataReader):
def __init__(self, file_path, schema):
self.Schema = schema
self.T = Tracer()
self.Config = yaml.load(open(file_path, "r"))
self.NEvents = self.Config["NEvents"]
self.NBPerEvent = self.Config["NBPerEvent"]
def profile(self):
return None
def reopen(self):
pass
def nevents(self):
return self.NEvents
def branchSizeArray(self, bname):
return np.repeat(self.NBPerEvent[bname], self.NEvents)
def stripesAndSizes(self, groups, bname, attr_name, attr_desc):
dtype = attr_desc["dtype"]
n_per_event = 1 if bname is None else self.NBPerEvent[bname]
for g in groups:
arr = np.asarray(np.random.random((n_per_event*g,))*10.0, dtype=dtype)
print bname, attr_name #, arr
yield arr, None
| none | 1 | 2.181027 | 2 | |
mirage/libs/ble_utils/firewall.py | stabla/mirage | 0 | 6614414 | <filename>mirage/libs/ble_utils/firewall.py<gh_stars>0
import datetime
import configparser
import mirage.tables.rulesManagement as rm
from mirage.tables.bleATTManager import Attribute, Descriptor, Characteristic,Service
COUNTER_FIELD = 'counter'
TIMESTAMP_FIELD = 'timeStamp'
WINDOW_SIZE_IN_SECONDS = 20


class FirewallEventManager:
    """Per-event packet counters with last-seen timestamps, used for
    window/rate based firewall decisions."""

    def __init__(self, eventCounter: dict = None):
        # Bug fix: the original default `eventCounter: dict = {}` is a
        # mutable default argument, so every instance created without an
        # argument shared (and mutated) the very same dict.
        self.eventCounter = {} if eventCounter is None else eventCounter

    def resetCounters(self, eventName: str):
        """Zero the counter for eventName and stamp the current time."""
        self.eventCounter[eventName][COUNTER_FIELD] = 0
        self.eventCounter[eventName][TIMESTAMP_FIELD] = datetime.datetime.now()

    def initCounters(self, eventName: str):
        """Create and reset the counters for eventName on first sight."""
        if eventName not in self.eventCounter:
            self.eventCounter[eventName] = {}
            self.resetCounters(eventName)

    def countEvent(self, eventName: str):
        """Record one more packet for eventName and refresh its timestamp."""
        self.eventCounter[eventName][COUNTER_FIELD] += 1
        self.eventCounter[eventName][TIMESTAMP_FIELD] = datetime.datetime.now()

    def durationSinceLastPacket(self, eventName: str):
        """Whole seconds elapsed since the last packet of eventName.

        NOTE(review): uses timedelta.seconds, which ignores the days
        component; fine for the 20 s window used here, wrong for gaps
        over a day — confirm before reusing elsewhere.
        """
        delta = datetime.datetime.now() - self.getLastPacketTimestamp(eventName)
        return delta.seconds

    def getCurrentCount(self, eventName: str):
        """Current packet count for eventName."""
        return self.eventCounter[eventName][COUNTER_FIELD]

    def getLastPacketTimestamp(self, eventName: str):
        """datetime of the last recorded packet for eventName."""
        return self.eventCounter[eventName][TIMESTAMP_FIELD]

    def printEvent(self, eventName: str):
        # Debug helper: dump name, count and idle seconds to stdout.
        print(eventName)
        print(self.getCurrentCount(eventName))
        print(self.durationSinceLastPacket(eventName))
class Firewall_GattServer:
    """Rebuilds a GATT server from dumped ATT/GATT tables, dropping or
    substituting entries that match firewall rules.

    NOTE(review): indentation reconstructed from a flattened dump; the
    mutable default arguments ([]) are shared across calls — harmless
    here because they are never mutated, but worth confirming.
    """

    def importATT(self, filename="ATT_SLAVE_MITM", forbiddenAtrributes=[], replaceList=[],server=None):
        """Load an ATT table dump and add its attributes to *server*,
        applying the forbid/replace rules."""
        print("Importing ATT layer datas from "+filename+" ...")
        config = configparser.ConfigParser()
        config.read(filename)
        attribute = Attribute()
        for handle in config.sections():
            # Section names are hexadecimal attribute handles.
            attHandle = int(handle, 16)
            infos = config[handle]
            attType = infos.get("type")
            attValue = bytes.fromhex(
                infos.get("value") if infos.get("value") is not None else "")
            attribute = Attribute(attHandle, attType, attValue)
            forbidden = self.__authorizeGattInfo(attribute, forbiddenAtrributes)
            if forbidden:
                print(attribute)
                print('was refused')
                # A forbidden attribute may still be added if a substitute
                # is configured in replaceList.
                result = self.__getReplacement(attribute,replaceList)
                if result != False:
                    # NOTE(review): `result` (the substitute) is not used —
                    # the *original* attribute is added, and `type=` is
                    # passed ATTvalue rather than ATTtype; confirm intent.
                    server.addAttribute(handle=attribute.ATThandle,value=attribute.ATTvalue,type=attribute.ATTvalue,permissions=["Read","Write"])
            else:
                server.addAttribute(handle=attHandle,value=attValue,type=attType,permissions=["Read","Write"])
        pass

    def importGATT(self, filename="GATT_SLAVE_MITM", forbiddenServices=[], forbiddenCharacteristics=[], forbiddenDescriptors=[],server=None):
        """Load a GATT table dump and add its services, characteristics
        and descriptors to *server*, skipping forbidden entries."""
        print("Importing GATT layer datas from "+filename+" ...")
        config = configparser.ConfigParser()
        config.read(filename)
        for element in config.sections():
            infos = config[element]
            if "type" in infos:
                if infos.get("type") == "service":
                    # Section name is the hexadecimal start handle.
                    startHandle = int(element, 16)
                    endHandle = int(infos.get("endhandle"), 16)
                    uuid = bytes.fromhex(infos.get("uuid"))
                    service = Service(beginHandle=startHandle,
                        endHandle=endHandle, uuidValue=uuid, serviceType=infos.get('servicetype'))
                    forbidden = self.__authorizeGattInfo(service, forbiddenServices)
                    if not forbidden:
                        if infos.get("servicetype") == "primary":
                            server.addPrimaryService(uuid,startHandle)
                        else:
                            server.addSecondaryService(uuid,startHandle)
                    else:
                        print(service)
                        print('was refused')
                elif infos.get("type") == "characteristic":
                    declarationHandle = int(element, 16)
                    uuid = bytes.fromhex(infos.get("uuid"))
                    valueHandle = int(infos.get("valuehandle"), 16)
                    value = bytes.fromhex(infos.get("value"))
                    permissions = infos.get("permissions").split(",")
                    characteristic = Characteristic(declarationHandle=declarationHandle,
                        uuid=uuid, valueHandle=valueHandle, value=value, permissions=permissions)
                    forbidden = self.__authorizeGattInfo(
                        characteristic, forbiddenCharacteristics)
                    if not forbidden:
                        server.addCharacteristic(uuid,value,declarationHandle,valueHandle,permissions)
                    else :
                        print(characteristic)
                        print('was refused')
                elif infos.get("type") == "descriptor":
                    handle = int(element, 16)
                    uuid = bytes.fromhex(infos.get("uuid"))
                    value = bytes.fromhex(infos.get("value"))
                    descriptor = Descriptor(
                        handle=handle, uuid=uuid, value=value)
                    forbidden = self.__authorizeGattInfo(
                        descriptor, forbiddenDescriptors)
                    if not forbidden:
                        server.addDescriptor(uuid,value,handle)
                    else:
                        print(descriptor)
                        print('was refused')

    def __authorizeGattInfo(self, gattInformation, gattForbiddenRules):
        # True when the entry matches a forbid rule (i.e. must be dropped).
        return gattInformation in gattForbiddenRules

    def __getReplacement(self, attribute, replaceList):
        # Return the configured substitute for `attribute`, or False.
        for substitutionTuple in replaceList:
            if substitutionTuple[0] == attribute:
                return substitutionTuple[1]
        return False

    def doFiltering(self,characteristicRules,serviceRules,descriptorRules,attributeRules,gatt_modifier_rules):
        # NOTE(review): hard-coded absolute paths to the MITM dump files.
        self.importATT("/Users/ahmed/mirage/ATT_SLAVE_MITM",attributeRules,gatt_modifier_rules)
        self.importGATT('/Users/ahmed/mirage/GATT_SLAVE_MITM',serviceRules,characteristicRules, descriptorRules)
def checkRules(pathOfBleTables):
    """Parse a BLE-tables rule file and split its GATT rules by type.

    Parameters
    ----------
    pathOfBleTables : str
        Path of the rule file understood by rm.parseFile.

    Returns
    -------
    tuple
        (characteristicRules, serviceRules, descriptorRules,
         attributeRules, gatt_modifier_rules)
    """
    # Parse file
    print("PARSING")
    parsedFile = rm.parseFile(pathOfBleTables)
    print("DONE")
    # Default to empty rule sets so a file missing one of the sections no
    # longer raises NameError below (assumes rule collections are
    # list-like — TODO confirm against rulesManagement).
    gatt_filter_rules = []
    gatt_modifier_rules = []
    # Extract GATTFILTER RULES
    if rm.GATT_FILTER_SECTION in parsedFile:
        gatt_filter_rules = rm.getGattFilterRules(
            parsedFile[rm.GATT_FILTER_SECTION])
    # Extract ATT SUBSTITUTION RULES
    if rm.GATT_MODIFIER_SECTION in parsedFile:
        gatt_modifier_rules = rm.getGattModifierRules(
            parsedFile[rm.GATT_MODIFIER_SECTION])
    # Filter Rules By Type
    print("FILTERING START")
    characteristicRules = rm.getCharacteristicRules(gatt_filter_rules)
    serviceRules = rm.getServiceRules(gatt_filter_rules)
    descriptorRules = rm.getDescriptorRules(gatt_filter_rules)
    attributeRules = rm.getAttributeRules(gatt_filter_rules)
    print("FILTERING DONE")
    return (characteristicRules, serviceRules, descriptorRules, attributeRules, gatt_modifier_rules)
| <filename>mirage/libs/ble_utils/firewall.py<gh_stars>0
import datetime
import configparser
import mirage.tables.rulesManagement as rm
from mirage.tables.bleATTManager import Attribute, Descriptor, Characteristic,Service
COUNTER_FIELD = 'counter'
TIMESTAMP_FIELD = 'timeStamp'
WINDOW_SIZE_IN_SECONDS = 20


class FirewallEventManager:
    """Per-event packet counters with last-seen timestamps, used for
    window/rate based firewall decisions."""

    def __init__(self, eventCounter: dict = None):
        # Bug fix: the original default `eventCounter: dict = {}` is a
        # mutable default argument, so every instance created without an
        # argument shared (and mutated) the very same dict.
        self.eventCounter = {} if eventCounter is None else eventCounter

    def resetCounters(self, eventName: str):
        """Zero the counter for eventName and stamp the current time."""
        self.eventCounter[eventName][COUNTER_FIELD] = 0
        self.eventCounter[eventName][TIMESTAMP_FIELD] = datetime.datetime.now()

    def initCounters(self, eventName: str):
        """Create and reset the counters for eventName on first sight."""
        if eventName not in self.eventCounter:
            self.eventCounter[eventName] = {}
            self.resetCounters(eventName)

    def countEvent(self, eventName: str):
        """Record one more packet for eventName and refresh its timestamp."""
        self.eventCounter[eventName][COUNTER_FIELD] += 1
        self.eventCounter[eventName][TIMESTAMP_FIELD] = datetime.datetime.now()

    def durationSinceLastPacket(self, eventName: str):
        """Whole seconds elapsed since the last packet of eventName.

        NOTE(review): uses timedelta.seconds, which ignores the days
        component; fine for the 20 s window used here, wrong for gaps
        over a day — confirm before reusing elsewhere.
        """
        delta = datetime.datetime.now() - self.getLastPacketTimestamp(eventName)
        return delta.seconds

    def getCurrentCount(self, eventName: str):
        """Current packet count for eventName."""
        return self.eventCounter[eventName][COUNTER_FIELD]

    def getLastPacketTimestamp(self, eventName: str):
        """datetime of the last recorded packet for eventName."""
        return self.eventCounter[eventName][TIMESTAMP_FIELD]

    def printEvent(self, eventName: str):
        # Debug helper: dump name, count and idle seconds to stdout.
        print(eventName)
        print(self.getCurrentCount(eventName))
        print(self.durationSinceLastPacket(eventName))
class Firewall_GattServer:
def importATT(self, filename="ATT_SLAVE_MITM", forbiddenAtrributes=[], replaceList=[],server=None):
print("Importing ATT layer datas from "+filename+" ...")
config = configparser.ConfigParser()
config.read(filename)
attribute = Attribute()
for handle in config.sections():
attHandle = int(handle, 16)
infos = config[handle]
attType = infos.get("type")
attValue = bytes.fromhex(
infos.get("value") if infos.get("value") is not None else "")
attribute = Attribute(attHandle, attType, attValue)
forbidden = self.__authorizeGattInfo(attribute, forbiddenAtrributes)
if forbidden:
print(attribute)
print('was refused')
result = self.__getReplacement(attribute,replaceList)
if result != False:
server.addAttribute(handle=attribute.ATThandle,value=attribute.ATTvalue,type=attribute.ATTvalue,permissions=["Read","Write"])
else:
server.addAttribute(handle=attHandle,value=attValue,type=attType,permissions=["Read","Write"])
pass
def importGATT(self, filename="GATT_SLAVE_MITM", forbiddenServices=[], forbiddenCharacteristics=[], forbiddenDescriptors=[],server=None):
print("Importing GATT layer datas from "+filename+" ...")
config = configparser.ConfigParser()
config.read(filename)
for element in config.sections():
infos = config[element]
if "type" in infos:
if infos.get("type") == "service":
startHandle = int(element, 16)
endHandle = int(infos.get("endhandle"), 16)
uuid = bytes.fromhex(infos.get("uuid"))
service = Service(beginHandle=startHandle,
endHandle=endHandle, uuidValue=uuid, serviceType=infos.get('servicetype'))
forbidden = self.__authorizeGattInfo(service, forbiddenServices)
if not forbidden:
if infos.get("servicetype") == "primary":
server.addPrimaryService(uuid,startHandle)
else:
server.addSecondaryService(uuid,startHandle)
else:
print(service)
print('was refused')
elif infos.get("type") == "characteristic":
declarationHandle = int(element, 16)
uuid = bytes.fromhex(infos.get("uuid"))
valueHandle = int(infos.get("valuehandle"), 16)
value = bytes.fromhex(infos.get("value"))
permissions = infos.get("permissions").split(",")
characteristic = Characteristic(declarationHandle=declarationHandle,
uuid=uuid, valueHandle=valueHandle, value=value, permissions=permissions)
forbidden = self.__authorizeGattInfo(
characteristic, forbiddenCharacteristics)
if not forbidden:
server.addCharacteristic(uuid,value,declarationHandle,valueHandle,permissions)
else :
print(characteristic)
print('was refused')
elif infos.get("type") == "descriptor":
handle = int(element, 16)
uuid = bytes.fromhex(infos.get("uuid"))
value = bytes.fromhex(infos.get("value"))
descriptor = Descriptor(
handle=handle, uuid=uuid, value=value)
forbidden = self.__authorizeGattInfo(
descriptor, forbiddenDescriptors)
if not forbidden:
server.addDescriptor(uuid,value,handle)
else:
print(descriptor)
print('was refused')
def __authorizeGattInfo(self, gattInformation, gattForbiddenRules):
return gattInformation in gattForbiddenRules
def __getReplacement(self, attribute, replaceList):
for substitutionTuple in replaceList:
if substitutionTuple[0] == attribute:
return substitutionTuple[1]
return False
def doFiltering(self,characteristicRules,serviceRules,descriptorRules,attributeRules,gatt_modifier_rules):
self.importATT("/Users/ahmed/mirage/ATT_SLAVE_MITM",attributeRules,gatt_modifier_rules)
self.importGATT('/Users/ahmed/mirage/GATT_SLAVE_MITM',serviceRules,characteristicRules, descriptorRules)
def checkRules(pathOfBleTables):
    """Parse a BLE-tables rule file and split its GATT rules by type.

    Parameters
    ----------
    pathOfBleTables : str
        Path of the rule file understood by rm.parseFile.

    Returns
    -------
    tuple
        (characteristicRules, serviceRules, descriptorRules,
         attributeRules, gatt_modifier_rules)
    """
    # Parse file
    print("PARSING")
    parsedFile = rm.parseFile(pathOfBleTables)
    print("DONE")
    # Default to empty rule sets so a file missing one of the sections no
    # longer raises NameError below (assumes rule collections are
    # list-like — TODO confirm against rulesManagement).
    gatt_filter_rules = []
    gatt_modifier_rules = []
    # Extract GATTFILTER RULES
    if rm.GATT_FILTER_SECTION in parsedFile:
        gatt_filter_rules = rm.getGattFilterRules(
            parsedFile[rm.GATT_FILTER_SECTION])
    # Extract ATT SUBSTITUTION RULES
    if rm.GATT_MODIFIER_SECTION in parsedFile:
        gatt_modifier_rules = rm.getGattModifierRules(
            parsedFile[rm.GATT_MODIFIER_SECTION])
    # Filter Rules By Type
    print("FILTERING START")
    characteristicRules = rm.getCharacteristicRules(gatt_filter_rules)
    serviceRules = rm.getServiceRules(gatt_filter_rules)
    descriptorRules = rm.getDescriptorRules(gatt_filter_rules)
    attributeRules = rm.getAttributeRules(gatt_filter_rules)
    print("FILTERING DONE")
    return (characteristicRules, serviceRules, descriptorRules, attributeRules, gatt_modifier_rules)
| en | 0.418188 | # Parse file # Extract GATTFILTER RULES # Extract ATT SUBSTITUTION RULES # Filter Rules By Type | 2.085423 | 2 |
gen_dataset.py | bigfacebear/MaxOverlap | 0 | 6614415 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import os
import time
import math
import cv2
import numpy as np
import pickle
from scoop import futures
from image_utils import rotateImage
import gen_dataset_flags as FLAGS
if FLAGS.FILLED_SHAPE:
SRC_DIR = './filled_max_areas'
IMG_DIR = './filled_primitives'
DST_DIR = './filled_dataset_' + str(FLAGS.PAIR_NUM)
else:
SRC_DIR = './hollow_max_areas'
IMG_DIR = './hollow_primitives'
DST_DIR = './hollow_dataset_' + str(FLAGS.PAIR_NUM)
IMAGE_SIZE = FLAGS.IMAGE_SIZE
PAIR_NUM = FLAGS.PAIR_NUM
def cropImage(input_img):
    """Crop a 2-D grayscale image to the bounding box of its non-zero pixels.

    Vectorized with NumPy instead of the original O(H*W) pure-Python double
    loop. Returns an empty (0, 0)-shaped slice when the image is entirely
    zero, matching the original's empty-slice result.
    """
    rows, cols = np.nonzero(input_img)
    if rows.size == 0:
        # No foreground pixels: the original produced an empty slice too.
        return input_img[0:0, 0:0]
    top, bottom = rows.min(), rows.max()
    left, right = cols.min(), cols.max()
    return input_img[top:bottom + 1, left:right + 1]
def generateShapePair(params):
    """Generate one (L, K) image pair with random rotation/translation and
    write both to DST_DIR; return the precomputed max-overlap area.

    params : (i, mat, images)
        i      - pair index, used for the output file names
        mat    - primitive_num x primitive_num max-overlap-area matrix
        images - list of grayscale primitive images
    """
    i, mat, images = params
    primitive_num = mat.shape[0]
    # Pick two primitives independently (may be the same one).
    L_idx = random.randint(0, primitive_num - 1)
    K_idx = random.randint(0, primitive_num - 1)
    L_origin = images[L_idx]
    K_origin = images[K_idx]
    L = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
    K = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
    # Random rotation, then crop to the shape's bounding box.
    L_rotate = cropImage(rotateImage(L_origin, 360 * random.random()))
    K_rotate = cropImage(rotateImage(K_origin, 360 * random.random()))
    # Valid translation ranges that keep the cropped shape inside the canvas.
    L_translate_range = (IMAGE_SIZE - L_rotate.shape[1], IMAGE_SIZE - L_rotate.shape[0]) # (x, y)
    K_translate_range = (IMAGE_SIZE - K_rotate.shape[1], IMAGE_SIZE - K_rotate.shape[0])
    L_translate = (random.randint(0, L_translate_range[0] - 1), random.randint(0, L_translate_range[1] - 1)) # (x, y)
    K_translate = (random.randint(0, K_translate_range[0] - 1), random.randint(0, K_translate_range[1] - 1))
    # Paste the rotated shapes at their random offsets.
    L[L_translate[1]:L_translate[1] + L_rotate.shape[0], L_translate[0]:L_translate[0] + L_rotate.shape[1]] = L_rotate
    K[K_translate[1]:K_translate[1] + K_rotate.shape[0], K_translate[0]:K_translate[0] + K_rotate.shape[1]] = K_rotate
    cv2.imwrite(os.path.join(DST_DIR, str(i) + '_L.png'), L)
    cv2.imwrite(os.path.join(DST_DIR, str(i) + '_K.png'), K)
    # Label: max overlap area of the two primitives (rotation-invariant).
    return mat[L_idx][K_idx]
if __name__ == '__main__':
    # NOTE(review): Python 2 script (xrange below); run under scoop for
    # parallel pair generation (python -m scoop).
    try:
        with open(SRC_DIR) as fp:
            mat = np.array(pickle.load(fp))
    except IOError as err:
        # NOTE(review): on IOError `mat` stays unbound and the shape check
        # below raises NameError instead of exiting cleanly.
        print('File Error', err)
    if not os.path.exists(DST_DIR):
        os.makedirs(DST_DIR)
    # The max-area matrix must be square with at least two primitives.
    if len(mat.shape) != 2 or mat.shape[0] != mat.shape[1] or mat.shape[0] <= 1:
        print('Please provide valid *_max_area file')
        exit()
    start_time = time.time()
    primitive_num = mat.shape[0]
    # Load all primitive images once; workers reuse them per pair.
    images = [cv2.imread(os.path.join(IMG_DIR, str(i)+'.png'), cv2.IMREAD_GRAYSCALE) for i in xrange(primitive_num)]
    params_list = [(i, mat, images) for i in xrange(FLAGS.PAIR_NUM)]
    batch_size = FLAGS.BATCH_SIZE
    batch_num = int(math.ceil(len(params_list) / float(batch_size)))
    beg_time = time.time()
    overlap_areas = []
    cnt = 0
    for i in xrange(batch_num):
        # Distribute one batch of pair generations over scoop workers.
        overlap_areas += list(futures.map(generateShapePair, params_list[i*batch_size:min((i+1)*batch_size, len(params_list))]))
        cnt += batch_size
        print(cnt, '- duration =', time.time() - beg_time)
        beg_time = time.time()
    # Persist the per-pair labels next to the generated images.
    with open(os.path.join(DST_DIR, 'OVERLAP_AREAS'), 'wb') as fp:
        pickle.dump(overlap_areas, fp)
    print('duration =', time.time() - start_time)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import os
import time
import math
import cv2
import numpy as np
import pickle
from scoop import futures
from image_utils import rotateImage
import gen_dataset_flags as FLAGS
if FLAGS.FILLED_SHAPE:
SRC_DIR = './filled_max_areas'
IMG_DIR = './filled_primitives'
DST_DIR = './filled_dataset_' + str(FLAGS.PAIR_NUM)
else:
SRC_DIR = './hollow_max_areas'
IMG_DIR = './hollow_primitives'
DST_DIR = './hollow_dataset_' + str(FLAGS.PAIR_NUM)
IMAGE_SIZE = FLAGS.IMAGE_SIZE
PAIR_NUM = FLAGS.PAIR_NUM
def cropImage(input_img):
    """Crop a 2-D grayscale image to the bounding box of its non-zero pixels.

    Vectorized with NumPy instead of the original O(H*W) pure-Python double
    loop. Returns an empty (0, 0)-shaped slice when the image is entirely
    zero, matching the original's empty-slice result.
    """
    rows, cols = np.nonzero(input_img)
    if rows.size == 0:
        # No foreground pixels: the original produced an empty slice too.
        return input_img[0:0, 0:0]
    top, bottom = rows.min(), rows.max()
    left, right = cols.min(), cols.max()
    return input_img[top:bottom + 1, left:right + 1]
def generateShapePair(params):
i, mat, images = params
primitive_num = mat.shape[0]
L_idx = random.randint(0, primitive_num - 1)
K_idx = random.randint(0, primitive_num - 1)
L_origin = images[L_idx]
K_origin = images[K_idx]
L = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
K = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
L_rotate = cropImage(rotateImage(L_origin, 360 * random.random()))
K_rotate = cropImage(rotateImage(K_origin, 360 * random.random()))
L_translate_range = (IMAGE_SIZE - L_rotate.shape[1], IMAGE_SIZE - L_rotate.shape[0]) # (x, y)
K_translate_range = (IMAGE_SIZE - K_rotate.shape[1], IMAGE_SIZE - K_rotate.shape[0])
L_translate = (random.randint(0, L_translate_range[0] - 1), random.randint(0, L_translate_range[1] - 1)) # (x, y)
K_translate = (random.randint(0, K_translate_range[0] - 1), random.randint(0, K_translate_range[1] - 1))
L[L_translate[1]:L_translate[1] + L_rotate.shape[0], L_translate[0]:L_translate[0] + L_rotate.shape[1]] = L_rotate
K[K_translate[1]:K_translate[1] + K_rotate.shape[0], K_translate[0]:K_translate[0] + K_rotate.shape[1]] = K_rotate
cv2.imwrite(os.path.join(DST_DIR, str(i) + '_L.png'), L)
cv2.imwrite(os.path.join(DST_DIR, str(i) + '_K.png'), K)
return mat[L_idx][K_idx]
if __name__ == '__main__':
try:
with open(SRC_DIR) as fp:
mat = np.array(pickle.load(fp))
except IOError as err:
print('File Error', err)
if not os.path.exists(DST_DIR):
os.makedirs(DST_DIR)
if len(mat.shape) != 2 or mat.shape[0] != mat.shape[1] or mat.shape[0] <= 1:
print('Please provide valid *_max_area file')
exit()
start_time = time.time()
primitive_num = mat.shape[0]
images = [cv2.imread(os.path.join(IMG_DIR, str(i)+'.png'), cv2.IMREAD_GRAYSCALE) for i in xrange(primitive_num)]
params_list = [(i, mat, images) for i in xrange(FLAGS.PAIR_NUM)]
batch_size = FLAGS.BATCH_SIZE
batch_num = int(math.ceil(len(params_list) / float(batch_size)))
beg_time = time.time()
overlap_areas = []
cnt = 0
for i in xrange(batch_num):
overlap_areas += list(futures.map(generateShapePair, params_list[i*batch_size:min((i+1)*batch_size, len(params_list))]))
cnt += batch_size
print(cnt, '- duration =', time.time() - beg_time)
beg_time = time.time()
with open(os.path.join(DST_DIR, 'OVERLAP_AREAS'), 'wb') as fp:
pickle.dump(overlap_areas, fp)
print('duration =', time.time() - start_time) | en | 0.517151 | # (x, y) # (x, y) | 2.199322 | 2 |
class A1():
    # who_am_i deliberately left undefined so lookup continues along the MRO.
    # def who_am_i(self):
    #     print("I am a A1")
    pass

class A2():
    def who_am_i(self):
        print("I am a A2")

class A3():
    def who_am_i(self):
        print("I am a A3")

class B(A1, A2):
    # def who_am_i(self):
    #     print("I am a B")
    pass

class C(A3):
    def who_am_i(self):
        print("I am a C")

class D(B,C):
    # def who_am_i(self):
    #     print("I am a D")
    pass

# C3 linearization: D -> B -> A1 -> A2 -> C -> A3 -> object, so the call
# below resolves who_am_i on A2 and prints "I am a A2" (every class is
# new-style in Python 3, despite the "oldstyle" file name).
# The only code change is removing dataset-path residue that was fused
# onto the `class A1():` line in the stored copy.
d1 = D()
d1.who_am_i()
class A1():
    # who_am_i deliberately left undefined so lookup continues along the MRO.
    # def who_am_i(self):
    #     print("I am a A1")
    pass

class A2():
    def who_am_i(self):
        print("I am a A2")

class A3():
    def who_am_i(self):
        print("I am a A3")

class B(A1, A2):
    # def who_am_i(self):
    #     print("I am a B")
    pass

class C(A3):
    def who_am_i(self):
        print("I am a C")

class D(B,C):
    # def who_am_i(self):
    #     print("I am a D")
    pass

# C3 linearization: D -> B -> A1 -> A2 -> C -> A3 -> object, so the call
# below resolves who_am_i on A2 and prints "I am a A2" (every class is
# new-style in Python 3, despite the "oldstyle" file name).
d1 = D()
d1.who_am_i()
| en | 0.57188 | # def who_am_i(self): # print("I am a A1") # def who_am_i(self): # print("I am a B") # def who_am_i(self): # print("I am a D") | 3.526538 | 4 |
aps/aps_io/grid_data.py | kmunve/APS | 0 | 6614417 | <gh_stars>0
from numpy import arange, meshgrid, empty
from abc import ABCMeta, abstractmethod
from pyproj import Proj
class ModelGrid(metaclass=ABCMeta):
    """Creates a spatial grid"""

    def __init__(self):
        self.var_name = ''  # name of the modelled variable

    @abstractmethod  # flags method that MUST be implemented by all subclasses
    def to_netCDF(self, filename):
        """Write the grid (data and metadata) to a netCDF file."""
        pass

    @property
    @abstractmethod
    def _convert_to_CS(self):
        # NOTE(review): declared as a property here, but SeNorgeGrid
        # overrides it as a plain method — confirm which is intended.
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}, Abstract base class for model grids."
class SeNorgeGrid(ModelGrid):
    """The 1 km seNorge grid (UTM zone 33N / WGS84) covering Norway."""

    def __init__(self, var_name: str):
        # Bug fix: the original called super(ModelGrid, self).__init__(),
        # which starts the MRO lookup *after* ModelGrid and therefore
        # skipped ModelGrid.__init__ entirely.
        super().__init__()
        self.var_name = var_name
        # lower left corner in m
        self.LowerLeftEast = -75000
        self.LowerLeftNorth = 6450000
        # upper right corner in m
        self.UpperRightEast = 1120000
        self.UpperRightNorth = 8000000
        # cell size in m
        self.dx = 1000
        self.dy = 1000
        # Cell coordinates (arange excludes the upper bound).
        self.x = arange(self.LowerLeftEast, self.UpperRightEast, self.dx)
        self.y = arange(self.LowerLeftNorth, self.UpperRightNorth, self.dy)
        self.number_of_cells = len(self.x) * len(self.y)
        # NOTE(review): values is allocated (x, y)-shaped while
        # from_ndarray documents (y, x) input — confirm intended layout.
        self.values = empty(shape=(len(self.x), len(self.y)), dtype=float)

    def _convert_to_CS(self):
        # Build the 2-D coordinate mesh and derive lon/lat via pyproj.
        self.xgrid, self.ygrid = meshgrid(self.x, self.y)
        self.p = Proj('+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')
        self.lon, self.lat = self.p(self.xgrid, self.ygrid, inverse=True)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.var_name}: #cells: x({len(self.x)}) by y({len(self.y)}) = {self.number_of_cells}," \
               f" resolution: {self.dx} by {self.dy} m)"

    def to_netCDF(self, filename):
        """
        Saves the data in netCDF format
        :return: a netCDF file containing the data and metadata of the grid
        """
        pass

    def from_netCDF(self, netcdf_file):
        """
        Import data and metadata from a netCDF file
        :param netcdf_file: NetCDF file to load
        """
        pass

    def from_BIL(self, bil_file):
        """
        Import grid data from a BIL file.
        :param bil_file: Binary data file
        """
        pass

    def from_ndarray(self, arr):
        """
        :param arr: numpy array of shape (self.y, self.x)
        :return:
        """
        self.values = arr
if __name__ == "__main__":
    # Smoke test: build a grid and show its repr.
    sg = SeNorgeGrid('Temperature')
    print(sg)
| from numpy import arange, meshgrid, empty
from abc import ABCMeta, abstractmethod
from pyproj import Proj
class ModelGrid(metaclass=ABCMeta):
"""Creates a spatial grid"""
def __init__(self):
self.var_name = '' # name of the modelled variable
@abstractmethod # flags method that MUST be implemented by all subclasses
def to_netCDF(self, filename):
pass
@property
@abstractmethod
def _convert_to_CS(self):
pass
def __repr__(self):
return f"{self.__class__.__name__}, Abstract base class for model grids."
class SeNorgeGrid(ModelGrid):
def __init__(self, var_name: str):
super(ModelGrid, self).__init__()
self.var_name = var_name
# lower left corner in m
self.LowerLeftEast = -75000
self.LowerLeftNorth = 6450000
# upper right corner in m
self.UpperRightEast = 1120000
self.UpperRightNorth = 8000000
# interval
self.dx = 1000
self.dy = 1000
self.x = arange(self.LowerLeftEast, self.UpperRightEast, self.dx)
self.y = arange(self.LowerLeftNorth, self.UpperRightNorth, self.dy)
self.number_of_cells = len(self.x) * len(self.y)
self.values = empty(shape=(len(self.x), len(self.y)), dtype=float)
def _convert_to_CS(self):
# Converts vector into coordinate system
self.xgrid, self.ygrid = meshgrid(self.x, self.y)
self.p = Proj('+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')
self.lon, self.lat = self.p(self.xgrid, self.ygrid, inverse=True)
def __repr__(self):
return f"{self.__class__.__name__}({self.var_name}: #cells: x({len(self.x)}) by y({len(self.y)}) = {self.number_of_cells}," \
f" resolution: {self.dx} by {self.dy} m)"
def to_netCDF(self, filename):
"""
Saves the data in netCDF format
:return: a netCDF file containing the data and metadata of the grid
"""
pass
def from_netCDF(self, netcdf_file):
"""
Import data and metadata from a netCDF file
:param netcdf_file: NetCDF file to load
"""
pass
def from_BIL(self, bil_file):
"""
Import grid data from a BIL file.
:param bil_file: Binary data file
"""
pass
def from_ndarray(self, arr):
"""
:param arr: numpy array of shape (self.y, self.x)
:return:
"""
self.values = arr
if __name__ == "__main__":
sg = SeNorgeGrid('Temperature')
print(sg) | en | 0.745364 | Creates a spatial grid # name of the modelled variable # flags method that MUST be implemented by all subclasses # lower left corner in m # upper right corner in m # interval # Converts vector into coordinate system #cells: x({len(self.x)}) by y({len(self.y)}) = {self.number_of_cells}," \ Saves the data in netCDF format :return: a netCDF file containing the data and metadata of the grid Import data and metadata from a netCDF file :param netcdf_file: NetCDF file to load Import grid data from a BIL file. :param bil_file: Binary data file :param arr: numpy array of shape (self.y, self.x) :return: | 2.96322 | 3 |
tests/swim_aim/xml/test_mapper_fields.py | eurocontrol-swim/swim-aim | 0 | 6614418 | <filename>tests/swim_aim/xml/test_mapper_fields.py
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
from datetime import datetime
from unittest import mock
import pytest
from lxml import etree
from swim_aim.xml import NAMESPACES
from swim_aim.xml.mapper_fields import XMLMapperField, DatetimeXMLMapperField, FloatXMLMapperField, \
IntegerXMLMapperField
__author__ = "EUROCONTROL (SWIM)"
@pytest.mark.parametrize('xpath', [
'', 'invalid_xpath', 'invalid xpath', 'invalid.xpath', 'invalid:xpath'
])
def test_mapper_field__xpath_not_separated_by_slashes__raises_valueerror(xpath):
with pytest.raises(ValueError) as e:
XMLMapperField(xpath)
assert 'Invalid xpath' == str(e.value)
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_integer_mapper_field__invalid_value__raises_on_strict_mode(mock_mapper_field):
imf = IntegerXMLMapperField('./some/xpath', strict=True)
with pytest.raises(TypeError) as e:
imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value='some_value')
def test_integer_mapper_field__returns_value_on_strict_mode_false(mock_mapper_field):
imf = IntegerXMLMapperField('./some/xpath', strict=False)
assert 'some_value' == imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_float_mapper_field__invalid_value__raises_on_strict_mode(mock_mapper_field):
fmf = FloatXMLMapperField('./some/xpath', strict=True)
with pytest.raises(TypeError) as e:
fmf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value='some_value')
def test_float_mapper_field__returns_value_on_strict_mode_false(mock_mapper_field):
imf = FloatXMLMapperField('./some/xpath', strict=False)
assert 'some_value' == imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_datetime_mapper_field__mapped_value_is_none__returns_none(mock_mapper_field):
dmf = DatetimeXMLMapperField('./some/xpath', strict=True)
assert dmf.from_xml(mock.Mock()) is None
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<gml:identifier codeSpace="urn:uuid:">2193b095-8bd7-40e4-ba10-2a5a3cf29901</gml:identifier>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./gml:identifier',
XMLMapperField,
'2193b095-8bd7-40e4-ba10-2a5a3cf29901'
),
(
# datetime from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:featureLifetime>
<gml:TimePeriod gml:id="ID_5052_1548813652630_5">
<gml:beginPosition>2013-11-14T00:00:00</gml:beginPosition>
<gml:endPosition indeterminatePosition="unknown"/>
</gml:TimePeriod>
</aixm:featureLifetime>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:featureLifetime/gml:TimePeriod/gml:beginPosition',
DatetimeXMLMapperField,
datetime(2013, 11, 14, 0, 0, 0)
),
(
# float from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100.5</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation',
FloatXMLMapperField,
100.5
),
(
# integer from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation',
IntegerXMLMapperField,
100
),
(
# string from attribute
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100.5</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation/@uom',
XMLMapperField,
'FT'
)
])
def test_mapper_field__from_xml_returns_the_correct_value(xml_string, xpath, mapper_field_class, expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:AirportHeliport', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES)
assert expected_mapped_value == mf.from_xml(element)
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from attribute with namespace
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2">
<gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier>
<aixm:timeSlice>
<aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3">
<aixm:start>
<aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6">
<aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink"
xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/>
</aixm:EnRouteSegmentPoint>
</aixm:start>
</aixm:RouteSegmentTimeSlice>
</aixm:timeSlice>
</aixm:RouteSegment>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:RouteSegmentTimeSlice/aixm:start/aixm:EnRouteSegmentPoint/*[@xlink:href]/@xlink:href',
XMLMapperField,
'urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4'
)
])
def test_mapper_field__maps_attribute_with_namespace(xml_string, xpath, mapper_field_class, expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:RouteSegment', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES)
assert expected_mapped_value == mf.from_xml(element)
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from attribute with namespace
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2">
<gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier>
<aixm:timeSlice>
<aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3">
<aixm:start>
<aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6">
<aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink"
xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/>
</aixm:EnRouteSegmentPoint>
</aixm:start>
</aixm:RouteSegmentTimeSlice>
</aixm:timeSlice>
</aixm:RouteSegment>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:RouteSegmentTimeSlice/aixm:start/aixm:EnRouteSegmentPoint/*[@xlink:href]/@xlink:href',
XMLMapperField,
'ed74d8c5-91c6-4567-a95d-602cd48c19f4'
)
])
def test_mapper_field__maps_attribute_with_namespace_with_post_map(xml_string, xpath, mapper_field_class,
expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:RouteSegment', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES, post_map=lambda v: v.replace('urn:uuid:', ''))
assert expected_mapped_value == mf.from_xml(element)
| <filename>tests/swim_aim/xml/test_mapper_fields.py
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
from datetime import datetime
from unittest import mock
import pytest
from lxml import etree
from swim_aim.xml import NAMESPACES
from swim_aim.xml.mapper_fields import XMLMapperField, DatetimeXMLMapperField, FloatXMLMapperField, \
IntegerXMLMapperField
__author__ = "EUROCONTROL (SWIM)"
@pytest.mark.parametrize('xpath', [
'', 'invalid_xpath', 'invalid xpath', 'invalid.xpath', 'invalid:xpath'
])
def test_mapper_field__xpath_not_separated_by_slashes__raises_valueerror(xpath):
with pytest.raises(ValueError) as e:
XMLMapperField(xpath)
assert 'Invalid xpath' == str(e.value)
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_integer_mapper_field__invalid_value__raises_on_strict_mode(mock_mapper_field):
imf = IntegerXMLMapperField('./some/xpath', strict=True)
with pytest.raises(TypeError) as e:
imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value='some_value')
def test_integer_mapper_field__returns_value_on_strict_mode_false(mock_mapper_field):
imf = IntegerXMLMapperField('./some/xpath', strict=False)
assert 'some_value' == imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_float_mapper_field__invalid_value__raises_on_strict_mode(mock_mapper_field):
fmf = FloatXMLMapperField('./some/xpath', strict=True)
with pytest.raises(TypeError) as e:
fmf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value='some_value')
def test_float_mapper_field__returns_value_on_strict_mode_false(mock_mapper_field):
imf = FloatXMLMapperField('./some/xpath', strict=False)
assert 'some_value' == imf.from_xml(mock.Mock())
@mock.patch.object(XMLMapperField, '_get_value', return_value=None)
def test_datetime_mapper_field__mapped_value_is_none__returns_none(mock_mapper_field):
dmf = DatetimeXMLMapperField('./some/xpath', strict=True)
assert dmf.from_xml(mock.Mock()) is None
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<gml:identifier codeSpace="urn:uuid:">2193b095-8bd7-40e4-ba10-2a5a3cf29901</gml:identifier>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./gml:identifier',
XMLMapperField,
'2193b095-8bd7-40e4-ba10-2a5a3cf29901'
),
(
# datetime from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:featureLifetime>
<gml:TimePeriod gml:id="ID_5052_1548813652630_5">
<gml:beginPosition>2013-11-14T00:00:00</gml:beginPosition>
<gml:endPosition indeterminatePosition="unknown"/>
</gml:TimePeriod>
</aixm:featureLifetime>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:featureLifetime/gml:TimePeriod/gml:beginPosition',
DatetimeXMLMapperField,
datetime(2013, 11, 14, 0, 0, 0)
),
(
# float from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100.5</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation',
FloatXMLMapperField,
100.5
),
(
# integer from element
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation',
IntegerXMLMapperField,
100
),
(
# string from attribute
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2">
<aixm:timeSlice>
<aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3">
<aixm:ARP>
<aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7">
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos>
<aixm:elevation uom="FT">100.5</aixm:elevation>
</aixm:ElevatedPoint>
</aixm:ARP>
</aixm:AirportHeliportTimeSlice>
</aixm:timeSlice>
</aixm:AirportHeliport>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:AirportHeliportTimeSlice/aixm:ARP/aixm:ElevatedPoint/aixm:elevation/@uom',
XMLMapperField,
'FT'
)
])
def test_mapper_field__from_xml_returns_the_correct_value(xml_string, xpath, mapper_field_class, expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:AirportHeliport', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES)
assert expected_mapped_value == mf.from_xml(element)
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from attribute with namespace
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2">
<gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier>
<aixm:timeSlice>
<aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3">
<aixm:start>
<aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6">
<aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink"
xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/>
</aixm:EnRouteSegmentPoint>
</aixm:start>
</aixm:RouteSegmentTimeSlice>
</aixm:timeSlice>
</aixm:RouteSegment>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:RouteSegmentTimeSlice/aixm:start/aixm:EnRouteSegmentPoint/*[@xlink:href]/@xlink:href',
XMLMapperField,
'urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4'
)
])
def test_mapper_field__maps_attribute_with_namespace(xml_string, xpath, mapper_field_class, expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:RouteSegment', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES)
assert expected_mapped_value == mf.from_xml(element)
@pytest.mark.parametrize('xml_string, xpath, mapper_field_class, expected_mapped_value', [
(
# string from attribute with namespace
"""<?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1">
<adrmsg:hasMember>
<aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2">
<gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier>
<aixm:timeSlice>
<aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3">
<aixm:start>
<aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6">
<aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink"
xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/>
</aixm:EnRouteSegmentPoint>
</aixm:start>
</aixm:RouteSegmentTimeSlice>
</aixm:timeSlice>
</aixm:RouteSegment>
</adrmsg:hasMember>
</adrmsg:ADRMessage>
""".encode('utf-8'),
'./aixm:timeSlice/aixm:RouteSegmentTimeSlice/aixm:start/aixm:EnRouteSegmentPoint/*[@xlink:href]/@xlink:href',
XMLMapperField,
'ed74d8c5-91c6-4567-a95d-602cd48c19f4'
)
])
def test_mapper_field__maps_attribute_with_namespace_with_post_map(xml_string, xpath, mapper_field_class,
expected_mapped_value):
xml = etree.fromstring(xml_string)
element = xml.find('./adrmsg:hasMember/aixm:RouteSegment', NAMESPACES)
mf = mapper_field_class(xpath=xpath, namespaces=NAMESPACES, post_map=lambda v: v.replace('urn:uuid:', ''))
assert expected_mapped_value == mf.from_xml(element)
| en | 0.232419 | Copyright 2019 EUROCONTROL ========================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
========================================== Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative: http://opensource.org/licenses/BSD-3-Clause Details on EUROCONTROL: http://www.eurocontrol.int # string from element <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2"> <gml:identifier codeSpace="urn:uuid:">2193b095-8bd7-40e4-ba10-2a5a3cf29901</gml:identifier> </aixm:AirportHeliport> </adrmsg:hasMember> </adrmsg:ADRMessage> # datetime from element <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2"> <aixm:timeSlice> <aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3"> <aixm:featureLifetime> <gml:TimePeriod gml:id="ID_5052_1548813652630_5"> <gml:beginPosition>2013-11-14T00:00:00</gml:beginPosition> <gml:endPosition indeterminatePosition="unknown"/> </gml:TimePeriod> </aixm:featureLifetime> </aixm:AirportHeliportTimeSlice> </aixm:timeSlice> </aixm:AirportHeliport> </adrmsg:hasMember> </adrmsg:ADRMessage> # float from element <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2"> <aixm:timeSlice> <aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3"> <aixm:ARP> <aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7"> 
<gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos> <aixm:elevation uom="FT">100.5</aixm:elevation> </aixm:ElevatedPoint> </aixm:ARP> </aixm:AirportHeliportTimeSlice> </aixm:timeSlice> </aixm:AirportHeliport> </adrmsg:hasMember> </adrmsg:ADRMessage> # integer from element <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2"> <aixm:timeSlice> <aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3"> <aixm:ARP> <aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7"> <gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos> <aixm:elevation uom="FT">100</aixm:elevation> </aixm:ElevatedPoint> </aixm:ARP> </aixm:AirportHeliportTimeSlice> </aixm:timeSlice> </aixm:AirportHeliport> </adrmsg:hasMember> </adrmsg:ADRMessage> # string from attribute <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:AirportHeliport xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5052_1548813652630_2"> <aixm:timeSlice> <aixm:AirportHeliportTimeSlice gml:id="ID_5052_1548813652630_3"> <aixm:ARP> <aixm:ElevatedPoint gml:id="ID_5052_1548813652630_7"> <gml:pos srsName="urn:ogc:def:crs:EPSG::4326">-8.698333333333334 160.67833333333334</gml:pos> <aixm:elevation uom="FT">100.5</aixm:elevation> </aixm:ElevatedPoint> </aixm:ARP> </aixm:AirportHeliportTimeSlice> </aixm:timeSlice> </aixm:AirportHeliport> </adrmsg:hasMember> </adrmsg:ADRMessage> # string from attribute with namespace <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage 
xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2"> <gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier> <aixm:timeSlice> <aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3"> <aixm:start> <aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6"> <aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/> </aixm:EnRouteSegmentPoint> </aixm:start> </aixm:RouteSegmentTimeSlice> </aixm:timeSlice> </aixm:RouteSegment> </adrmsg:hasMember> </adrmsg:ADRMessage> # string from attribute with namespace <?xml version='1.0' encoding='UTF-8'?><adrmsg:ADRMessage xmlns:adrmsg="http://www.eurocontrol.int/cfmu/b2b/ADRMessage" xmlns:gml="http://www.opengis.net/gml/3.2" gml:id="ID_5052_1548813652630_1"> <adrmsg:hasMember> <aixm:RouteSegment xmlns:aixm="http://www.aixm.aero/schema/5.1" gml:id="ID_5063_1548813656537_2"> <gml:identifier codeSpace="urn:uuid:">5f7c0b50-b667-470e-953f-8ae479a5df3e</gml:identifier> <aixm:timeSlice> <aixm:RouteSegmentTimeSlice gml:id="ID_5063_1548813656537_3"> <aixm:start> <aixm:EnRouteSegmentPoint gml:id="ID_5063_1548813656537_6"> <aixm:pointChoice_navaidSystem xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="urn:uuid:ed74d8c5-91c6-4567-a95d-602cd48c19f4"/> </aixm:EnRouteSegmentPoint> </aixm:start> </aixm:RouteSegmentTimeSlice> </aixm:timeSlice> </aixm:RouteSegment> </adrmsg:hasMember> </adrmsg:ADRMessage> | 1.450382 | 1 |
autogluon/searcher/bayesopt/gpmxnet/custom_op.py | NunoEdgarGFlowHub/autogluon | 6 | 6614419 | import mxnet as mx
import logging
logger = logging.getLogger(__name__)
__all__ = ['AddJitterOp', 'AddJitterOpProp']
INITIAL_JITTER_FACTOR = 1e-9    # first nonzero jitter tried = this factor * mean(diag(x))
JITTER_GROWTH = 10.             # multiplicative growth of the jitter on each retry
JITTER_UPPERBOUND_FACTOR = 1e3  # loop safeguard: jitter <= factor * max(1, mean(diag(x)))
class AddJitterOp(mx.operator.CustomOp):
    """
    Finds smaller jitter to add to diagonal of square matrix to render the
    matrix positive definite (in that linalg.potrf works).
    Given input x (positive semi-definite matrix) and sigsq_init (nonneg
    scalar), find sigsq_final (nonneg scalar), so that:
        sigsq_final = sigsq_init + jitter, jitter >= 0,
        x + sigsq_final * Id positive definite (so that potrf call works)
    We return the matrix x + sigsq_final * Id, for which potrf has not failed.
    For the gradient, the dependence of jitter on the inputs is ignored.
    The values tried for sigsq_final are:
        sigsq_init, sigsq_init + initial_jitter * (jitter_growth ** k),
        k = 0, 1, 2, ...,
        initial_jitter = initial_jitter_factor * mean(diag(x))
    Note: The scaling of initial_jitter with mean(diag(x)) is taken from GPy.
    The rationale is that the largest eigenvalue of x is >= mean(diag(x)), and
    likely of this magnitude.
    There is no guarantee that the Cholesky factor returned is well-conditioned
    enough for subsequent computations to be reliable. A better solution
    would be to estimate the condition number of the Cholesky factor, and to add
    jitter until this is bounded below a threshold we tolerate. See
        <NAME>.
        A Survey of Condition Number Estimation for Triangular Matrices
        MIMS EPrint: 2007.10
    Algorithm 4.1 could work for us.
    """
    def __init__(
            self, initial_jitter_factor, jitter_growth, debug_log, **kwargs):
        """
        :param initial_jitter_factor: scales mean(diag(x)) to obtain the first
            nonzero jitter tried; must be > 0
        :param jitter_growth: multiplicative growth of the jitter per retry;
            must be > 1
        :param debug_log: the string 'true' enables per-attempt logging
        """
        super(AddJitterOp, self).__init__(**kwargs)
        assert initial_jitter_factor > 0. and jitter_growth > 1.
        self._initial_jitter_factor = initial_jitter_factor
        self._jitter_growth = jitter_growth
        self._debug_log = debug_log

    def _get_constant_identity(self, x, constant):
        # Returns constant * I with the same size, context and dtype as x.
        n, _ = x.shape
        return mx.nd.diag(
            mx.nd.ones(shape=(n,), ctx=x.context, dtype=x.dtype) * constant)

    def _get_jitter_upperbound(self, x):
        # To define a safeguard in the while-loop of the forward,
        # we define an upperbound on the jitter we can reasonably add
        # the bound is quite generous, and is dependent on the scale of the input x
        # (the scale is captured via the trace of x)
        # the primary goal is avoid any infinite while-loop.
        return JITTER_UPPERBOUND_FACTOR * max(
            1., mx.nd.mean(mx.nd.diag(x)).asscalar())

    def forward(self, is_train, req, in_data, out_data, aux):
        # in_data[0]: square matrix x; in_data[1]: scalar sigsq_init.
        x = in_data[0]
        sigsq_init = in_data[1]
        jitter = 0.
        jitter_upperbound = self._get_jitter_upperbound(x)
        must_increase_jitter = True
        x_plus_constant = None
        # Retry potrf with geometrically growing jitter until it succeeds or
        # the jitter exceeds its safeguard upper bound.
        while must_increase_jitter and jitter <= jitter_upperbound:
            try:
                x_plus_constant = x + self._get_constant_identity(
                    x, sigsq_init + jitter)
                L = mx.nd.linalg.potrf(x_plus_constant)
                # because of the implicit asynchronous processing in MXNet,
                # we need to enforce the computation of L to happen right here
                # (otherwise a potrf failure would surface later, outside this
                # try block)
                L.wait_to_read()
                must_increase_jitter = False
            except mx.base.MXNetError:
                if self._debug_log == 'true':
                    logger.info("sigsq = {} does not work".format(
                        sigsq_init.asscalar() + jitter))
                if jitter == 0.0:
                    # First retry: seed the jitter from the scale of diag(x).
                    jitter = self._initial_jitter_factor * mx.nd.mean(
                        mx.nd.diag(x)).asscalar()
                else:
                    jitter *= self._jitter_growth
        assert not must_increase_jitter,\
            "The jitter ({}) has reached its upperbound ({}) while the Cholesky of the input matrix still cannot be computed.".format(jitter, jitter_upperbound)
        if self._debug_log == 'true':
            _sigsq_init = sigsq_init.asscalar()
            logger.info("sigsq_final = {}".format(_sigsq_init + jitter))
        self.assign(out_data[0], req[0], x_plus_constant)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # out = x + sigsq_final * I (jitter treated as constant), hence:
        #   d out / d x     = Id  -> gradient wrt x is out_grad unchanged
        #   d out / d sigsq = Id  -> gradient wrt sigsq is trace(out_grad)
        self.assign(in_grad[0], req[0], out_grad[0])
        trace_out_grad = mx.nd.sum(mx.nd.diag(out_grad[0]))
        self.assign(in_grad[1], req[1], trace_out_grad)
@mx.operator.register("add_jitter")
class AddJitterOpProp(mx.operator.CustomOpProp):
    """Shape/argument metadata and factory for :class:`AddJitterOp`."""

    def __init__(
            self, initial_jitter_factor=INITIAL_JITTER_FACTOR,
            jitter_growth=JITTER_GROWTH, debug_log='false'):
        super(AddJitterOpProp, self).__init__(need_top_grad=True)
        # Arguments reach a CustomOpProp as strings, so cast them explicitly.
        # See https://github.com/Xilinx/mxnet/blob/master/docs/tutorials/gluon/customop.md
        self._init_factor = float(initial_jitter_factor)
        self._growth = float(jitter_growth)
        self._debug = debug_log

    def list_arguments(self):
        # Forward inputs: the PSD matrix and the initial sigma^2 scalar.
        return ['x', 'sigsq_init']

    def list_outputs(self):
        return ['x_plus_sigsq_final']

    def infer_shape(self, in_shape):
        mat_shape = in_shape[0]
        assert len(mat_shape) == 2 and mat_shape[0] == mat_shape[1], \
            "x must be square matrix, shape (n, n)"
        scalar_shape = in_shape[1]
        assert len(scalar_shape) == 1 and scalar_shape[0] == 1, \
            "sigsq_init must be scalar, shape (1,)"
        # Output has the shape of the matrix input; no auxiliary states.
        return in_shape, [mat_shape], []

    def create_operator(self, ctx, shapes, dtypes, **kwargs):
        return AddJitterOp(
            initial_jitter_factor=self._init_factor,
            jitter_growth=self._growth,
            debug_log=self._debug, **kwargs)
| import mxnet as mx
import logging
logger = logging.getLogger(__name__)
__all__ = ['AddJitterOp', 'AddJitterOpProp']
INITIAL_JITTER_FACTOR = 1e-9
JITTER_GROWTH = 10.
JITTER_UPPERBOUND_FACTOR = 1e3
class AddJitterOp(mx.operator.CustomOp):
"""
Finds smaller jitter to add to diagonal of square matrix to render the
matrix positive definite (in that linalg.potrf works).
Given input x (positive semi-definite matrix) and sigsq_init (nonneg
scalar), find sigsq_final (nonneg scalar), so that:
sigsq_final = sigsq_init + jitter, jitter >= 0,
x + sigsq_final * Id positive definite (so that potrf call works)
We return the matrix x + sigsq_final * Id, for which potrf has not failed.
For the gradient, the dependence of jitter on the inputs is ignored.
The values tried for sigsq_final are:
sigsq_init, sigsq_init + initial_jitter * (jitter_growth ** k),
k = 0, 1, 2, ...,
initial_jitter = initial_jitter_factor * mean(diag(x))
Note: The scaling of initial_jitter with mean(diag(x)) is taken from GPy.
The rationale is that the largest eigenvalue of x is >= mean(diag(x)), and
likely of this magnitude.
There is no guarantee that the Cholesky factor returned is well-conditioned
enough for subsequent computations to be reliable. A better solution
would be to estimate the condition number of the Cholesky factor, and to add
jitter until this is bounded below a threshold we tolerate. See
<NAME>.
A Survey of Condition Number Estimation for Triangular Matrices
MIMS EPrint: 2007.10
Algorithm 4.1 could work for us.
"""
def __init__(
self, initial_jitter_factor, jitter_growth, debug_log, **kwargs):
super(AddJitterOp, self).__init__(**kwargs)
assert initial_jitter_factor > 0. and jitter_growth > 1.
self._initial_jitter_factor = initial_jitter_factor
self._jitter_growth = jitter_growth
self._debug_log = debug_log
def _get_constant_identity(self, x, constant):
n, _ = x.shape
return mx.nd.diag(
mx.nd.ones(shape=(n,), ctx=x.context, dtype=x.dtype) * constant)
def _get_jitter_upperbound(self, x):
# To define a safeguard in the while-loop of the forward,
# we define an upperbound on the jitter we can reasonably add
# the bound is quite generous, and is dependent on the scale of the input x
# (the scale is captured via the trace of x)
# the primary goal is avoid any infinite while-loop.
return JITTER_UPPERBOUND_FACTOR * max(
1., mx.nd.mean(mx.nd.diag(x)).asscalar())
def forward(self, is_train, req, in_data, out_data, aux):
x = in_data[0]
sigsq_init = in_data[1]
jitter = 0.
jitter_upperbound = self._get_jitter_upperbound(x)
must_increase_jitter = True
x_plus_constant = None
while must_increase_jitter and jitter <= jitter_upperbound:
try:
x_plus_constant = x + self._get_constant_identity(
x, sigsq_init + jitter)
L = mx.nd.linalg.potrf(x_plus_constant)
# because of the implicit asynchronous processing in MXNet,
# we need to enforce the computation of L to happen right here
L.wait_to_read()
must_increase_jitter = False
except mx.base.MXNetError:
if self._debug_log == 'true':
logger.info("sigsq = {} does not work".format(
sigsq_init.asscalar() + jitter))
if jitter == 0.0:
jitter = self._initial_jitter_factor * mx.nd.mean(
mx.nd.diag(x)).asscalar()
else:
jitter *= self._jitter_growth
assert not must_increase_jitter,\
"The jitter ({}) has reached its upperbound ({}) while the Cholesky of the input matrix still cannot be computed.".format(jitter, jitter_upperbound)
if self._debug_log == 'true':
_sigsq_init = sigsq_init.asscalar()
logger.info("sigsq_final = {}".format(_sigsq_init + jitter))
self.assign(out_data[0], req[0], x_plus_constant)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], out_grad[0])
trace_out_grad = mx.nd.sum(mx.nd.diag(out_grad[0]))
self.assign(in_grad[1], req[1], trace_out_grad)
@mx.operator.register("add_jitter")
class AddJitterOpProp(mx.operator.CustomOpProp):
def __init__(
self, initial_jitter_factor=INITIAL_JITTER_FACTOR,
jitter_growth=JITTER_GROWTH, debug_log='false'):
super(AddJitterOpProp, self).__init__(need_top_grad=True)
# We need to cast the arguments
# see detailed example https://github.com/Xilinx/mxnet/blob/master/docs/tutorials/gluon/customop.md
self._initial_jitter_factor = float(initial_jitter_factor)
self._jitter_growth = float(jitter_growth)
self._debug_log = debug_log
def list_arguments(self):
return ['x', 'sigsq_init']
def list_outputs(self):
return ['x_plus_sigsq_final']
def infer_shape(self, in_shape):
x_shape = in_shape[0]
assert len(x_shape) == 2 and x_shape[0] == x_shape[1], \
"x must be square matrix, shape (n, n)"
ssq_shape = in_shape[1]
assert len(ssq_shape) == 1 and ssq_shape[0] == 1, \
"sigsq_init must be scalar, shape (1,)"
return in_shape, [x_shape], []
def create_operator(self, ctx, shapes, dtypes, **kwargs):
return AddJitterOp(
initial_jitter_factor=self._initial_jitter_factor,
jitter_growth=self._jitter_growth,
debug_log=self._debug_log, **kwargs)
| en | 0.835227 | Finds smaller jitter to add to diagonal of square matrix to render the matrix positive definite (in that linalg.potrf works). Given input x (positive semi-definite matrix) and sigsq_init (nonneg scalar), find sigsq_final (nonneg scalar), so that: sigsq_final = sigsq_init + jitter, jitter >= 0, x + sigsq_final * Id positive definite (so that potrf call works) We return the matrix x + sigsq_final * Id, for which potrf has not failed. For the gradient, the dependence of jitter on the inputs is ignored. The values tried for sigsq_final are: sigsq_init, sigsq_init + initial_jitter * (jitter_growth ** k), k = 0, 1, 2, ..., initial_jitter = initial_jitter_factor * mean(diag(x)) Note: The scaling of initial_jitter with mean(diag(x)) is taken from GPy. The rationale is that the largest eigenvalue of x is >= mean(diag(x)), and likely of this magnitude. There is no guarantee that the Cholesky factor returned is well-conditioned enough for subsequent computations to be reliable. A better solution would be to estimate the condition number of the Cholesky factor, and to add jitter until this is bounded below a threshold we tolerate. See <NAME>. A Survey of Condition Number Estimation for Triangular Matrices MIMS EPrint: 2007.10 Algorithm 4.1 could work for us. # To define a safeguard in the while-loop of the forward, # we define an upperbound on the jitter we can reasonably add # the bound is quite generous, and is dependent on the scale of the input x # (the scale is captured via the trace of x) # the primary goal is avoid any infinite while-loop. # because of the implicit asynchronous processing in MXNet, # we need to enforce the computation of L to happen right here # We need to cast the arguments # see detailed example https://github.com/Xilinx/mxnet/blob/master/docs/tutorials/gluon/customop.md | 2.20172 | 2 |
mylib/ext/splinter.py | fakegit/mo-han-toolbox | 24 | 6614420 | <gh_stars>10-100
#!/usr/bin/env python3
from splinter import *
from mylib.easy import *
from mylib.ext import http_headers
def __ref_sth():
    # Touches ``Browser`` (pulled in by ``from splinter import *``).
    # NOTE(review): presumably exists so linters/IDEs don't strip the star
    # import as unused — confirm intent.
    return Browser
class BrowserWrapper:
    """Thin convenience wrapper around a ``splinter`` Browser.

    Adds context-manager support, cookie helpers, and keyword-style element
    lookups such as ``find(css=...)`` / ``exist(xpath=...)``.
    """

    def __init__(self, splinter_browser):
        self._browser = splinter_browser
        # Per-instance cache of resolved ``<prefix>_by_<by>`` lookup methods.
        # Fix: the original used ``functools.lru_cache`` on the method, which
        # keys on ``self`` and keeps every wrapper instance alive for the
        # cache's lifetime (flake8-bugbear B019).
        self._finder_cache = {}
        # Shortcuts straight to the underlying browser.
        self.visit = self.b.visit
        self.quit = self.b.quit

    @property
    def browser(self):
        """The wrapped splinter browser."""
        return self._browser

    @property
    def b(self):
        """Short alias for :attr:`browser`."""
        return self._browser

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.browser.quit()

    def add_cookies(self, cookies, domain=None, reload=False):
        """Add *cookies* (a dict) to the browser.

        If *domain* is given, every cookie currently held by the driver is
        re-added with that domain.  Fix: the original unconditionally set
        ``cookie['domain'] = domain``, clobbering valid cookie domains with
        ``None`` when no domain was passed.
        """
        self.browser.cookies.add(cookies)
        if domain is not None:
            for cookie in self.list_cookies():
                cookie['domain'] = domain
                self.add_1cookie_dict(cookie)
        if reload:
            self.browser.reload()

    def add_cookies_from(self, cookies_source, domain=None):
        """Extract cookies from *cookies_source* via
        ``http_headers.get_cookies_dict_from`` and add them."""
        self.add_cookies(http_headers.get_cookies_dict_from(cookies_source), domain=domain)

    def add_1cookie_dict(self, single_cookie_dict):
        """Add one selenium-style cookie dict directly to the driver."""
        self.browser.driver.add_cookie(single_cookie_dict)

    def get_cookies(self):
        """All cookies as seen by splinter."""
        return self.browser.cookies.all()

    def list_cookies(self):
        """All cookies as selenium dicts (including domain/path/...)."""
        return self.browser.driver.get_cookies()

    def _method_to_search_element_in_browser(self, prefix, by):
        # Resolve e.g. ('find', 'css') -> browser.find_by_css, memoized per
        # instance so repeated lookups skip getattr.
        try:
            return self._finder_cache[(prefix, by)]
        except KeyError:
            method = getattr(self.browser, f'{prefix}_by_{by}')
            self._finder_cache[(prefix, by)] = method
            return method

    def find(self, wait_time=None, **kw_by_what):
        """Find elements, e.g. ``find(css='div.x')`` or ``find(xpath='...')``."""
        by, what = kw_by_what.popitem()
        return self._method_to_search_element_in_browser('find', by)(what, wait_time=wait_time)

    def exist(self, wait_time=None, **kw_by_what):
        """True if the element described by the single keyword is present."""
        by, what = kw_by_what.popitem()
        return self._method_to_search_element_in_browser('is_element_present', by)(what, wait_time=wait_time)

    def not_exist(self, wait_time=None, **kw_by_what):
        """True if the element described by the single keyword is absent."""
        by, what = kw_by_what.popitem()
        return self._method_to_search_element_in_browser('is_element_not_present', by)(what, wait_time=wait_time)
def make_proxy_settings(address, as_kwargs=False):
    """Build Firefox proxy profile preferences for *address*.

    *address* is either a netloc string (parsed with
    ``http_headers.ez_parse_netloc``) or an iterable of ``(host, port)``.
    Returns the raw preference mapping, or ``{'profile_preferences': ...}``
    when *as_kwargs* is true so it can be splatted into a Browser call.
    """
    if isinstance(address, str):
        parsed = http_headers.ez_parse_netloc(address)
        host = parsed.hostname
        port = parsed.port
    elif isinstance(address, T.Iterable):
        host, port = address
        port = int(port)
    else:
        raise TypeError('address', (str, T.Iterable))
    # Point every protocol (http/ssl/socks/ftp) at the same proxy endpoint.
    prefs = {'network.proxy.type': 1}
    for proto in ('http', 'ssl', 'socks', 'ftp'):
        prefs[f'network.proxy.{proto}'] = host
        prefs[f'network.proxy.{proto}_port'] = port
    if as_kwargs:
        return dict(profile_preferences=prefs)
    return prefs
| #!/usr/bin/env python3
from splinter import *
from mylib.easy import *
from mylib.ext import http_headers
def __ref_sth():
return Browser
class BrowserWrapper:
def __init__(self, splinter_browser):
self._browser = splinter_browser
self.visit = self.b.visit
self.quit = self.b.quit
@property
def browser(self):
return self._browser
@property
def b(self):
return self._browser
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.browser.quit()
def add_cookies(self, cookies, domain=None, reload=False):
self.browser.cookies.add(cookies)
for cookie in self.list_cookies():
cookie['domain'] = domain
self.add_1cookie_dict(cookie)
if reload:
self.browser.reload()
def add_cookies_from(self, cookies_source, domain=None):
self.add_cookies(http_headers.get_cookies_dict_from(cookies_source), domain=domain)
def add_1cookie_dict(self, single_cookie_dict):
self.browser.driver.add_cookie(single_cookie_dict)
def get_cookies(self):
return self.browser.cookies.all()
def list_cookies(self):
return self.browser.driver.get_cookies()
@functools.lru_cache()
def _method_to_search_element_in_browser(self, prefix, by):
return getattr(self.browser, f'{prefix}_by_{by}')
def find(self, wait_time=None, **kw_by_what):
by, what = kw_by_what.popitem()
return self._method_to_search_element_in_browser('find', by)(what, wait_time=wait_time)
def exist(self, wait_time=None, **kw_by_what):
by, what = kw_by_what.popitem()
return self._method_to_search_element_in_browser('is_element_present', by)(what, wait_time=wait_time)
def not_exist(self, wait_time=None, **kw_by_what):
by, what = kw_by_what.popitem()
return self._method_to_search_element_in_browser('is_element_not_present', by)(what, wait_time=wait_time)
def make_proxy_settings(address, as_kwargs=False):
if isinstance(address, str):
pr = http_headers.ez_parse_netloc(address)
host = pr.hostname
port = pr.port
elif isinstance(address, T.Iterable):
host, port = address
port = int(port)
else:
raise TypeError('address', (str, T.Iterable))
profile_preferences = {
'network.proxy.type': 1,
'network.proxy.http': host,
'network.proxy.http_port': port,
'network.proxy.ssl': host,
'network.proxy.ssl_port': port,
'network.proxy.socks': host,
'network.proxy.socks_port': port,
'network.proxy.ftp': host,
'network.proxy.ftp_port': port
}
return dict(profile_preferences=profile_preferences) if as_kwargs else profile_preferences | fr | 0.221828 | #!/usr/bin/env python3 | 2.380914 | 2 |
tests/test_file_walker.py | ktonal/h5mapper | 4 | 6614421 | <filename>tests/test_file_walker.py
import pytest
import re
import os
from h5mapper.file_walker import FileWalker
def test_find_matches(tmp_path):
    """FileWalker yields matching files/dirs for string or list roots.

    Also documents two behaviors the counts rely on: the dot-file
    ``.match-but-hidden.ext`` matches the pattern yet never appears in the
    results (hidden entries are skipped), and a walker can be iterated
    more than once with identical output.
    """
    (tmp_path / "sub").mkdir()
    files = [
        str(tmp_path / d / f)
        for d in ["", "sub"]
        for f in ["a.ext", "a.null", "b.ext", "b.null",
                  ".match-but-hidden.ext"]
    ]
    for f in files:
        open(f, "w").write(" ")
        assert os.path.isfile(f)
    # finds single file
    fw = FileWalker(r"ext$", str(tmp_path / "a.ext"))
    found = list(fw)
    assert len(found) == 1
    assert all(re.search(r"ext$", f) for f in found)
    # finds files (4 visible .ext files: a/b in root and in sub/)
    fw = FileWalker(r"ext$", str(tmp_path))
    found = list(fw)
    assert len(found) == 4
    assert all(re.search(r"ext$", f) for f in found)
    assert len([f for f in found if "sub" in f]) == 2
    # finds directories (everything under sub/ matches via its path)
    fw = FileWalker(r"sub", str(tmp_path))
    found = list(fw)
    assert len(found) == 4
    assert all(re.search(r"sub", f) for f in found)
    assert len([f for f in found if ".ext" in f]) == 2
    # finds both (a directory root mixed with explicit file roots)
    fw = FileWalker(r"ext$", [str(tmp_path / "sub"), str(tmp_path / "a.ext"), str(tmp_path / "b.ext")])
    found = list(fw)
    assert len(found) == 4
    assert all(re.search(r"ext$", f) for f in found)
    assert len([f for f in found if "sub" in f]) == 2
    # test multiple iter()
    assert list(fw) == found
def test_raises_on_filenotfound(tmp_path):
    """A FileWalker over a non-existent root raises FileNotFoundError lazily,
    i.e. on iteration — for both a bare string root and a list of roots."""
    (tmp_path / "sub").mkdir()
    files = [
        str(tmp_path / d / f)
        for d in ["", "sub"]
        for f in ["a.ext", "a.null", "b.ext", "b.null"]
    ]
    for f in files:
        open(f, "w").write(" ")
        assert os.path.isfile(f)
    # "subext" matches the pattern textually but does not exist on disk
    faulty_path = str(tmp_path / "subext")
    fw = FileWalker(r"ext", faulty_path)
    assert not os.path.exists(faulty_path)
    with pytest.raises(FileNotFoundError):
        found = list(fw)
    # also in lists
    fw = FileWalker(r"ext", [faulty_path])
    assert not os.path.exists(faulty_path)
    with pytest.raises(FileNotFoundError):
found = list(fw) | <filename>tests/test_file_walker.py
import pytest
import re
import os
from h5mapper.file_walker import FileWalker
def test_find_matches(tmp_path):
(tmp_path / "sub").mkdir()
files = [
str(tmp_path / d / f)
for d in ["", "sub"]
for f in ["a.ext", "a.null", "b.ext", "b.null",
".match-but-hidden.ext"]
]
for f in files:
open(f, "w").write(" ")
assert os.path.isfile(f)
# finds single file
fw = FileWalker(r"ext$", str(tmp_path / "a.ext"))
found = list(fw)
assert len(found) == 1
assert all(re.search(r"ext$", f) for f in found)
# finds files
fw = FileWalker(r"ext$", str(tmp_path))
found = list(fw)
assert len(found) == 4
assert all(re.search(r"ext$", f) for f in found)
assert len([f for f in found if "sub" in f]) == 2
# finds directories
fw = FileWalker(r"sub", str(tmp_path))
found = list(fw)
assert len(found) == 4
assert all(re.search(r"sub", f) for f in found)
assert len([f for f in found if ".ext" in f]) == 2
# finds both
fw = FileWalker(r"ext$", [str(tmp_path / "sub"), str(tmp_path / "a.ext"), str(tmp_path / "b.ext")])
found = list(fw)
assert len(found) == 4
assert all(re.search(r"ext$", f) for f in found)
assert len([f for f in found if "sub" in f]) == 2
# test multiple iter()
assert list(fw) == found
def test_raises_on_filenotfound(tmp_path):
(tmp_path / "sub").mkdir()
files = [
str(tmp_path / d / f)
for d in ["", "sub"]
for f in ["a.ext", "a.null", "b.ext", "b.null"]
]
for f in files:
open(f, "w").write(" ")
assert os.path.isfile(f)
faulty_path = str(tmp_path / "subext")
fw = FileWalker(r"ext", faulty_path)
assert not os.path.exists(faulty_path)
with pytest.raises(FileNotFoundError):
found = list(fw)
# also in lists
fw = FileWalker(r"ext", [faulty_path])
assert not os.path.exists(faulty_path)
with pytest.raises(FileNotFoundError):
found = list(fw) | en | 0.862589 | # finds single file # finds files # finds directories # finds both # test multiple iter() # also in lists | 2.560715 | 3 |
entity_event_slack/constants.py | ambitioninc/django-entity-event-slack | 3 | 6614422 | <gh_stars>1-10
# Identifier of the Slack delivery medium.
# NOTE(review): presumably must match the Medium name registered with
# django-entity-event — confirm against the medium setup code.
SLACK_MEDIUM_NAME = 'slack'
| SLACK_MEDIUM_NAME = 'slack' | none | 1 | 1.070799 | 1 | |
ropywiki.py | gsuberland/rowiki | 0 | 6614423 | <filename>ropywiki.py
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import url2pathname
import cgi
import mimetypes
import markdown
from markdown.extensions.toc import TocExtension
'''
ROPy Wiki
Written by <NAME> (gsuberland)
Inspired by sqshr's mikiwiki - https://github.com/sqshr/mikiwiki
'''
# Root directory served by the wiki; every request path resolves beneath it.
sitedir = ".\\wiki"
# HTML page template containing %%TITLE%%, %%BODY%% and %%TOC%% placeholders.
page_template_path = ".\\pagetemplate.html"
# Single stylesheet returned for every *.css request.
combined_css_path = ".\\combined.css"
def readfile(filename):
    """Read *filename* and return its entire contents as text.

    Fix: uses a context manager so the file handle is closed even when
    ``read()`` raises (the original leaked the descriptor on error).
    """
    with open(filename, 'r') as f:
        return f.read()
class RequestHandler(BaseHTTPRequestHandler):
    """Serves the wiki.

    GET: renders markdown files and directory listings as HTML, streams
    images, and answers every ``*.css`` request with the combined
    stylesheet.  POST ``/search``: case-insensitive substring search over
    directory names, file names and text-file contents.
    """

    def handle_css(self):
        # when we request a CSS file, always return the combined CSS file
        self.send_response(200)
        self.send_header('Content-Type', 'text/css; charset=UTF-8')
        self.send_header('Cache-Control', 'public, max-age=0')
        self.end_headers()
        response = readfile(combined_css_path)
        self.wfile.write(response.encode('utf-8'))

    def handle_404(self):
        """Send a minimal plain-text 404 response."""
        self.send_response(404)
        # Fix: the body is plain text, not CSS (was 'text/css').
        self.send_header('Content-Type', 'text/plain; charset=UTF-8')
        self.end_headers()
        self.wfile.write('404'.encode('utf-8'))

    def handle_image(self, path, type):
        """Stream a binary image file (status line already sent by do_GET)."""
        self.send_header('Content-Type', type)
        self.end_headers()
        # Fix: close the handle deterministically (was open/read/close).
        with open(path, 'rb') as f:
            image_data = f.read()
        self.wfile.write(image_data)

    def handle_page_render(self, content, title):
        """Render markdown *content* into the page template and send it."""
        self.send_header('Content-Type', 'text/html; charset=UTF-8')
        self.end_headers()
        # create a markdown engine with the table of contents extension enabled
        md = markdown.Markdown(extensions=[TocExtension(title=title)])
        template = readfile(page_template_path)  # html template for the page
        body = md.convert(content)  # rendered HTML of the markdown
        toc = md.toc  # table of contents HTML
        # place the above vars into the template data
        response = template.replace('%%TITLE%%', title).replace('%%BODY%%', body).replace('%%TOC%%', toc)
        self.wfile.write(response.encode('utf-8'))

    def handle_page(self, path, title):
        """Read a markdown file from disk and render it."""
        content = readfile(path)
        self.handle_page_render(content, title)

    def handle_dir(self, path):
        """Render a directory listing as generated markdown."""
        ls = sorted(os.listdir(path), key=str.lower)
        directories = []
        files = []
        for item in ls:
            item_path = os.path.join(path, item)
            if os.path.isdir(item_path):
                directories.append((item_path, item))
            elif os.path.isfile(item_path):
                files.append((item_path, item))
        # generate markdown for the directory, links relative to the site root
        content = "## Directories\n\n"
        for directory_path, directory in directories:
            content += "* [" + directory + "](" + os.path.relpath(directory_path, sitedir) + ")\n"
        content += "## Files\n\n"
        for file_path, file in files:
            content += "* [" + file + "](" + os.path.relpath(file_path, sitedir) + ")\n"
        title = os.path.relpath(path, sitedir)
        if title == '.':
            title = "/"
        else:
            title = "/" + title
        self.handle_page_render(content, title)

    def quote_line(self, line, term):
        """Return *line* with the first case-insensitive occurrence of *term*
        wrapped in markdown bold (``**``), preserving original casing."""
        index_l = line.lower().index(term.lower())
        return line[:index_l] + "**" + line[index_l:index_l + len(term)] + "**" + line[index_l + len(term):]

    def handle_search(self):
        """Search directory names, file names and text-file contents for the
        posted ``term`` and render the hits as a markdown page."""
        form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']})
        term = ""
        if 'term' in form:
            term = form['term'].value
        term = str.lower(term)
        results_dirname = []
        results_filename = []
        results_contents = []
        if term != "":
            for root, dirs, files in os.walk(os.path.abspath(sitedir)):
                for name in dirs:
                    if term in str.lower(name):
                        results_dirname.append(os.path.relpath(os.path.join(root, name), os.path.abspath(sitedir)))
                for name in files:
                    if term in str.lower(name):
                        results_filename.append(os.path.relpath(os.path.join(root, name), os.path.abspath(sitedir)))
                for name in files:
                    line_number = 0
                    filepath = os.path.join(root, name)
                    guessed_type, guessed_encoding = mimetypes.guess_type(filepath)
                    # only grep files that don't look like image/binary data
                    if guessed_type is None or ('image' not in guessed_type and 'binary' not in guessed_type):
                        # Fix: close the file deterministically and catch only
                        # the expected failures (was a bare ``except:`` that
                        # also swallowed KeyboardInterrupt/SystemExit).
                        try:
                            with open(filepath) as fh:
                                for line in fh:
                                    if term in str.lower(line):
                                        results_contents.append((name, line_number, line))
                                    line_number += 1
                        except (OSError, UnicodeDecodeError):
                            print("Search couldn't read " + filepath)
        content = "# Search results for " + term + "\n"
        if len(results_dirname) == 0 and len(results_filename) == 0 and len(results_contents) == 0:
            content += "No results found."
        else:
            if len(results_dirname) > 0:
                content += "## Directory names\n"
                for dirname in results_dirname:
                    content += "* [" + dirname + "](" + dirname + ")\n"
            if len(results_filename) > 0:
                content += "## File names\n"
                for filename in results_filename:
                    content += "* [" + filename + "](" + filename + ")\n"
            if len(results_contents) > 0:
                content += "## File contents\n"
                # NOTE(review): links use the bare file name, so content hits
                # inside subdirectories point at the wrong URL — confirm and
                # consider using the path relative to sitedir.
                for filename, line_number, line in results_contents:
                    content += "[" + filename + "](" + filename + ") line " + str(line_number) + ":\n\n"
                    content += " > " + self.quote_line(line, term) + "\n\n"
        self.handle_page_render(content, term)

    def do_GET(self):
        local_file_path = sitedir + url2pathname(self.path)
        if local_file_path.endswith('.css'):
            self.handle_css()
            return
        # Security fix: url2pathname() passes ".." segments through, so a
        # crafted URL could previously read files outside the wiki root.
        # Resolve the path and reject anything that escapes sitedir.
        site_root = os.path.abspath(sitedir)
        resolved = os.path.abspath(local_file_path)
        if resolved != site_root and not resolved.startswith(site_root + os.sep):
            self.handle_404()
            return
        if not os.path.exists(local_file_path):
            self.handle_404()
            return
        self.send_response(200)
        if os.path.isfile(local_file_path):
            # first try to guess what mime type the target file is
            guessed_type, guessed_encoding = mimetypes.guess_type(local_file_path)
            if guessed_type is not None and 'image' in guessed_type:
                # if we guessed a type and that type is an image, return it as a binary file
                self.handle_image(local_file_path, guessed_type)
            else:
                # not an image, so just assume this is markdown;
                # the page title is the file name without its extension
                page_title = os.path.splitext(os.path.basename(url2pathname(self.path)))[0]
                self.handle_page(local_file_path, page_title)
        else:
            self.handle_dir(local_file_path)

    def do_POST(self):
        if self.path == '/search':
            self.send_response(200)
            self.handle_search()
        else:
            self.handle_404()
def run():
    """Bind the wiki server to localhost:8080 and serve until interrupted."""
    bind_addr = ('localhost', 8080)
    httpd = HTTPServer(bind_addr, RequestHandler)
    print("ROPyWiki started")
    httpd.serve_forever()
if __name__ == "__main__":
run()
| <filename>ropywiki.py
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import url2pathname
import cgi
import mimetypes
import markdown
from markdown.extensions.toc import TocExtension
'''
ROPy Wiki
Written by <NAME> (gsuberland)
Inspired by sqshr's mikiwiki - https://github.com/sqshr/mikiwiki
'''
sitedir = ".\\wiki"
page_template_path = ".\\pagetemplate.html"
combined_css_path = ".\\combined.css"
def readfile(filename):
f = open(filename, 'r')
contents = f.read()
f.close()
return contents
class RequestHandler(BaseHTTPRequestHandler):
def handle_css(self):
# when we request a CSS file, always return the combined CSS file
self.send_response(200)
self.send_header('Content-Type', 'text/css; charset=UTF-8')
self.send_header('Cache-Control', 'public, max-age=0')
self.end_headers()
response = readfile(combined_css_path)
self.wfile.write(response.encode('utf-8'))
def handle_404(self):
self.send_response(404)
self.send_header('Content-Type', 'text/css; charset=UTF-8')
self.end_headers()
self.wfile.write('404'.encode('utf-8'))
def handle_image(self, path, type):
self.send_header('Content-Type', type)
self.end_headers()
# read the binary image file
f = open(path, 'rb')
image_data = f.read()
f.close()
# write it to the client
self.wfile.write(image_data)
def handle_page_render(self, content, title):
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
# create a markdown engine with the table of contents extension enabled
md = markdown.Markdown(extensions=[TocExtension(title=title)])
template = readfile(page_template_path) # html template for the page
body = md.convert(content) # rendered HTML of the markdown
toc = md.toc # table of contents HTML
# place the above vars into the template data
response = template.replace('%%TITLE%%', title).replace('%%BODY%%', body).replace('%%TOC%%', toc)
# write the response out to the client
self.wfile.write(response.encode('utf-8'))
def handle_page(self, path, title):
# get markdown content
content = readfile(path)
# render the markdown result
self.handle_page_render(content, title)
def handle_dir(self, path):
# directory listing. first enumerate the dir...
ls = sorted(os.listdir(path), key=str.lower)
directories = []
files = []
for item in ls:
item_path = os.path.join(path, item)
if os.path.isdir(item_path):
directories.append((item_path, item))
elif os.path.isfile(item_path):
files.append((item_path, item))
# now generate markdown for the directory
content = "## Directories\n\n"
for directory_path, directory in directories:
content += "* [" + directory + "](" + os.path.relpath(directory_path, sitedir) + ")\n"
content += "## Files\n\n"
for file_path, file in files:
content += "* [" + file + "](" + os.path.relpath(file_path, sitedir) + ")\n"
# render the markdown result
title = os.path.relpath(path, sitedir)
if title == '.':
title = "/"
else:
title = "/" + title
self.handle_page_render(content, title)
def quote_line(self, line, term):
index_l = line.lower().index(term.lower())
return line[:index_l] + "**" + line[index_l:index_l + len(term)] + "**" + line[index_l + len(term):]
def handle_search(self):
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']})
term = ""
if 'term' in form:
term = form['term'].value
term = str.lower(term)
results_dirname = []
results_filename = []
results_contents = []
if term != "":
for root, dirs, files in os.walk(os.path.abspath(sitedir)):
for name in dirs:
if term in str.lower(name):
results_dirname.append(os.path.relpath(os.path.join(root, name), os.path.abspath(sitedir)))
for name in files:
if term in str.lower(name):
results_filename.append(os.path.relpath(os.path.join(root, name), os.path.abspath(sitedir)))
for name in files:
line_number = 0
filepath = os.path.join(root, name)
guessed_type, guessed_encoding = mimetypes.guess_type(filepath)
if guessed_type is None or ('image' not in guessed_type and 'binary' not in guessed_type):
try:
for line in open(filepath):
if term in str.lower(line):
results_contents.append((name, line_number, line))
line_number += 1
except:
print("Search couldn't read " + filepath)
content = "# Search results for " + term + "\n"
if len(results_dirname) == 0 and len(results_filename) == 0 and len(results_contents) == 0:
content += "No results found."
else:
if len(results_dirname) > 0:
content += "## Directory names\n"
for dirname in results_dirname:
content += "* [" + dirname + "](" + dirname + ")\n"
if len(results_filename) > 0:
content += "## File names\n"
for filename in results_filename:
content += "* [" + filename + "](" + filename + ")\n"
if len(results_contents) > 0:
content += "## File contents\n"
for filename, line_number, line in results_contents:
content += "[" + filename + "](" + filename + ") line " + str(line_number) + ":\n\n"
content += " > " + self.quote_line(line, term) + "\n\n"
self.handle_page_render(content, term)
def do_GET(self):
local_file_path = sitedir+url2pathname(self.path)
if local_file_path.endswith('.css'):
self.handle_css()
else:
if not os.path.exists(local_file_path):
self.handle_404()
else:
self.send_response(200)
if os.path.isfile(local_file_path):
# first try to guess what mime type the target file is
guessed_type, guessed_encoding = mimetypes.guess_type(local_file_path)
if guessed_type is not None and 'image' in guessed_type:
# if we guessed a type and that type is an image, return it as a binary file
self.handle_image(local_file_path, guessed_type)
else:
# we didn't detect the target file type as an image, so just assume this is markdown
page_title = os.path.splitext(os.path.basename(url2pathname(self.path)))[0]
self.handle_page(local_file_path, page_title)
else:
self.handle_dir(local_file_path)
def do_POST(self):
if self.path == '/search':
self.send_response(200)
self.handle_search()
else:
self.handle_404()
def run():
serveraddr = ('localhost', 8080)
server = HTTPServer(serveraddr, RequestHandler)
print("ROPyWiki started")
server.serve_forever()
if __name__ == "__main__":
run()
| en | 0.790361 | ROPy Wiki Written by <NAME> (gsuberland) Inspired by sqshr's mikiwiki - https://github.com/sqshr/mikiwiki # when we request a CSS file, always return the combined CSS file # read the binary image file # write it to the client # create a markdown engine with the table of contents extension enabled # html template for the page # rendered HTML of the markdown # table of contents HTML # place the above vars into the template data # write the response out to the client # get markdown content # render the markdown result # directory listing. first enumerate the dir... # now generate markdown for the directory # Directories\n\n" # Files\n\n" # render the markdown result # Directory names\n" # File names\n" # File contents\n" # first try to guess what mime type the target file is # if we guessed a type and that type is an image, return it as a binary file # we didn't detect the target file type as an image, so just assume this is markdown | 2.94843 | 3 |
aimysearch/search.py | egusahiroaki/aimysearch | 2 | 6614424 | # -*- coding: utf-8 -*-
# pylint: disable=C0321,C0111,C0103,R0903
from .util import n_gram
class AiMySearch():
class MatchRateError(Exception):
pass
class TargetTextError(Exception):
pass
def __init__(self, search_target_word, target_text, fuzziness=0, match_rate=0.6):
self.search_target_word = search_target_word
if not target_text:
raise self.TargetTextError('target_text should not be blank.')
self.target_text = target_text
self.fuzziness = fuzziness
if not 0 < match_rate < 1:
raise self.MatchRateError('match_rate should be between 0 and 1.')
self.threshold = match_rate * len(search_target_word)
def run(self):
candidates = []
for n_gram_num in list(range(
len(self.search_target_word) - self.fuzziness,
len(self.search_target_word) + self.fuzziness + 1)):
for target_elm in n_gram(self.target_text, n_gram_num):
count = 0
if target_elm['text'] == self.search_target_word:
continue
for t in target_elm['text']:
if t in self.search_target_word:
count += 1
if count >= self.threshold:
candidates.append(target_elm)
filtered = []
sorted_candidates = sorted(candidates, key=lambda x: x['index'])
for candidate in sorted_candidates:
if not filtered:
filtered.append(candidate)
if filtered:
last_elm = filtered[len(filtered)-1]
last = last_elm["index"] + last_elm["length"]
if candidate['index'] - last > 0:
filtered.append(candidate)
else:
l = candidate['index'] - last_elm['index']
last_elm['text'] = last_elm['text'][0:l] + \
candidate['text']
last_elm['length'] = len(last_elm['text'])
return list(filter(lambda x: self.search_target_word not in x['text'], filtered))
| # -*- coding: utf-8 -*-
# pylint: disable=C0321,C0111,C0103,R0903
from .util import n_gram
class AiMySearch():
class MatchRateError(Exception):
pass
class TargetTextError(Exception):
pass
def __init__(self, search_target_word, target_text, fuzziness=0, match_rate=0.6):
self.search_target_word = search_target_word
if not target_text:
raise self.TargetTextError('target_text should not be blank.')
self.target_text = target_text
self.fuzziness = fuzziness
if not 0 < match_rate < 1:
raise self.MatchRateError('match_rate should be between 0 and 1.')
self.threshold = match_rate * len(search_target_word)
def run(self):
candidates = []
for n_gram_num in list(range(
len(self.search_target_word) - self.fuzziness,
len(self.search_target_word) + self.fuzziness + 1)):
for target_elm in n_gram(self.target_text, n_gram_num):
count = 0
if target_elm['text'] == self.search_target_word:
continue
for t in target_elm['text']:
if t in self.search_target_word:
count += 1
if count >= self.threshold:
candidates.append(target_elm)
filtered = []
sorted_candidates = sorted(candidates, key=lambda x: x['index'])
for candidate in sorted_candidates:
if not filtered:
filtered.append(candidate)
if filtered:
last_elm = filtered[len(filtered)-1]
last = last_elm["index"] + last_elm["length"]
if candidate['index'] - last > 0:
filtered.append(candidate)
else:
l = candidate['index'] - last_elm['index']
last_elm['text'] = last_elm['text'][0:l] + \
candidate['text']
last_elm['length'] = len(last_elm['text'])
return list(filter(lambda x: self.search_target_word not in x['text'], filtered))
| en | 0.606534 | # -*- coding: utf-8 -*- # pylint: disable=C0321,C0111,C0103,R0903 | 2.827385 | 3 |
predicthq/endpoints/v1/broadcasts/schemas.py | predicthq/sdk-py | 33 | 6614425 | from predicthq.endpoints.schemas import (
PaginatedMixin,
SortableMixin,
BooleanType,
DateTimeRange,
DateTimeType,
FloatType,
IntRange,
IntType,
ListType,
Model,
ModelType,
ResultSet,
ResultType,
StringType,
)
class BroadcastEventParams(Model):
class Options:
serialize_when_none = False
event_id = ListType(StringType)
category = ListType(StringType)
label = ListType(StringType)
class BroadcastLocationParams(Model):
class Options:
serialize_when_none = False
origin = origin = StringType(regex=r"(-?\d+(\.\d+)?),(-?\d+(\.\d+)?)")
place_id = ListType(StringType)
class SearchParams(PaginatedMixin, SortableMixin, Model):
class Options:
serialize_when_none = False
broadcast_id = ListType(StringType)
location = ModelType(BroadcastLocationParams)
phq_viewership = ModelType(IntRange)
start = ModelType(DateTimeRange)
updated = ModelType(DateTimeRange)
first_seen = ModelType(DateTimeRange)
record_status = ListType(StringType(choices=("active", "deleted"), default="active"))
broadcast_status = ListType(StringType(choices=("scheduled", "predicted", "cancelled")))
event = ModelType(BroadcastEventParams)
class GeoPoint(Model):
lat = FloatType()
lon = FloatType()
class BroadcastEventEntities(Model):
class Options:
serialize_when_none = False
entity_id = StringType()
type = StringType()
name = StringType()
formatted_address = StringType()
class BroadcastEventLocation(Model):
class Options:
serialize_when_none = False
geopoint = ModelType(GeoPoint)
place_hierarchies = ListType(ListType(StringType))
country = StringType()
class BroadcastEventDates(Model):
class Options:
serialize_when_none = False
start = DateTimeType()
end = DateTimeType()
start_local = DateTimeType()
end_local = DateTimeType()
timezone = StringType()
# predicted_end_local is a paid feature.
# It will only show up in your response body if you
# have subscribed to it.
predicted_end_local = DateTimeType()
class BroadcastEvent(Model):
class Options:
serialize_when_none = False
event_id = StringType()
title = StringType()
category = StringType()
labels = ListType(StringType)
dates = ModelType(BroadcastEventDates)
location = ModelType(BroadcastEventLocation)
entities = ListType(ModelType(BroadcastEventEntities))
# The following fields are paid features.
# They will only show up in your response body if you
# have subscribed to them.
phq_attendance = IntType()
phq_rank = IntType()
local_rank = IntType()
aviation_rank = IntType()
class Place(Model):
class Options:
serialize_when_none = False
place_id = StringType()
type = StringType()
name = StringType()
county = StringType()
region = StringType()
country = StringType()
class BroadcastLocation(Model):
class Options:
serialize_when_none = False
geopoint = ModelType(GeoPoint)
place_hierarchies = ListType(ListType(StringType))
places = ListType(ModelType(Place))
country = StringType()
class BroadcastDates(Model):
class Options:
serialize_when_none = False
start = DateTimeType()
start_local = DateTimeType()
timezone = StringType()
class Broadcast(Model):
class Options:
serialize_when_none = False
broadcast_id = StringType()
updated = DateTimeType()
first_seen = DateTimeType()
dates = ModelType(BroadcastDates)
location = ModelType(BroadcastLocation)
phq_viewership = IntType()
record_status = StringType()
broadcast_status = StringType()
event = ModelType(BroadcastEvent)
class BroadcastResultSet(ResultSet):
overflow = BooleanType()
results = ResultType(Broadcast)
| from predicthq.endpoints.schemas import (
PaginatedMixin,
SortableMixin,
BooleanType,
DateTimeRange,
DateTimeType,
FloatType,
IntRange,
IntType,
ListType,
Model,
ModelType,
ResultSet,
ResultType,
StringType,
)
class BroadcastEventParams(Model):
class Options:
serialize_when_none = False
event_id = ListType(StringType)
category = ListType(StringType)
label = ListType(StringType)
class BroadcastLocationParams(Model):
class Options:
serialize_when_none = False
origin = origin = StringType(regex=r"(-?\d+(\.\d+)?),(-?\d+(\.\d+)?)")
place_id = ListType(StringType)
class SearchParams(PaginatedMixin, SortableMixin, Model):
class Options:
serialize_when_none = False
broadcast_id = ListType(StringType)
location = ModelType(BroadcastLocationParams)
phq_viewership = ModelType(IntRange)
start = ModelType(DateTimeRange)
updated = ModelType(DateTimeRange)
first_seen = ModelType(DateTimeRange)
record_status = ListType(StringType(choices=("active", "deleted"), default="active"))
broadcast_status = ListType(StringType(choices=("scheduled", "predicted", "cancelled")))
event = ModelType(BroadcastEventParams)
class GeoPoint(Model):
lat = FloatType()
lon = FloatType()
class BroadcastEventEntities(Model):
class Options:
serialize_when_none = False
entity_id = StringType()
type = StringType()
name = StringType()
formatted_address = StringType()
class BroadcastEventLocation(Model):
class Options:
serialize_when_none = False
geopoint = ModelType(GeoPoint)
place_hierarchies = ListType(ListType(StringType))
country = StringType()
class BroadcastEventDates(Model):
class Options:
serialize_when_none = False
start = DateTimeType()
end = DateTimeType()
start_local = DateTimeType()
end_local = DateTimeType()
timezone = StringType()
# predicted_end_local is a paid feature.
# It will only show up in your response body if you
# have subscribed to it.
predicted_end_local = DateTimeType()
class BroadcastEvent(Model):
class Options:
serialize_when_none = False
event_id = StringType()
title = StringType()
category = StringType()
labels = ListType(StringType)
dates = ModelType(BroadcastEventDates)
location = ModelType(BroadcastEventLocation)
entities = ListType(ModelType(BroadcastEventEntities))
# The following fields are paid features.
# They will only show up in your response body if you
# have subscribed to them.
phq_attendance = IntType()
phq_rank = IntType()
local_rank = IntType()
aviation_rank = IntType()
class Place(Model):
class Options:
serialize_when_none = False
place_id = StringType()
type = StringType()
name = StringType()
county = StringType()
region = StringType()
country = StringType()
class BroadcastLocation(Model):
class Options:
serialize_when_none = False
geopoint = ModelType(GeoPoint)
place_hierarchies = ListType(ListType(StringType))
places = ListType(ModelType(Place))
country = StringType()
class BroadcastDates(Model):
class Options:
serialize_when_none = False
start = DateTimeType()
start_local = DateTimeType()
timezone = StringType()
class Broadcast(Model):
class Options:
serialize_when_none = False
broadcast_id = StringType()
updated = DateTimeType()
first_seen = DateTimeType()
dates = ModelType(BroadcastDates)
location = ModelType(BroadcastLocation)
phq_viewership = IntType()
record_status = StringType()
broadcast_status = StringType()
event = ModelType(BroadcastEvent)
class BroadcastResultSet(ResultSet):
overflow = BooleanType()
results = ResultType(Broadcast)
| en | 0.968495 | # predicted_end_local is a paid feature. # It will only show up in your response body if you # have subscribed to it. # The following fields are paid features. # They will only show up in your response body if you # have subscribed to them. | 1.943035 | 2 |
tec/__init__.py | thorwhalen/tec | 1 | 6614426 | from tec.peek import print_source, print_signature
from tec.modules import (
loaded_module_from_dotpath_and_filepath,
second_party_names,
filepath_to_dotpath,
get_imported_module_paths,
ModulesReader,
ModuleAllAttrsReader,
ModuleAttrsReader,
)
from tec.pip_packaging import (
create_github_repo,
get_last_pypi_version_number,
format_str_vals_of_dict,
ujoin
)
from tec.packages import (
print_top_level_diagnosis
)
from tec.stores import (
file_contents_to_short_description,
find_short_description_for_pkg,
PkgReader,
PkgFilesReader, # TODO: Deprecate in favor of PyFilesReader
PyFilesReader,
builtins_py_files,
sitepackages_py_files,
py_files_with_contents_matching_pattern
)
from tec.import_counting import (
modules_imported,
modules_imported_count,
base_module_name
)
from tec.util import (
find,
extract_encoding_from_contents,
get_encoding,
decoding_problem_sentinel,
decode_or_default,
resolve_module_filepath,
resolve_to_folder,
resolve_module_contents,
)
| from tec.peek import print_source, print_signature
from tec.modules import (
loaded_module_from_dotpath_and_filepath,
second_party_names,
filepath_to_dotpath,
get_imported_module_paths,
ModulesReader,
ModuleAllAttrsReader,
ModuleAttrsReader,
)
from tec.pip_packaging import (
create_github_repo,
get_last_pypi_version_number,
format_str_vals_of_dict,
ujoin
)
from tec.packages import (
print_top_level_diagnosis
)
from tec.stores import (
file_contents_to_short_description,
find_short_description_for_pkg,
PkgReader,
PkgFilesReader, # TODO: Deprecate in favor of PyFilesReader
PyFilesReader,
builtins_py_files,
sitepackages_py_files,
py_files_with_contents_matching_pattern
)
from tec.import_counting import (
modules_imported,
modules_imported_count,
base_module_name
)
from tec.util import (
find,
extract_encoding_from_contents,
get_encoding,
decoding_problem_sentinel,
decode_or_default,
resolve_module_filepath,
resolve_to_folder,
resolve_module_contents,
)
| en | 0.658946 | # TODO: Deprecate in favor of PyFilesReader | 1.619595 | 2 |
deeppy/dataset/__init__.py | DmitryUlyanov/deeppy | 1 | 6614427 | <reponame>DmitryUlyanov/deeppy
from .infimnist import InfiMNIST
from .mnist import MNIST
from .mvsc import MVSC
from .cifar10 import CIFAR10
| from .infimnist import InfiMNIST
from .mnist import MNIST
from .mvsc import MVSC
from .cifar10 import CIFAR10 | none | 1 | 1.14747 | 1 | |
tltorch/tensor_hooks/_tensor_lasso.py | colehawkins/torch | 27 | 6614428 | import tensorly as tl
tl.set_backend('pytorch')
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from ..factorized_tensors import TuckerTensor, TTTensor, CPTensor
from ..utils import ParameterList
# Author: <NAME>
# License: BSD 3 clause
class TensorLasso:
"""Generalized Tensor Lasso on factorized tensors
Applies a generalized Lasso (l1 regularization) on a factorized tensor.
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
Examples
--------
First you need to create an instance of the regularizer:
>>> regularizer = tensor_lasso(factorization='cp')
You can apply the regularizer to one or several layers:
>>> trl = TRL((5, 5), (5, 5), rank='same')
>>> trl2 = TRL((5, 5), (2, ), rank='same')
>>> regularizer.apply(trl.weight)
>>> regularizer.apply(trl2.weight)
The lasso is automatically applied:
>>> x = trl(x)
>>> pred = trl2(x)
>>> loss = your_loss_function(pred)
Add the Lasso loss:
>>> loss = loss + regularizer.loss
You can now backpropagate through your loss as usual:
>>> loss.backwards()
After you finish updating the weights, don't forget to reset the regularizer,
otherwise it will keep accumulating values!
>>> loss.reset()
You can also remove the regularizer with `regularizer.remove(trl)`.
"""
_factorizations = dict()
def __init_subclass__(cls, factorization, **kwargs):
"""When a subclass is created, register it in _factorizations"""
cls._factorizations[factorization.__name__] = cls
def __init__(self, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
self.penalty = penalty
self.clamp_weights = clamp_weights
self.threshold = threshold
self.normalize_loss = normalize_loss
# Initialize the counters
self.reset()
def reset(self):
"""Reset the loss, should be called at the end of each iteration.
"""
self._loss = 0
self.n_element = 0
@property
def loss(self):
"""Returns the current Lasso (l1) loss for the layers that have been called so far.
Returns
-------
float
l1 regularization on the tensor layers the regularization has been applied to.
"""
if self.n_element == 0:
warnings.warn('The L1Regularization was not applied to any weights.')
return 0
elif self.normalize_loss:
return self.penalty*self._loss/self.n_element
else:
return self.penalty*self._loss
def __call__(self, module, input, tucker_tensor):
raise NotImplementedError
def apply_lasso(self, tucker_tensor, lasso_weights):
"""Applies the lasso to a decomposed tensor
"""
raise NotImplementedError
@classmethod
def from_factorization(cls, factorization, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
return cls.from_factorization_name(factorization.__class__.__name__, penalty=penalty,
clamp_weights=clamp_weights, threshold=threshold, normalize_loss=normalize_loss)
@classmethod
def from_factorization_name(cls, factorization_name, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
cls = cls._factorizations[factorization_name]
lasso = cls(penalty=penalty, clamp_weights=clamp_weights, threshold=threshold, normalize_loss=normalize_loss)
return lasso
def remove(self, module):
raise NotImplementedError
class CPLasso(TensorLasso, factorization=CPTensor):
"""Decomposition Hook for Tensor Lasso on CP tensors
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
"""
def __call__(self, module, input, cp_tensor):
"""CP already includes weights, we'll just take their l1 norm
"""
weights = getattr(module, 'lasso_weights')
with torch.no_grad():
if self.clamp_weights:
weights.data = torch.clamp(weights.data, -1, 1)
setattr(module, 'lasso_weights', weights)
if self.threshold:
weights.data = F.threshold(weights.data, threshold=self.threshold, value=0, inplace=True)
setattr(module, 'lasso_weights', weights)
self.n_element += weights.numel()
self._loss = self._loss + self.penalty*torch.norm(weights, 1)
return cp_tensor
def apply(self, module):
"""Apply an instance of the L1Regularizer to a tensor module
Parameters
----------
module : TensorModule
module on which to add the regularization
Returns
-------
TensorModule (with Regularization hook)
"""
context = tl.context(module.factors[0])
lasso_weights = nn.Parameter(torch.ones(module.rank, **context))
setattr(module, 'lasso_weights', lasso_weights)
module.register_forward_hook(self)
return module
def remove(self, module):
delattr(module, 'lasso_weights')
def set_weights(self, module, value):
with torch.no_grad():
module.lasso_weights.data.fill_(value)
class TuckerLasso(TensorLasso, factorization=TuckerTensor):
"""Decomposition Hook for Tensor Lasso on Tucker tensors
Applies a generalized Lasso (l1 regularization) on the tensor layers the regularization it is applied to.
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
"""
_log = []
def __call__(self, module, input, tucker_tensor):
lasso_weights = getattr(module, 'lasso_weights')
order = len(lasso_weights)
with torch.no_grad():
for i in range(order):
if self.clamp_weights:
lasso_weights[i].data = torch.clamp(lasso_weights[i].data, -1, 1)
if self.threshold:
lasso_weights[i] = F.threshold(lasso_weights[i], threshold=self.threshold, value=0, inplace=True)
setattr(module, 'lasso_weights', lasso_weights)
for weight in lasso_weights:
self.n_element += weight.numel()
self._loss = self._loss + torch.sum(torch.abs(weight))
return self.apply_lasso(tucker_tensor, lasso_weights)
def apply_lasso(self, tucker_tensor, lasso_weights):
"""Applies the lasso to a decomposed tensor
"""
factors = tucker_tensor.factors
factors = [factor*w for (factor, w) in zip(factors, lasso_weights)]
return TuckerTensor(tucker_tensor.core, factors)
def apply(self, module):
"""Apply an instance of the L1Regularizer to a tensor module
Parameters
----------
module : TensorModule
module on which to add the regularization
Returns
-------
TensorModule (with Regularization hook)
"""
rank = module.rank
context = tl.context(module.core)
lasso_weights = ParameterList([nn.Parameter(torch.ones(r, **context)) for r in rank])
setattr(module, 'lasso_weights', lasso_weights)
module.register_forward_hook(self)
return module
def remove(self, module):
delattr(module, 'lasso_weights')
def set_weights(self, module, value):
with torch.no_grad():
for weight in module.lasso_weights:
weight.data.fill_(value)
class TTLasso(TensorLasso, factorization=TTTensor):
"""Decomposition Hook for Tensor Lasso on TT tensors
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
"""
def __call__(self, module, input, tt_tensor):
lasso_weights = getattr(module, 'lasso_weights')
order = len(lasso_weights)
with torch.no_grad():
for i in range(order):
if self.clamp_weights:
lasso_weights[i].data = torch.clamp(lasso_weights[i].data, -1, 1)
if self.threshold:
lasso_weights[i] = F.threshold(lasso_weights[i], threshold=self.threshold, value=0, inplace=True)
setattr(module, 'lasso_weights', lasso_weights)
for weight in lasso_weights:
self.n_element += weight.numel()
self._loss = self._loss + torch.sum(torch.abs(weight))
return self.apply_lasso(tt_tensor, lasso_weights)
def apply_lasso(self, tt_tensor, lasso_weights):
"""Applies the lasso to a decomposed tensor
"""
factors = tt_tensor.factors
factors = [factor*w for (factor, w) in zip(factors, lasso_weights)] + [factors[-1]]
return TTTensor(factors)
def apply(self, module):
"""Apply an instance of the L1Regularizer to a tensor module
Parameters
----------
module : TensorModule
module on which to add the regularization
Returns
-------
TensorModule (with Regularization hook)
"""
rank = module.rank[1:-1]
lasso_weights = ParameterList([nn.Parameter(torch.ones(1, 1, r)) for r in rank])
setattr(module, 'lasso_weights', lasso_weights)
handle = module.register_forward_hook(self)
return module
def remove(self, module):
"""Remove the Regularization from a module.
"""
delattr(module, 'lasso_weights')
def set_weights(self, module, value):
with torch.no_grad():
for weight in module.lasso_weights:
weight.data.fill_(value)
def tensor_lasso(factorization='CP', penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
"""Generalized Tensor Lasso from a factorized tensors
Applies a generalized Lasso (l1 regularization) on a factorized tensor.
Parameters
----------
factorization : str
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
Examples
--------
Let's say you have a set of factorized (here, CP) tensors:
>>> tensor = FactorizedTensor.new((3, 4, 2), rank='same', factorization='CP').normal_()
>>> tensor2 = FactorizedTensor.new((5, 6, 7), rank=0.5, factorization='CP').normal_()
First you need to create an instance of the regularizer:
>>> regularizer = TensorLasso(factorization='cp', penalty=penalty)
You can apply the regularizer to one or several layers:
>>> regularizer.apply(tensor)
>>> regularizer.apply(tensor2)
The lasso is automatically applied:
>>> sum = torch.sum(tensor() + tensor2())
You can access the Lasso loss from your instance:
>>> l1_loss = regularizer.loss
You can optimize and backpropagate through your loss as usual.
After you finish updating the weights, don't forget to reset the regularizer,
otherwise it will keep accumulating values!
>>> regularizer.reset()
You can also remove the regularizer with `regularizer.remove(tensor)`,
or `remove_tensor_lasso(tensor)`.
"""
factorization = factorization.lower()
mapping = dict(cp='CPTensor', tucker='TuckerTensor', tt='TTTensor')
return TensorLasso.from_factorization_name(mapping[factorization], penalty=penalty, clamp_weights=clamp_weights,
threshold=threshold, normalize_loss=normalize_loss)
def remove_tensor_lasso(factorized_tensor):
"""Removes the tensor lasso from a TensorModule
Parameters
----------
factorized_tensor : tltorch.FactorizedTensor
the tensor module parametrized by the tensor decomposition to which to apply tensor dropout
Examples
--------
>>> tensor = FactorizedTensor.new((3, 4, 2), rank=0.5, factorization='CP').normal_()
>>> tensor = tensor_lasso(tensor, p=0.5)
>>> remove_tensor_lasso(tensor)
"""
for key, hook in factorized_tensor._forward_hooks.items():
if isinstance(hook, TensorLasso):
hook.remove(factorized_tensor)
del factorized_tensor._forward_hooks[key]
return factorized_tensor
raise ValueError(f'TensorLasso not found in factorized tensor {factorized_tensor}')
| import tensorly as tl
tl.set_backend('pytorch')
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from ..factorized_tensors import TuckerTensor, TTTensor, CPTensor
from ..utils import ParameterList
# Author: <NAME>
# License: BSD 3 clause
class TensorLasso:
"""Generalized Tensor Lasso on factorized tensors
Applies a generalized Lasso (l1 regularization) on a factorized tensor.
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
Examples
--------
First you need to create an instance of the regularizer:
>>> regularizer = tensor_lasso(factorization='cp')
You can apply the regularizer to one or several layers:
>>> trl = TRL((5, 5), (5, 5), rank='same')
>>> trl2 = TRL((5, 5), (2, ), rank='same')
>>> regularizer.apply(trl.weight)
>>> regularizer.apply(trl2.weight)
The lasso is automatically applied:
>>> x = trl(x)
>>> pred = trl2(x)
>>> loss = your_loss_function(pred)
Add the Lasso loss:
>>> loss = loss + regularizer.loss
You can now backpropagate through your loss as usual:
>>> loss.backwards()
After you finish updating the weights, don't forget to reset the regularizer,
otherwise it will keep accumulating values!
>>> loss.reset()
You can also remove the regularizer with `regularizer.remove(trl)`.
"""
_factorizations = dict()
def __init_subclass__(cls, factorization, **kwargs):
"""When a subclass is created, register it in _factorizations"""
cls._factorizations[factorization.__name__] = cls
def __init__(self, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
self.penalty = penalty
self.clamp_weights = clamp_weights
self.threshold = threshold
self.normalize_loss = normalize_loss
# Initialize the counters
self.reset()
def reset(self):
"""Reset the loss, should be called at the end of each iteration.
"""
self._loss = 0
self.n_element = 0
@property
def loss(self):
"""Returns the current Lasso (l1) loss for the layers that have been called so far.
Returns
-------
float
l1 regularization on the tensor layers the regularization has been applied to.
"""
if self.n_element == 0:
warnings.warn('The L1Regularization was not applied to any weights.')
return 0
elif self.normalize_loss:
return self.penalty*self._loss/self.n_element
else:
return self.penalty*self._loss
def __call__(self, module, input, tucker_tensor):
raise NotImplementedError
def apply_lasso(self, tucker_tensor, lasso_weights):
"""Applies the lasso to a decomposed tensor
"""
raise NotImplementedError
@classmethod
def from_factorization(cls, factorization, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
return cls.from_factorization_name(factorization.__class__.__name__, penalty=penalty,
clamp_weights=clamp_weights, threshold=threshold, normalize_loss=normalize_loss)
@classmethod
def from_factorization_name(cls, factorization_name, penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
cls = cls._factorizations[factorization_name]
lasso = cls(penalty=penalty, clamp_weights=clamp_weights, threshold=threshold, normalize_loss=normalize_loss)
return lasso
def remove(self, module):
raise NotImplementedError
class CPLasso(TensorLasso, factorization=CPTensor):
"""Decomposition Hook for Tensor Lasso on CP tensors
Parameters
----------
penalty : float, default is 0.01
scaling factor for the loss
clamp_weights : bool, default is True
if True, the lasso weights are clamp between -1 and 1
threshold : float, default is 1e-6
if a lasso weight is lower than the set threshold, it is set to 0
normalize_loss : bool, default is True
If True, the loss will be between 0 and 1.
Otherwise, the raw sum of absolute weights will be returned.
"""
def __call__(self, module, input, cp_tensor):
"""CP already includes weights, we'll just take their l1 norm
"""
weights = getattr(module, 'lasso_weights')
with torch.no_grad():
if self.clamp_weights:
weights.data = torch.clamp(weights.data, -1, 1)
setattr(module, 'lasso_weights', weights)
if self.threshold:
weights.data = F.threshold(weights.data, threshold=self.threshold, value=0, inplace=True)
setattr(module, 'lasso_weights', weights)
self.n_element += weights.numel()
self._loss = self._loss + self.penalty*torch.norm(weights, 1)
return cp_tensor
def apply(self, module):
"""Apply an instance of the L1Regularizer to a tensor module
Parameters
----------
module : TensorModule
module on which to add the regularization
Returns
-------
TensorModule (with Regularization hook)
"""
context = tl.context(module.factors[0])
lasso_weights = nn.Parameter(torch.ones(module.rank, **context))
setattr(module, 'lasso_weights', lasso_weights)
module.register_forward_hook(self)
return module
def remove(self, module):
delattr(module, 'lasso_weights')
def set_weights(self, module, value):
with torch.no_grad():
module.lasso_weights.data.fill_(value)
class TuckerLasso(TensorLasso, factorization=TuckerTensor):
    """Decomposition Hook for Tensor Lasso on Tucker tensors

    Applies a generalized Lasso (l1 regularization) on the tensor layers the regularization it is applied to.

    Parameters
    ----------
    penalty : float, default is 0.01
        scaling factor for the loss
    clamp_weights : bool, default is True
        if True, the lasso weights are clamp between -1 and 1
    threshold : float, default is 1e-6
        if a lasso weight is lower than the set threshold, it is set to 0
    normalize_loss : bool, default is True
        If True, the loss will be between 0 and 1.
        Otherwise, the raw sum of absolute weights will be returned.
    """
    # NOTE(review): mutable class attribute, shared by every instance of
    # TuckerLasso; it is not used anywhere in this class (and the CP/TT
    # variants do not define it) -- confirm it can be removed.
    _log = []

    def __call__(self, module, input, tucker_tensor):
        """Forward hook: sanitise the per-mode lasso weights, accumulate the
        l1 loss and return the Tucker tensor with the lasso applied.
        """
        lasso_weights = getattr(module, 'lasso_weights')
        order = len(lasso_weights)
        with torch.no_grad():
            for i in range(order):
                if self.clamp_weights:
                    # Keep each weight vector inside [-1, 1] (writes through .data)
                    lasso_weights[i].data = torch.clamp(lasso_weights[i].data, -1, 1)
                if self.threshold:
                    # NOTE(review): unlike the clamping above, this rebinds the
                    # ParameterList entry to the thresholded tensor instead of
                    # writing through `.data` -- confirm this is intended.
                    lasso_weights[i] = F.threshold(lasso_weights[i], threshold=self.threshold, value=0, inplace=True)
            setattr(module, 'lasso_weights', lasso_weights)
        for weight in lasso_weights:
            self.n_element += weight.numel()
            # Raw l1 accumulation; presumably the scaling by `penalty` happens
            # where the loss is consumed (contrast with the CP hook, which
            # multiplies by `self.penalty` directly) -- TODO confirm.
            self._loss = self._loss + torch.sum(torch.abs(weight))
        return self.apply_lasso(tucker_tensor, lasso_weights)

    def apply_lasso(self, tucker_tensor, lasso_weights):
        """Applies the lasso to a decomposed tensor

        Each factor is rescaled by its mode's lasso weights; the core is
        left untouched.
        """
        factors = tucker_tensor.factors
        factors = [factor*w for (factor, w) in zip(factors, lasso_weights)]
        return TuckerTensor(tucker_tensor.core, factors)

    def apply(self, module):
        """Apply an instance of the L1Regularizer to a tensor module

        One weight vector of length ``rank[i]`` is created per mode, on the
        same device/dtype as the module's core.

        Parameters
        ----------
        module : TensorModule
            module on which to add the regularization

        Returns
        -------
        TensorModule (with Regularization hook)
        """
        rank = module.rank
        context = tl.context(module.core)
        lasso_weights = ParameterList([nn.Parameter(torch.ones(r, **context)) for r in rank])
        setattr(module, 'lasso_weights', lasso_weights)
        module.register_forward_hook(self)
        return module

    def remove(self, module):
        """Remove the lasso weights attached by :meth:`apply` (the forward
        hook itself is deregistered by ``remove_tensor_lasso``)."""
        delattr(module, 'lasso_weights')

    def set_weights(self, module, value):
        """Fill every lasso weight of *module* with the scalar ``value`` (in-place, no grad)."""
        with torch.no_grad():
            for weight in module.lasso_weights:
                weight.data.fill_(value)
class TTLasso(TensorLasso, factorization=TTTensor):
    """Decomposition Hook for Tensor Lasso on TT tensors

    Parameters
    ----------
    penalty : float, default is 0.01
        scaling factor for the loss
    clamp_weights : bool, default is True
        if True, the lasso weights are clamp between -1 and 1
    threshold : float, default is 1e-6
        if a lasso weight is lower than the set threshold, it is set to 0
    normalize_loss : bool, default is True
        If True, the loss will be between 0 and 1.
        Otherwise, the raw sum of absolute weights will be returned.
    """
    def __call__(self, module, input, tt_tensor):
        """Forward hook: sanitise the per-rank lasso weights, accumulate the
        l1 loss and return the TT tensor with the lasso applied.
        """
        lasso_weights = getattr(module, 'lasso_weights')
        order = len(lasso_weights)
        with torch.no_grad():
            for i in range(order):
                if self.clamp_weights:
                    # Keep each weight inside [-1, 1] (writes through .data)
                    lasso_weights[i].data = torch.clamp(lasso_weights[i].data, -1, 1)
                if self.threshold:
                    # Zero-out weights whose value is below the threshold
                    lasso_weights[i] = F.threshold(lasso_weights[i], threshold=self.threshold, value=0, inplace=True)
            setattr(module, 'lasso_weights', lasso_weights)
        for weight in lasso_weights:
            self.n_element += weight.numel()
            # Raw l1 accumulation (the `penalty` scaling is applied where the
            # loss is consumed, consistently with the Tucker hook).
            self._loss = self._loss + torch.sum(torch.abs(weight))
        return self.apply_lasso(tt_tensor, lasso_weights)

    def apply_lasso(self, tt_tensor, lasso_weights):
        """Applies the lasso to a decomposed tensor

        The first ``order - 1`` cores are rescaled by their corresponding
        weight (``zip`` stops at the shorter sequence); the last core is
        appended unchanged, since there is one weight per *internal* TT
        rank only.
        """
        factors = tt_tensor.factors
        factors = [factor*w for (factor, w) in zip(factors, lasso_weights)] + [factors[-1]]
        return TTTensor(factors)

    def apply(self, module):
        """Apply an instance of the L1Regularizer to a tensor module

        Parameters
        ----------
        module : TensorModule
            module on which to add the regularization

        Returns
        -------
        TensorModule (with Regularization hook)
        """
        # Only the internal ranks get a weight: the boundary ranks of a TT
        # decomposition are excluded.
        rank = module.rank[1:-1]
        # Fix: create the weights with the same dtype/device as the module's
        # factors (the CP and Tucker hooks already do this; previously the TT
        # weights were always created on CPU with the default dtype).
        context = tl.context(module.factors[0])
        lasso_weights = ParameterList([nn.Parameter(torch.ones(1, 1, r, **context)) for r in rank])
        setattr(module, 'lasso_weights', lasso_weights)
        module.register_forward_hook(self)
        return module

    def remove(self, module):
        """Remove the Regularization from a module.

        Deletes the ``lasso_weights`` attached by :meth:`apply`; the forward
        hook itself is deregistered by ``remove_tensor_lasso``.
        """
        delattr(module, 'lasso_weights')

    def set_weights(self, module, value):
        """Fill every lasso weight of *module* with the scalar ``value`` (in-place, no grad)."""
        with torch.no_grad():
            for weight in module.lasso_weights:
                weight.data.fill_(value)
def tensor_lasso(factorization='CP', penalty=0.01, clamp_weights=True, threshold=1e-6, normalize_loss=True):
    """Generalized Tensor Lasso from a factorized tensors

    Applies a generalized Lasso (l1 regularization) on a factorized tensor.

    Parameters
    ----------
    factorization : str
        one of {'CP', 'Tucker', 'TT'} (case insensitive)
    penalty : float, default is 0.01
        scaling factor for the loss
    clamp_weights : bool, default is True
        if True, the lasso weights are clamp between -1 and 1
    threshold : float, default is 1e-6
        if a lasso weight is lower than the set threshold, it is set to 0
    normalize_loss : bool, default is True
        If True, the loss will be between 0 and 1.
        Otherwise, the raw sum of absolute weights will be returned.

    Raises
    ------
    ValueError
        if ``factorization`` is not one of the supported decompositions.

    Examples
    --------
    Let's say you have a set of factorized (here, CP) tensors:

    >>> tensor = FactorizedTensor.new((3, 4, 2), rank='same', factorization='CP').normal_()
    >>> tensor2 = FactorizedTensor.new((5, 6, 7), rank=0.5, factorization='CP').normal_()

    First you need to create an instance of the regularizer:

    >>> regularizer = tensor_lasso(factorization='cp', penalty=0.01)

    You can apply the regularizer to one or several layers:

    >>> regularizer.apply(tensor)
    >>> regularizer.apply(tensor2)

    The lasso is automatically applied:

    >>> sum = torch.sum(tensor() + tensor2())

    You can access the Lasso loss from your instance:

    >>> l1_loss = regularizer.loss

    You can optimize and backpropagate through your loss as usual.

    After you finish updating the weights, don't forget to reset the regularizer,
    otherwise it will keep accumulating values!

    >>> regularizer.reset()

    You can also remove the regularizer with `regularizer.remove(tensor)`,
    or `remove_tensor_lasso(tensor)`.
    """
    factorization = factorization.lower()
    mapping = dict(cp='CPTensor', tucker='TuckerTensor', tt='TTTensor')
    try:
        factorization_name = mapping[factorization]
    except KeyError:
        # Fail with an actionable message instead of a bare KeyError.
        raise ValueError(
            f'Got factorization={factorization!r} but expected one of {sorted(mapping)}.')
    return TensorLasso.from_factorization_name(factorization_name, penalty=penalty, clamp_weights=clamp_weights,
                                               threshold=threshold, normalize_loss=normalize_loss)
def remove_tensor_lasso(factorized_tensor):
    """Removes the tensor lasso from a TensorModule

    Deregisters the TensorLasso forward hook and deletes the lasso weights
    it attached (via the hook's own ``remove``).

    Parameters
    ----------
    factorized_tensor : tltorch.FactorizedTensor
        the tensor module parametrized by the tensor decomposition to which to apply tensor dropout

    Examples
    --------
    >>> tensor = FactorizedTensor.new((3, 4, 2), rank=0.5, factorization='CP').normal_()
    >>> regularizer = tensor_lasso(factorization='CP')
    >>> regularizer.apply(tensor)
    >>> remove_tensor_lasso(tensor)
    """
    for key, hook in factorized_tensor._forward_hooks.items():
        if isinstance(hook, TensorLasso):
            hook.remove(factorized_tensor)
            # Deleting during iteration is safe here because we return
            # immediately afterwards (only one lasso hook is expected).
            del factorized_tensor._forward_hooks[key]
            return factorized_tensor
    raise ValueError(f'TensorLasso not found in factorized tensor {factorized_tensor}')
| en | 0.797471 | # Author: <NAME> # License: BSD 3 clause Generalized Tensor Lasso on factorized tensors Applies a generalized Lasso (l1 regularization) on a factorized tensor. Parameters ---------- penalty : float, default is 0.01 scaling factor for the loss clamp_weights : bool, default is True if True, the lasso weights are clamp between -1 and 1 threshold : float, default is 1e-6 if a lasso weight is lower than the set threshold, it is set to 0 normalize_loss : bool, default is True If True, the loss will be between 0 and 1. Otherwise, the raw sum of absolute weights will be returned. Examples -------- First you need to create an instance of the regularizer: >>> regularizer = tensor_lasso(factorization='cp') You can apply the regularizer to one or several layers: >>> trl = TRL((5, 5), (5, 5), rank='same') >>> trl2 = TRL((5, 5), (2, ), rank='same') >>> regularizer.apply(trl.weight) >>> regularizer.apply(trl2.weight) The lasso is automatically applied: >>> x = trl(x) >>> pred = trl2(x) >>> loss = your_loss_function(pred) Add the Lasso loss: >>> loss = loss + regularizer.loss You can now backpropagate through your loss as usual: >>> loss.backwards() After you finish updating the weights, don't forget to reset the regularizer, otherwise it will keep accumulating values! >>> loss.reset() You can also remove the regularizer with `regularizer.remove(trl)`. When a subclass is created, register it in _factorizations # Initialize the counters Reset the loss, should be called at the end of each iteration. Returns the current Lasso (l1) loss for the layers that have been called so far. Returns ------- float l1 regularization on the tensor layers the regularization has been applied to. 
Applies the lasso to a decomposed tensor Decomposition Hook for Tensor Lasso on CP tensors Parameters ---------- penalty : float, default is 0.01 scaling factor for the loss clamp_weights : bool, default is True if True, the lasso weights are clamp between -1 and 1 threshold : float, default is 1e-6 if a lasso weight is lower than the set threshold, it is set to 0 normalize_loss : bool, default is True If True, the loss will be between 0 and 1. Otherwise, the raw sum of absolute weights will be returned. CP already includes weights, we'll just take their l1 norm Apply an instance of the L1Regularizer to a tensor module Parameters ---------- module : TensorModule module on which to add the regularization Returns ------- TensorModule (with Regularization hook) Decomposition Hook for Tensor Lasso on Tucker tensors Applies a generalized Lasso (l1 regularization) on the tensor layers the regularization it is applied to. Parameters ---------- penalty : float, default is 0.01 scaling factor for the loss clamp_weights : bool, default is True if True, the lasso weights are clamp between -1 and 1 threshold : float, default is 1e-6 if a lasso weight is lower than the set threshold, it is set to 0 normalize_loss : bool, default is True If True, the loss will be between 0 and 1. Otherwise, the raw sum of absolute weights will be returned. Applies the lasso to a decomposed tensor Apply an instance of the L1Regularizer to a tensor module Parameters ---------- module : TensorModule module on which to add the regularization Returns ------- TensorModule (with Regularization hook) Decomposition Hook for Tensor Lasso on TT tensors Parameters ---------- penalty : float, default is 0.01 scaling factor for the loss clamp_weights : bool, default is True if True, the lasso weights are clamp between -1 and 1 threshold : float, default is 1e-6 if a lasso weight is lower than the set threshold, it is set to 0 normalize_loss : bool, default is True If True, the loss will be between 0 and 1. 
Otherwise, the raw sum of absolute weights will be returned. Applies the lasso to a decomposed tensor Apply an instance of the L1Regularizer to a tensor module Parameters ---------- module : TensorModule module on which to add the regularization Returns ------- TensorModule (with Regularization hook) Remove the Regularization from a module. Generalized Tensor Lasso from a factorized tensors Applies a generalized Lasso (l1 regularization) on a factorized tensor. Parameters ---------- factorization : str penalty : float, default is 0.01 scaling factor for the loss clamp_weights : bool, default is True if True, the lasso weights are clamp between -1 and 1 threshold : float, default is 1e-6 if a lasso weight is lower than the set threshold, it is set to 0 normalize_loss : bool, default is True If True, the loss will be between 0 and 1. Otherwise, the raw sum of absolute weights will be returned. Examples -------- Let's say you have a set of factorized (here, CP) tensors: >>> tensor = FactorizedTensor.new((3, 4, 2), rank='same', factorization='CP').normal_() >>> tensor2 = FactorizedTensor.new((5, 6, 7), rank=0.5, factorization='CP').normal_() First you need to create an instance of the regularizer: >>> regularizer = TensorLasso(factorization='cp', penalty=penalty) You can apply the regularizer to one or several layers: >>> regularizer.apply(tensor) >>> regularizer.apply(tensor2) The lasso is automatically applied: >>> sum = torch.sum(tensor() + tensor2()) You can access the Lasso loss from your instance: >>> l1_loss = regularizer.loss You can optimize and backpropagate through your loss as usual. After you finish updating the weights, don't forget to reset the regularizer, otherwise it will keep accumulating values! >>> regularizer.reset() You can also remove the regularizer with `regularizer.remove(tensor)`, or `remove_tensor_lasso(tensor)`. 
Removes the tensor lasso from a TensorModule Parameters ---------- factorized_tensor : tltorch.FactorizedTensor the tensor module parametrized by the tensor decomposition to which to apply tensor dropout Examples -------- >>> tensor = FactorizedTensor.new((3, 4, 2), rank=0.5, factorization='CP').normal_() >>> tensor = tensor_lasso(tensor, p=0.5) >>> remove_tensor_lasso(tensor) | 3.279503 | 3 |
npc_engine/services/persona_dialogue/persona_dialogue_base.py | npc-engine/npc-engine | 12 | 6614429 | <reponame>npc-engine/npc-engine
"""Module that implements persona dialogue API."""
from typing import Any, Dict, List, Tuple
from abc import abstractmethod
from npc_engine.services.base_service import BaseService
class PersonaDialogueAPI(BaseService):
    """Abstract base class for persona dialogue models.

    Concrete subclasses implement utterance generation, scripted-line
    matching and dialogue-state bookkeeping; this base class provides the
    ``step_dialogue`` orchestration on top of those primitives.
    """

    # Methods exposed through the service API.
    # NOTE(review): `end_dialogue` is part of the abstract interface but is
    # not listed here -- confirm whether it should be exposed as well.
    API_METHODS: List[str] = ["start_dialogue", "step_dialogue", "get_history"]

    def __init__(self, *args, **kwargs) -> None:
        """Empty initialization method for API to be similar to other model base classes."""
        super().__init__(*args, **kwargs)
        self.initialized = True

    @classmethod
    def get_api_name(cls) -> str:
        """Get the API name."""
        return "PersonaDialogueAPI"

    @abstractmethod
    def start_dialogue(
        self,
        name1: str = None,
        persona1: str = None,
        name2: str = None,
        persona2: str = None,
        location_name: str = None,
        location_description: str = None,
        items_of_interest: List[str] = None,
        dialogue_id: str = None,
        other: Dict[str, Any] = None,
    ) -> str:
        """Start a dialogue between two characters.

        All arguments are supposed to be natural language descriptions.

        Args:
            name1: Name of the first character.
            persona1: Persona of the first character.
            name2: Name of the second character.
            persona2: Persona of the second character.
            location_name: Name of the place where dialogue happens.
            location_description: Description of the place where dialogue happens.
            items_of_interest: List of items of interest that could be mentioned in the dialogue.
            dialogue_id: ID of the dialogue. If None it will be named automatically.
            other: Other information that could be used to start the dialogue.

        Returns:
            Dialogue id.
        """
        pass

    @abstractmethod
    def end_dialogue(self, dialogue_id: str):
        """End a dialogue between two characters.

        Args:
            dialogue_id: ID of the dialogue.
        """
        pass

    def step_dialogue(
        self,
        dialogue_id: str,
        speaker_id: str,
        utterance: str = None,
        scripted_utterances: List[str] = None,
        scripted_threshold: float = 0.5,
        update_history: bool = True,
    ) -> Tuple[str, bool]:
        """Step a dialogue between two characters.

        Args:
            dialogue_id: ID of the dialogue.
            speaker_id: ID of the speaking character.
                NOTE(review): earlier docs said "0 for the first character,
                1 for the second character" while the annotation is ``str``
                -- confirm the expected values.
            utterance: Natural language utterance. If None it will be generated.
            scripted_utterances: List of natural language utterances
                that will be matched against utterance.
            scripted_threshold: Threshold for matching scripted utterances.
            update_history: If True, the dialogue history will be updated.

        Returns:
            str: Next utterance.
            bool: scripted utterance triggered
        """
        # Only generate a reply when the caller did not supply one.
        if utterance is None:
            utterance = self.generate_utterance(dialogue_id, speaker_id)
        scripted = False
        if scripted_utterances is not None:
            # Replace the utterance with the closest scripted line, if any is
            # similar enough (see check_scripted_utterances).
            idx = self.check_scripted_utterances(
                utterance, scripted_utterances, scripted_threshold
            )
            if idx is not None:
                utterance = scripted_utterances[idx]
                scripted = True
        if update_history:
            self.update_dialogue(dialogue_id, speaker_id, utterance)
        return utterance, scripted

    @abstractmethod
    def generate_utterance(self, dialogue_id: str, speaker_id: str) -> str:
        """Generate an utterance for the given speaker.

        Args:
            dialogue_id: ID of the dialogue.
            speaker_id: ID of the speaking character.
        """
        pass

    @abstractmethod
    def check_scripted_utterances(
        self, utterance: str, scripted_utterances: List[str], threshold: float
    ) -> int:
        """Check if the given utterance is one of the scripted utterances.

        Args:
            utterance: Natural language utterance.
            scripted_utterances: Natural language utterances.
            threshold: [0,1] threshold for the similarity between the utterance and the scripted utterances.

        Returns:
            id of the utterance, None if the utterance is not one of the scripted utterances.
        """
        pass

    @abstractmethod
    def update_dialogue(self, dialogue_id: str, speaker_id: str, utterance: str):
        """Update dialogue state.

        Args:
            dialogue_id: ID of the dialogue.
            speaker_id: ID of the speaking character.
            utterance: Natural language utterance.
        """
        pass

    @abstractmethod
    def get_history(self, dialogue_id: str) -> List[Dict[str, Any]]:
        """Get the history of a dialogue.

        Args:
            dialogue_id: ID of the dialogue.
        """
        pass
| """Module that implements persona dialogue API."""
from typing import Any, Dict, List, Tuple
from abc import abstractmethod
from npc_engine.services.base_service import BaseService
class PersonaDialogueAPI(BaseService):
"""Abstract base class for persona dialogue models."""
API_METHODS: List[str] = ["start_dialogue", "step_dialogue", "get_history"]
def __init__(self, *args, **kwargs) -> None:
"""Empty initialization method for API to be similar to other model base classes."""
super().__init__(*args, **kwargs)
self.initialized = True
@classmethod
def get_api_name(cls) -> str:
"""Get the API name."""
return "PersonaDialogueAPI"
@abstractmethod
def start_dialogue(
self,
name1: str = None,
persona1: str = None,
name2: str = None,
persona2: str = None,
location_name: str = None,
location_description: str = None,
items_of_interest: List[str] = None,
dialogue_id: str = None,
other: Dict[str, Any] = None,
) -> str:
"""Start a dialogue between two characters.
All arguments are supposed to be natural language descriptions.
Args:
name1: Name of the first character.
persona1: Persona of the first character.
name2: Name of the second character.
persona2: Persona of the second character.
location_name: Name of the place where dialogue happens.
location_description: Description of the place where dialogue happens.
items_of_interest: List of items of interest that could be mentioned in the dialogue.
dialogue_id: ID of the dialogue. If None it will be named automatically.
other: Other information that could be used to start the dialogue.
Returns:
Dialogue id.
"""
pass
@abstractmethod
def end_dialogue(self, dialogue_id: str):
"""End a dialogue between two characters.
Args:
dialogue_id: ID of the dialogue.
"""
pass
def step_dialogue(
self,
dialogue_id: str,
speaker_id: str,
utterance: str = None,
scripted_utterances: List[str] = None,
scripted_threshold: float = 0.5,
update_history: bool = True,
) -> Tuple[str, bool]:
"""Step a dialogue between two characters.
Args:
dialogue_id: ID of the dialogue.
speaker: 0 for the first character, 1 for the second character.
utterance: Natural language utterance. If None it will be generated.
scripted_utterances: List of natural language utterances
that will be matched against utterance.
scripted_threshold: Threshold for matching scripted utterances.
update_history: If True, the dialogue history will be updated.
Returns:
str: Next utterance.
bool: scripted utterance triggered
"""
if utterance is None:
utterance = self.generate_utterance(dialogue_id, speaker_id)
scripted = False
if scripted_utterances is not None:
idx = self.check_scripted_utterances(
utterance, scripted_utterances, scripted_threshold
)
if idx is not None:
utterance = scripted_utterances[idx]
scripted = True
if update_history:
self.update_dialogue(dialogue_id, speaker_id, utterance)
return utterance, scripted
@abstractmethod
def generate_utterance(self, dialogue_id: str, speaker_id: str) -> str:
"""Generate an utterance for the given speaker.
Args:
dialogue_id: ID of the dialogue.
speaker: 0 for the first character, 1 for the second character.
"""
pass
@abstractmethod
def check_scripted_utterances(
self, utterance: str, scripted_utterances: List[str], threshold: float
) -> int:
"""Check if the given utterance is one of the scripted utterances.
Args:
utterance: Natural language utterance.
scripted_utterances: Natural language utterances.
threshold: [0,1] threshold for the similarity between the utterance and the scripted utterances.
Returns:
id of the utterance, None if the utterance is not one of the scripted utterances.
"""
pass
@abstractmethod
def update_dialogue(self, dialogue_id: str, speaker_id: str, utterance: str):
"""Update dialogue state.
Args:
dialogue_id: ID of the dialogue.
speaker_id: 0 for the first character, 1 for the second character.
utterance: Natural language utterance.
"""
pass
@abstractmethod
def get_history(self, dialogue_id: str) -> List[Dict[str, Any]]:
"""Get the history of a dialogue.
Args:
dialogue_id: ID of the dialogue.
"""
pass | en | 0.808991 | Module that implements persona dialogue API. Abstract base class for persona dialogue models. Empty initialization method for API to be similar to other model base classes. Get the API name. Start a dialogue between two characters.
All arguments are supposed to be natural language descriptions.
Args:
name1: Name of the first character.
persona1: Persona of the first character.
name2: Name of the second character.
persona2: Persona of the second character.
location_name: Name of the place where dialogue happens.
location_description: Description of the place where dialogue happens.
items_of_interest: List of items of interest that could be mentioned in the dialogue.
dialogue_id: ID of the dialogue. If None it will be named automatically.
other: Other information that could be used to start the dialogue.
Returns:
Dialogue id. End a dialogue between two characters.
Args:
dialogue_id: ID of the dialogue. Step a dialogue between two characters.
Args:
dialogue_id: ID of the dialogue.
speaker: 0 for the first character, 1 for the second character.
utterance: Natural language utterance. If None it will be generated.
scripted_utterances: List of natural language utterances
that will be matched against utterance.
scripted_threshold: Threshold for matching scripted utterances.
update_history: If True, the dialogue history will be updated.
Returns:
str: Next utterance.
bool: scripted utterance triggered Generate an utterance for the given speaker.
Args:
dialogue_id: ID of the dialogue.
speaker: 0 for the first character, 1 for the second character. Check if the given utterance is one of the scripted utterances.
Args:
utterance: Natural language utterance.
scripted_utterances: Natural language utterances.
threshold: [0,1] threshold for the similarity between the utterance and the scripted utterances.
Returns:
id of the utterance, None if the utterance is not one of the scripted utterances. Update dialogue state.
Args:
dialogue_id: ID of the dialogue.
speaker_id: 0 for the first character, 1 for the second character.
utterance: Natural language utterance. Get the history of a dialogue.
Args:
dialogue_id: ID of the dialogue. | 3.237455 | 3 |
fictions/run.py | aiApple/fictions | 0 | 6614430 | from scrapy import cmdline
cmdline.execute("scrapy crawl fiction".split()) # -s JOBDIR=job_info
| from scrapy import cmdline
cmdline.execute("scrapy crawl fiction".split()) # -s JOBDIR=job_info
| ar | 0.19601 | # -s JOBDIR=job_info | 1.921727 | 2 |
examples/examples_src/coordinates_to_brain_maps.py | RaphaelMeudec/neuroquery | 21 | 6614431 | <reponame>RaphaelMeudec/neuroquery
"""Command-line script: generate one brain map per PubMed ID from a CSV of
(x, y, z) MNI peak coordinates."""
import argparse
import pathlib
import pandas as pd
from neuroquery import img_utils

# ---- command-line interface ----
parser = argparse.ArgumentParser(
    description="Generate brain maps from (x, y, z) MNI coordinates "
    "grouped by pubmed ID",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
    "coordinates_csv",
    help=".csv file containing the coordinates. Must have columns"
    " 'pmid', 'x', 'y', and 'z'.",
)
parser.add_argument(
    "output_directory", help="directory where generated maps are saved."
)
parser.add_argument(
    "--fwhm", type=float, default=8.0, help="full width at half maximum"
)
parser.add_argument(
    "--resolution",
    type=float,
    default=4.0,
    help="resolution of created images in mm",
)
args = parser.parse_args()

out_dir = pathlib.Path(args.output_directory)
out_dir.mkdir(parents=True, exist_ok=True)
coordinates = pd.read_csv(args.coordinates_csv)

# One map per PubMed ID, resampled to the requested resolution.
# NOTE(review): `--fwhm` is parsed but never used -- confirm whether it
# should be forwarded to iter_coordinates_to_maps.
for pmid, img in img_utils.iter_coordinates_to_maps(
    coordinates, target_affine=args.resolution
):
    # NOTE(review): `img_file` appears unused (to_filename writes the file
    # as a side effect) -- confirm it can be dropped.
    img_file = img.to_filename(str(out_dir / "pmid_{}.nii.gz".format(pmid)))
print("\n")
print(out_dir)
| import argparse
import pathlib
import pandas as pd
from neuroquery import img_utils
parser = argparse.ArgumentParser(
description="Generate brain maps from (x, y, z) MNI coordinates "
"grouped by pubmed ID",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"coordinates_csv",
help=".csv file containing the coordinates. Must have columns"
" 'pmid', 'x', 'y', and 'z'.",
)
parser.add_argument(
"output_directory", help="directory where generated maps are saved."
)
parser.add_argument(
"--fwhm", type=float, default=8.0, help="full width at half maximum"
)
parser.add_argument(
"--resolution",
type=float,
default=4.0,
help="resolution of created images in mm",
)
args = parser.parse_args()
out_dir = pathlib.Path(args.output_directory)
out_dir.mkdir(parents=True, exist_ok=True)
coordinates = pd.read_csv(args.coordinates_csv)
for pmid, img in img_utils.iter_coordinates_to_maps(
coordinates, target_affine=args.resolution
):
img_file = img.to_filename(str(out_dir / "pmid_{}.nii.gz".format(pmid)))
print("\n")
print(out_dir) | none | 1 | 2.934137 | 3 | |
code/Trainer.py | bj80heyue/Learning-to-Group | 18 | 6614432 | #coding=utf-8#
import sys
import Dataset
import frame
from sys import path
path.append('libsvm/python')
from svmutil import *
def trainNewModel():
    """Train the three pairwise grouping SVMs (point-to-point, point-to-group,
    group-to-group) from the dumped training data and save them under model/.
    """
    print 'Train new model start'
    # -s 0: C-SVC, -t 0: linear kernel
    param=svm_parameter('-s 0 -t 0')
    y1,x1=svm_read_problem('data/traindata_p2p')
    y2,x2=svm_read_problem('data/traindata_p2G')
    y3,x3=svm_read_problem('data/traindata_G2G')
    prob1=svm_problem(y1,x1)
    prob2=svm_problem(y2,x2)
    prob3=svm_problem(y3,x3)
    print '....training p2p'
    model_p2p=svm_train(prob1,param)
    print '....training p2G'
    model_p2G=svm_train(prob2,param)
    print '....training G2G'
    model_G2G=svm_train(prob3,param)
    svm_save_model('model/model_p2p.model',model_p2p)
    svm_save_model('model/model_p2G.model',model_p2G)
    svm_save_model('model/model_G2G.model',model_G2G)
    print 'Train new model finished'
if __name__=='__main__':
    # Python 2 driver script: simulate albums, answer pairwise grouping
    # questions with the pre-trained SVMs, then retrain the models on the
    # data collected during the simulations.
    a=Dataset.identity_Dataset()
    a.loadAlbumList('albumList_train')
    # Single-row feature container fed to svm_predict (data[0] is replaced
    # by each observation before prediction).
    data=list(list())
    data.append([0,0,0])
    iteration=1
    while iteration<5000:
        print '====================================================='
        print 'Iter: %d'%iteration
        iteration+=1
        # Simulate a dataset of 1000 items; assumes 0.6/0.4 are mixing
        # ratios -- TODO confirm against SimulateDataset.
        dataset=a.SimulateDataset(1000,0.6,0.4)
        dataset.computeQuality()
        dataset.computeAffinity()
        f=frame.frame()
        f.loadDataset(dataset)
        model_p2p=svm_load_model('model/model_p2p.model')
        model_p2G=svm_load_model('model/model_p2G.model')
        model_G2G=svm_load_model('model/model_G2G.model')
        index=1
        while f.checkState():
            package=f.getObservation()
            # An int observation signals that no question is left.
            if type(package)==int:
                print 'Done!'
                break
            data[0]=package
            # The feature length identifies the kind of question asked.
            question_type=len(package)
            if question_type==3: #point-----point
                action,t1,t2=svm_predict([0],data, model_p2p)
                tp='P2P'
            elif question_type==3+f.k_size: #point-----Group or group---point
                action,t1,t2=svm_predict([0],data, model_p2G)
                tp='P2G'
            else:
                action,t1,t2=svm_predict([0],data, model_G2G)
                tp='G2G'
            #set action
            if action[0]==1:
                index+=1
            TF=f.setPerception(action)
            if TF==False:
                # Log disagreements between the prediction and the frame state.
                print action,index,1000,f.albumnum,f.queue.qsize(),tp,f.dataset.imgID[f.S],f.dataset.imgID[f.D],package
    # Train a new model on the collected data
    #f.Normalize_label()
    #f.showResult()
    trainNewModel()
    print 'Done'
| #coding=utf-8#
import sys
import Dataset
import frame
from sys import path
path.append('libsvm/python')
from svmutil import *
def trainNewModel():
print 'Train new model start'
param=svm_parameter('-s 0 -t 0')
y1,x1=svm_read_problem('data/traindata_p2p')
y2,x2=svm_read_problem('data/traindata_p2G')
y3,x3=svm_read_problem('data/traindata_G2G')
prob1=svm_problem(y1,x1)
prob2=svm_problem(y2,x2)
prob3=svm_problem(y3,x3)
print '....training p2p'
model_p2p=svm_train(prob1,param)
print '....training p2G'
model_p2G=svm_train(prob2,param)
print '....training G2G'
model_G2G=svm_train(prob3,param)
svm_save_model('model/model_p2p.model',model_p2p)
svm_save_model('model/model_p2G.model',model_p2G)
svm_save_model('model/model_G2G.model',model_G2G)
print 'Train new model finished'
if __name__=='__main__':
a=Dataset.identity_Dataset()
a.loadAlbumList('albumList_train')
data=list(list())
data.append([0,0,0])
iteration=1
while iteration<5000:
print '====================================================='
print 'Iter: %d'%iteration
iteration+=1
dataset=a.SimulateDataset(1000,0.6,0.4)
dataset.computeQuality()
dataset.computeAffinity()
f=frame.frame()
f.loadDataset(dataset)
model_p2p=svm_load_model('model/model_p2p.model')
model_p2G=svm_load_model('model/model_p2G.model')
model_G2G=svm_load_model('model/model_G2G.model')
index=1
while f.checkState():
package=f.getObservation()
if type(package)==int:
print 'Done!'
break
data[0]=package
question_type=len(package)
if question_type==3: #point-----point
action,t1,t2=svm_predict([0],data, model_p2p)
tp='P2P'
elif question_type==3+f.k_size: #point-----Group or group---point
action,t1,t2=svm_predict([0],data, model_p2G)
tp='P2G'
else:
action,t1,t2=svm_predict([0],data, model_G2G)
tp='G2G'
#set action
if action[0]==1:
index+=1
TF=f.setPerception(action)
if TF==False:
print action,index,1000,f.albumnum,f.queue.qsize(),tp,f.dataset.imgID[f.S],f.dataset.imgID[f.D],package
#训练新模型
#f.Normalize_label()
#f.showResult()
trainNewModel()
print 'Done'
| en | 0.320558 | #coding=utf-8# #point-----point #point-----Group or group---point #set action #训练新模型 #f.Normalize_label() #f.showResult() | 2.757021 | 3 |
acli/plugin_cmd_mgr.py | odzzi/acli | 0 | 6614433 | import os,re
from cliff.commandmanager import CommandManager
from cliff.command import Command
def make_cmd(cmd):
    """Build a cliff Command subclass that runs the shell command described
    by the `cmd` dict (keys used: 'desc', 'paras', 'cmd' and, optionally,
    'interactive').
    """
    class CmdApp(Command):
        interactive = False
        def get_description(self):
            return str(cmd['desc'])
        def get_parser(self, prog_name):
            """Return an :class:`argparse.ArgumentParser`.
            """
            if "interactive" in cmd:
                self.interactive = cmd["interactive"]
            parser = super(CmdApp, self).get_parser(prog_name)
            # Each declared parameter becomes an optional positional
            # argument defaulting to '.'.
            for para in cmd['paras']:
                parser.add_argument(para, nargs='?', default='.')
            return parser
        def take_action(self, parsed_args):
            # WARNING: cmd['cmd'] is %-interpolated with user-supplied
            # arguments and executed through the shell (os.popen) -- shell
            # injection is possible if arguments are untrusted.
            po = os.popen(cmd['cmd'] % vars(parsed_args))
            ret_text = po.read()
            print po.name, ret_text
    return CmdApp
def loadCmds(path):
    """Load command definitions from a JSON file.

    Parameters
    ----------
    path : str
        Path to a JSON file containing a list of command dicts
        (with at least the keys consumed by make_cmd: 'name', 'desc',
        'cmd', 'paras').

    Returns
    -------
    The deserialized JSON content (a list of command dicts).
    """
    import json
    # Fix: use a context manager so the file handle is closed promptly
    # (previously the handle from open() was never closed explicitly).
    with open(path, "r") as json_file:
        return json.load(json_file)
class PluginCommandManager(CommandManager):
    """CommandManager that registers every command described in cmds.json."""
    def load_commands(self, namespace):
        # Build one cliff Command class per JSON command description.
        for spec in loadCmds("cmds.json"):
            self.add_command(spec['name'], make_cmd(spec))
| import os,re
from cliff.commandmanager import CommandManager
from cliff.command import Command
def make_cmd(cmd):
class CmdApp(Command):
interactive = False
def get_description(self):
return str(cmd['desc'])
def get_parser(self, prog_name):
"""Return an :class:`argparse.ArgumentParser`.
"""
if "interactive" in cmd:
self.interactive = cmd["interactive"]
parser = super(CmdApp, self).get_parser(prog_name)
for para in cmd['paras']:
parser.add_argument(para, nargs='?', default='.')
return parser
def take_action(self, parsed_args):
po = os.popen(cmd['cmd'] % vars(parsed_args))
ret_text = po.read()
print po.name, ret_text
return CmdApp
def loadCmds(path):
import json
cmds = json.load(open(path, "r"))
return cmds
class PluginCommandManager(CommandManager):
def load_commands(self, namespace):
cmds = loadCmds("cmds.json")
for cmd in cmds:
self.add_command(cmd['name'], make_cmd(cmd))
| es | 0.121104 | Return an :class:`argparse.ArgumentParser`. | 2.699347 | 3 |
tests/test_alt_pattern.py | tolstislon/phone_gen | 6 | 6614434 | <reponame>tolstislon/phone_gen
import phonenumbers
import pytest
from phone_gen.alt_patterns import ALT_PATTERNS
from phone_gen import PhoneNumber
@pytest.mark.phonenumbers
# Repeat each case 5 times ('count' is unused; generated numbers can vary
# between calls).
@pytest.mark.parametrize('count', range(5))
# Only entries that define both a custom 'pattern' and a 'ref' region.
@pytest.mark.parametrize('code, ref', [
    (key, value['ref']) for key, value in ALT_PATTERNS.items() if 'pattern' in value and 'ref' in value
])
def test_alt_pattern(code, ref, count):
    """A number generated from an alternative pattern must be valid for the
    reference region according to the phonenumbers library."""
    number = PhoneNumber(code).get_number()
    num_obj = phonenumbers.parse(number, code)
    assert phonenumbers.is_valid_number_for_region(num_obj, ref)
@pytest.mark.phonenumbers
# Repeat each case 5 times ('count' is unused; generated numbers can vary
# between calls).
@pytest.mark.parametrize('count', range(5))
# Only entries that define a 'ref' region but NO custom 'pattern'.
@pytest.mark.parametrize('code, ref', [
    (key, value['ref']) for key, value in ALT_PATTERNS.items() if 'pattern' not in value and 'ref' in value
])
def test_ref(code, ref, count):
    """A number generated without a custom pattern must still be valid for
    the reference region according to the phonenumbers library."""
    number = PhoneNumber(code).get_number()
    num_obj = phonenumbers.parse(number, code)
    assert phonenumbers.is_valid_number_for_region(num_obj, ref)
| import phonenumbers
import pytest
from phone_gen.alt_patterns import ALT_PATTERNS
from phone_gen import PhoneNumber
@pytest.mark.phonenumbers
@pytest.mark.parametrize('count', range(5))
@pytest.mark.parametrize('code, ref', [
(key, value['ref']) for key, value in ALT_PATTERNS.items() if 'pattern' in value and 'ref' in value
])
def test_alt_pattern(code, ref, count):
number = PhoneNumber(code).get_number()
num_obj = phonenumbers.parse(number, code)
assert phonenumbers.is_valid_number_for_region(num_obj, ref)
@pytest.mark.phonenumbers
@pytest.mark.parametrize('count', range(5))
@pytest.mark.parametrize('code, ref', [
(key, value['ref']) for key, value in ALT_PATTERNS.items() if 'pattern' not in value and 'ref' in value
])
def test_ref(code, ref, count):
number = PhoneNumber(code).get_number()
num_obj = phonenumbers.parse(number, code)
assert phonenumbers.is_valid_number_for_region(num_obj, ref) | none | 1 | 2.461854 | 2 | |
src/WebApp/SUPERSTAR/config.py | abradle/ccf | 1 | 6614435 | from django.db import models
def find_allowed_ph4(item_dict):
    """Collect the distinct ph4 feature names from every key except 'unused'."""
    allowed = set()
    for key, values in item_dict.items():
        # Entries bucketed under 'unused' are deliberately excluded.
        if key == "unused":
            continue
        allowed.update(values)
    return list(allowed)
class SuperStarConfig():
    """Model to store the required config information for SUPERSTAR"""
    def __init__(self):
        # First the path
        # Hard-coded Windows working directories: protein inputs, SuperStar
        # .ins control files and generated output files respectively.
        self.base_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\PROTEINS"
        self.ins_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\INS_FILES"
        self.out_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\OUT_FILES"
        # Location of the SuperStar 2.1.2 executable (CCDC install).
        self.superstar_path = r"C:\Program Files (x86)\CCDC\SuperStar 2.1.2\bin\superstar_app.exe"
        # Maps a short probe key to SuperStar's probe atom-type name.
        self.probe_dict = {'ali': 'ALIPHATIC CH CARBON', 'aro':'AROMATIC CH CARBON', 'uncharged':'UNCHARGED NH NITROGEN', 'carbonyl_O':'CARBONYL OXYGEN', 'carboxylate': 'CARBOXYLATE OXYGEN', 'charged':'CHARGED NH NITROGEN', 'water':'WATER OXYGEN'}
        # Groups pharmacophore feature names under each probe key; the
        # 'unused' bucket is excluded when building allowed_ph4 below.
        self.type_dict = {'unused': ['SmallHalogen','Cyano','Imidazole','Guanidine'],
                          'water': [],
                          'ali': ['RH6_6','iPropyl','Methyl','RH3_3', 'ThreeWayAttach'],
                          'uncharged': ['SingleAtomDonor'],
                          'carbonyl_O': ['SingleAtomAcceptor','Carbonyl'],
                          'aro': ['Arom5','Arom6'],
                          'carboxylate': ['AcidicGroup'],
                          'charged': ['BasicGroup','PosN']
                          }
        # Flat, de-duplicated list of every feature name outside 'unused'.
        self.allowed_ph4 = find_allowed_ph4(self.type_dict)
def find_allowed_ph4(item_dict):
"""Function to find allowed ph4"""
# Create the out list to return
out_list = []
# Loop through the keys - except unused
for item in item_dict:
if item == "unused":
continue
out_list.extend(item_dict[item])
return list(set(out_list))
class SuperStarConfig():
"""Model to store the required config information for SUPERSTAR"""
def __init__(self):
# First the path
self.base_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\PROTEINS"
self.ins_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\INS_FILES"
self.out_path = "W:\\Informatics\\Pharmacophore\\anthony\\DPhil\\CODE\\CHOC\\src\\WebApp\\SUPERSTAR\\OUT_FILES"
self.superstar_path = r"C:\Program Files (x86)\CCDC\SuperStar 2.1.2\bin\superstar_app.exe"
self.probe_dict = {'ali': 'ALIPHATIC CH CARBON', 'aro':'AROMATIC CH CARBON', 'uncharged':'UNCHARGED NH NITROGEN', 'carbonyl_O':'CARBONYL OXYGEN', 'carboxylate': 'CARBOXYLATE OXYGEN', 'charged':'CHARGED NH NITROGEN', 'water':'WATER OXYGEN'}
self.type_dict = {'unused': ['SmallHalogen','Cyano','Imidazole','Guanidine'],
'water': [],
'ali': ['RH6_6','iPropyl','Methyl','RH3_3', 'ThreeWayAttach'],
'uncharged': ['SingleAtomDonor'],
'carbonyl_O': ['SingleAtomAcceptor','Carbonyl'],
'aro': ['Arom5','Arom6'],
'carboxylate': ['AcidicGroup'],
'charged': ['BasicGroup','PosN']
}
self.allowed_ph4 = find_allowed_ph4(self.type_dict) | en | 0.738108 | Function to find allowed ph4 # Create the out list to return # Loop through the keys - except unused Model to store the required config information for SUPERSTAR # First the path | 2.630949 | 3 |
src/ssmrandom/__init__.py | leifj/ssmrandom | 2 | 6614436 | <gh_stars>1-10
#!/usr/bin/env python
"""
Usage: ssmrandom {recv|send|rawsend} [options]+ [host IP(only for recv)]+
Common options:
-h print this message
-v print version information
-g SSM multicast group (in 172.16.31.10/8)
-i local bind address
-p port
-f stay in the foreground (and log to stderr)
-F do not detatch (but otherwize act as a daemon) - useful for init
-P <pidfile>
-L <loglevel> set logging level (e.g. DEBUG)
recv options:
-s buffer size
-o ouput PIPE
send and rawsend options:
-t TTL
-s number of bytes of entropy payload
-r input entropy device
ssmrandom can be operated in either send or receive mode. In send mode it will
read data from the input entropy device and will transmit it framed (except when
using rawsend) as JSON objects on a multicast group in SSM address space. In
receive mode ssmrandom will receive a random sample (using random sleep intervals
between 1 and 20 seconds) of such SSM messages and will write the entropy
payload to a PIPE where it can be consumed by rngd from the rng-tools package.
BUGS: only ipv4 is supported
NOTE that you may need to enable igmpv3 on your network for SSM to work.
"""
from logging import StreamHandler
from ssmrandom.pidfile import PidFile
__author__ = 'leifj'
import socket
import json
import os
import base64
import logging
import getopt
import sys
import random
import time
from logging.handlers import SysLogHandler
import daemon
import lockfile
if not hasattr(socket, 'IP_MULTICAST_TTL'):
setattr(socket, 'IP_MULTICAST_TTL', 33)
if not hasattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP'):
setattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP', 39)
VERSION = "0.3"
PROTOCOL_VERSION = "1.0"
SSM_GROUP = '192.168.127.12'
SSM_PORT = '49999'
ENTROPY_DEVICE='/dev/urandom'
RNGD_PIPE = "/var/run/ssm-rng-pipe"
BUFSZ= "4096"
MSGSZ = "1024"
LOGLEVEL = "WARNING"
MCTTL = "32"
PIDFILE = '/var/run/ssmrandom.pid'
def _setup_logging(level,foreground=False):
loglevel = getattr(logging, level.upper(), None)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: %s' % loglevel)
if foreground:
handler = StreamHandler()
else:
handler = SysLogHandler(address='/dev/log',facility=SysLogHandler.LOG_DAEMON)
pid = os.getpid()
formatter = logging.Formatter('ssmrandom['+str(pid)+'] %(message)s')
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.root.setLevel(loglevel)
def usage():
    # The module docstring doubles as the command-line help text.
    print(__doc__)
def _sender(s,group,bufsz,src,level,foreground):
    """Transmit loop: read *bufsz* bytes at a time from the entropy device
    *src* and send them forever to the connected SSM socket *s*.

    In 'send' mode the payload is wrapped in a JSON envelope
    {'v': protocol, 's': source, 'd': base64(data)}; in 'rawsend' mode the
    raw bytes go out as-is.  The mode is taken from sys.argv[1].
    """
    _setup_logging(level,foreground)
    with open(src) as fd:
        logging.info("entropy SSM transmitter v%s starting..." % VERSION)
        while True:
            try:
                logging.debug("about to read from %s" % src)
                d = fd.read(bufsz)
                if sys.argv[1] == 'send':
                    e = base64.b64encode(d)
                    msg = {'v': PROTOCOL_VERSION, 's': src, 'd': e}
                    s.send(json.dumps(msg))
                else: # rawsend
                    s.send(d)
                logging.debug("sending %d bytes of entropy to SSM:@%s" % (len(d), group))
            except KeyboardInterrupt,ex:
                # Propagate Ctrl-C so main() can exit cleanly.
                raise ex
            except Exception, ex:
                # Best-effort daemon: log the failure and keep transmitting.
                logging.warning(ex)
                pass
def _receiver(s,group,bufsz,dst,level,foreground):
    """Receive loop: read JSON entropy messages from SSM socket *s*,
    decode the base64 payload and append it to the pipe/file *dst* forever.

    Sleeps a random 1-20 seconds between messages so each receiver only
    samples the multicast stream rather than consuming every datagram.
    """
    _setup_logging(level,foreground)
    with open(dst, "w+") as fd:
        logging.info("entropy SSM receiver v%s starting..." % VERSION)
        while True:
            try:
                msg = json.loads(s.recv(bufsz))
                data = base64.b64decode(msg['d'])
                logging.debug(msg)
                logging.info("sending %d bytes of entropy from SSM:@%s upstream" % (len(data), group))
                fd.write(data)
                # Random back-off implements the sampling described above.
                z = random.randint(1, 20)
                logging.debug("sleeping for %d seconds..." % z)
                time.sleep(z)
            except KeyboardInterrupt,ex:
                # Propagate Ctrl-C so main() can exit cleanly.
                raise ex
            except Exception, ex:
                # Log and back off briefly before retrying.
                logging.warning(ex)
                time.sleep(1)
                pass
def main():
    """Console entry point: delegate to _main() and exit quietly on Ctrl-C."""
    try:
        _main()
    except KeyboardInterrupt:
        sys.exit()
def _main():
    """Parse the command line and run the selected mode.

    sys.argv[1] selects 'recv', 'send' or 'rawsend'; remaining flags are
    parsed with getopt and defaulted from the module-level constants.
    Unless -f (foreground) is given, the process is daemonized with a
    pidfile before entering the send/receive loop.
    """
    opts = {}
    args = []
    flags = None
    if len(sys.argv) < 2:
        usage()
        sys.exit(2)
    # NOTE(review): ('recv') is a plain string, not a 1-tuple, so this is a
    # substring test -- any of 'r', 'e', 'ec', ... matches. Should be ('recv',).
    if sys.argv[1] in ('recv'):
        flags = 'vfFhL:P:g:s:i:p:o:'
    elif sys.argv[1] in ('send','rawsend'):
        flags = 'vfFhL:P:g:s:t:p:r:'
    else:
        usage()
        sys.exit()
    try:
        opts, args = getopt.getopt(sys.argv[2:], flags)
        opts = dict(opts)
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit(2)
    if '-h' in opts:
        usage()
        sys.exit()
    if '-v' in opts:
        print "ssmrandom version %s (c) NORDUnet A/S 2012" % VERSION
        sys.exit()
    # Fill in defaults for any flag the user did not supply.
    # NOTE(review): '-L' is defaulted twice -- the second call is a no-op.
    opts.setdefault('-i','0.0.0.0')
    opts.setdefault('-p',SSM_PORT)
    opts.setdefault('-o',RNGD_PIPE)
    opts.setdefault('-g',SSM_GROUP)
    opts.setdefault('-L',LOGLEVEL)
    opts.setdefault('-r',ENTROPY_DEVICE)
    opts.setdefault('-L',LOGLEVEL)
    opts.setdefault('-t',MCTTL)
    opts.setdefault('-P',PIDFILE)
    # Without -f we run as a daemon; the context is entered further down so
    # the sockets created here can be preserved across the fork.
    context = None
    if not '-f' in opts:
        context = daemon.DaemonContext(working_directory='/tmp')
        context.pidfile = PidFile(opts['-P'])
    if sys.argv[1] == 'recv':
        group = opts['-g']
        port = int(opts['-p'])
        opts.setdefault('-s',BUFSZ)
        if len(args) < 1:
            usage()
            sys.exit(2)
        dst = opts['-o']
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # Join the SSM group once per source host given on the command line.
        for host in args:
            name,aliases,addrs = socket.gethostbyaddr(host)
            imr = None
            # Only IPv4 source addresses are usable (module is IPv4-only);
            # the last IPv4 address found wins.
            for addr in addrs:
                if ':' in addr:
                    pass
                else:
                    imr = (socket.inet_pton(socket.AF_INET, group) +
                           socket.inet_pton(socket.AF_INET, opts['-i']) +
                           socket.inet_pton(socket.AF_INET, addr))
            if imr is not None:
                s.setsockopt(socket.SOL_IP, socket.IP_ADD_SOURCE_MEMBERSHIP, imr)
        s.bind((group,port))
        # Create the output FIFO for rngd if it does not exist yet.
        if not os.path.exists(dst):
            os.mkfifo(dst)
        if context is not None:
            # Keep the multicast socket open across daemonization.
            context.files_preserve=[s]
            if '-F' in opts:
                context.detach_process = False
            with context as ctx:
                _receiver(s,group,int(opts['-s']),dst,opts['-L'],False)
        else:
            _receiver(s,group,int(opts['-s']),dst,opts['-L'],True)
    elif sys.argv[1] == 'send' or sys.argv[1] == 'rawsend':
        opts.setdefault('-s',MSGSZ)
        group = opts['-g']
        port = int(opts['-p'])
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        if '-t' in opts:
            s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, chr(int(opts['-t'])))
        if '-i' in opts:
            s.bind((opts['-i'], 0))
        s.connect((group,port))
        if context is not None:
            # Keep the multicast socket open across daemonization.
            context.files_preserve=[s]
            if '-F' in opts:
                context.detach_process = False
            with context as ctx:
                _sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],False)
        else:
            _sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],True)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""
Usage: ssmrandom {recv|send|rawsend} [options]+ [host IP(only for recv)]+
Common options:
-h print this message
-v print version information
-g SSM multicast group (in 172.16.31.10/8)
-i local bind address
-p port
-f stay in the foreground (and log to stderr)
-F do not detatch (but otherwize act as a daemon) - useful for init
-P <pidfile>
-L <loglevel> set logging level (e.g. DEBUG)
recv options:
-s buffer size
-o ouput PIPE
send and rawsend options:
-t TTL
-s number of bytes of entropy payload
-r input entropy device
ssmramdom can be operated in either send or receive mode. In send mode it will
read data from the input entropy device and will transmit it framed (except when
using rawsend) as JSON objects on a multicast group in SSM address space. In
receive mode ssmrandom will receive a random sample (using random sleep intervals
between 1 and 20 seconds) of such SSM messages and will write the entropy
payload to a PIPE where it can be consumed by rngd from the rng-tools package.
BUGS: only ipv4 is supported
NOTE that you may need to enable igmpv3 on your network for SSM to work.
"""
from logging import StreamHandler
from ssmrandom.pidfile import PidFile
__author__ = 'leifj'
import socket
import json
import os
import base64
import logging
import getopt
import sys
import random
import time
from logging.handlers import SysLogHandler
import daemon
import lockfile
if not hasattr(socket, 'IP_MULTICAST_TTL'):
setattr(socket, 'IP_MULTICAST_TTL', 33)
if not hasattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP'):
setattr(socket, 'IP_ADD_SOURCE_MEMBERSHIP', 39)
VERSION = "0.3"
PROTOCOL_VERSION = "1.0"
SSM_GROUP = '192.168.127.12'
SSM_PORT = '49999'
ENTROPY_DEVICE='/dev/urandom'
RNGD_PIPE = "/var/run/ssm-rng-pipe"
BUFSZ= "4096"
MSGSZ = "1024"
LOGLEVEL = "WARNING"
MCTTL = "32"
PIDFILE = '/var/run/ssmrandom.pid'
def _setup_logging(level,foreground=False):
loglevel = getattr(logging, level.upper(), None)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: %s' % loglevel)
if foreground:
handler = StreamHandler()
else:
handler = SysLogHandler(address='/dev/log',facility=SysLogHandler.LOG_DAEMON)
pid = os.getpid()
formatter = logging.Formatter('ssmrandom['+str(pid)+'] %(message)s')
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logging.root.setLevel(loglevel)
def usage():
print __doc__
def _sender(s,group,bufsz,src,level,foreground):
_setup_logging(level,foreground)
with open(src) as fd:
logging.info("entropy SSM transmitter v%s starting..." % VERSION)
while True:
try:
logging.debug("about to read from %s" % src)
d = fd.read(bufsz)
if sys.argv[1] == 'send':
e = base64.b64encode(d)
msg = {'v': PROTOCOL_VERSION, 's': src, 'd': e}
s.send(json.dumps(msg))
else: # rawsend
s.send(d)
logging.debug("sending %d bytes of entropy to SSM:@%s" % (len(d), group))
except KeyboardInterrupt,ex:
raise ex
except Exception, ex:
logging.warning(ex)
pass
def _receiver(s,group,bufsz,dst,level,foreground):
_setup_logging(level,foreground)
with open(dst, "w+") as fd:
logging.info("entropy SSM receiver v%s starting..." % VERSION)
while True:
try:
msg = json.loads(s.recv(bufsz))
data = base64.b64decode(msg['d'])
logging.debug(msg)
logging.info("sending %d bytes of entropy from SSM:@%s upstream" % (len(data), group))
fd.write(data)
z = random.randint(1, 20)
logging.debug("sleeping for %d seconds..." % z)
time.sleep(z)
except KeyboardInterrupt,ex:
raise ex
except Exception, ex:
logging.warning(ex)
time.sleep(1)
pass
def main():
try:
_main()
except KeyboardInterrupt:
sys.exit()
def _main():
opts = {}
args = []
flags = None
if len(sys.argv) < 2:
usage()
sys.exit(2)
if sys.argv[1] in ('recv'):
flags = 'vfFhL:P:g:s:i:p:o:'
elif sys.argv[1] in ('send','rawsend'):
flags = 'vfFhL:P:g:s:t:p:r:'
else:
usage()
sys.exit()
try:
opts, args = getopt.getopt(sys.argv[2:], flags)
opts = dict(opts)
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
if '-h' in opts:
usage()
sys.exit()
if '-v' in opts:
print "ssmrandom version %s (c) NORDUnet A/S 2012" % VERSION
sys.exit()
opts.setdefault('-i','0.0.0.0')
opts.setdefault('-p',SSM_PORT)
opts.setdefault('-o',RNGD_PIPE)
opts.setdefault('-g',SSM_GROUP)
opts.setdefault('-L',LOGLEVEL)
opts.setdefault('-r',ENTROPY_DEVICE)
opts.setdefault('-L',LOGLEVEL)
opts.setdefault('-t',MCTTL)
opts.setdefault('-P',PIDFILE)
context = None
if not '-f' in opts:
context = daemon.DaemonContext(working_directory='/tmp')
context.pidfile = PidFile(opts['-P'])
if sys.argv[1] == 'recv':
group = opts['-g']
port = int(opts['-p'])
opts.setdefault('-s',BUFSZ)
if len(args) < 1:
usage()
sys.exit(2)
dst = opts['-o']
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
for host in args:
name,aliases,addrs = socket.gethostbyaddr(host)
imr = None
for addr in addrs:
if ':' in addr:
pass
else:
imr = (socket.inet_pton(socket.AF_INET, group) +
socket.inet_pton(socket.AF_INET, opts['-i']) +
socket.inet_pton(socket.AF_INET, addr))
if imr is not None:
s.setsockopt(socket.SOL_IP, socket.IP_ADD_SOURCE_MEMBERSHIP, imr)
s.bind((group,port))
if not os.path.exists(dst):
os.mkfifo(dst)
if context is not None:
context.files_preserve=[s]
if '-F' in opts:
context.detach_process = False
with context as ctx:
_receiver(s,group,int(opts['-s']),dst,opts['-L'],False)
else:
_receiver(s,group,int(opts['-s']),dst,opts['-L'],True)
elif sys.argv[1] == 'send' or sys.argv[1] == 'rawsend':
opts.setdefault('-s',MSGSZ)
group = opts['-g']
port = int(opts['-p'])
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if '-t' in opts:
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, chr(int(opts['-t'])))
if '-i' in opts:
s.bind((opts['-i'], 0))
s.connect((group,port))
if context is not None:
context.files_preserve=[s]
if '-F' in opts:
context.detach_process = False
with context as ctx:
_sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],False)
else:
_sender(s,group,int(opts['-s']),opts['-r'],opts['-L'],True)
if __name__ == '__main__':
main() | en | 0.760681 | #!/usr/bin/env python Usage: ssmrandom {recv|send|rawsend} [options]+ [host IP(only for recv)]+ Common options: -h print this message -v print version information -g SSM multicast group (in 172.16.31.10/8) -i local bind address -p port -f stay in the foreground (and log to stderr) -F do not detatch (but otherwize act as a daemon) - useful for init -P <pidfile> -L <loglevel> set logging level (e.g. DEBUG) recv options: -s buffer size -o ouput PIPE send and rawsend options: -t TTL -s number of bytes of entropy payload -r input entropy device ssmramdom can be operated in either send or receive mode. In send mode it will read data from the input entropy device and will transmit it framed (except when using rawsend) as JSON objects on a multicast group in SSM address space. In receive mode ssmrandom will receive a random sample (using random sleep intervals between 1 and 20 seconds) of such SSM messages and will write the entropy payload to a PIPE where it can be consumed by rngd from the rng-tools package. BUGS: only ipv4 is supported NOTE that you may need to enable igmpv3 on your network for SSM to work. # rawsend | 2.214212 | 2 |
deploy_config_generator/output/kube_secret.py | ApplauseAQI/applause-deploy-config-generator | 3 | 6614437 | <filename>deploy_config_generator/output/kube_secret.py
import base64
import copy
from deploy_config_generator.utils import yaml_dump, underscore_to_camelcase
from deploy_config_generator.output import kube_common
class OutputPlugin(kube_common.OutputPlugin):
    """Renders Kubernetes Secret manifests from app configuration."""

    NAME = 'kube_secret'
    DESCR = 'Kubernetes secret output plugin'
    FILE_EXT = '.yaml'

    DEFAULT_CONFIG = {
        'fields': {
            'kube_secrets': dict(
                metadata=dict(
                    type='dict',
                    required=True,
                    fields=copy.deepcopy(kube_common.METADATA_FIELD_SPEC),
                ),
                type=dict(
                    type='str',
                    required=True,
                ),
                data=dict(
                    type='dict',
                    subtype='str',
                    description='Values will be automatically base64-encoded as expected by the Kubernetes API',
                ),
                string_data=dict(
                    type='dict',
                ),
            ),
        }
    }

    def generate_output(self, app_vars):
        """Render one Secret manifest; returns (yaml_text, filename_suffix)."""
        app = app_vars['APP']
        # Skeleton of the manifest.
        data = {
            'apiVersion': 'v1',
            'kind': 'Secret',
            'metadata': self.build_metadata(app['metadata']),
        }
        for field in ('type',):
            data[underscore_to_camelcase(field)] = app[field]
        if app['string_data']:
            data['stringData'] = app['string_data']
        if app['data']:
            # The Kubernetes API expects values under 'data' to be base64.
            # b64encode needs bytes and yields bytes, so round-trip through
            # UTF-8 to hand plain strings to the YAML dumper.
            data['data'] = {
                key: base64.b64encode(value.encode('utf-8')).decode('utf-8')
                for key, value in app['data'].items()
            }
        data = self._template.render_template(data, app_vars)
        output = yaml_dump(data)
        return (output, self.get_output_filename_suffix(data))
| <filename>deploy_config_generator/output/kube_secret.py
import base64
import copy
from deploy_config_generator.utils import yaml_dump, underscore_to_camelcase
from deploy_config_generator.output import kube_common
class OutputPlugin(kube_common.OutputPlugin):
NAME = 'kube_secret'
DESCR = 'Kubernetes secret output plugin'
FILE_EXT = '.yaml'
DEFAULT_CONFIG = {
'fields': {
'kube_secrets': dict(
metadata=dict(
type='dict',
required=True,
fields=copy.deepcopy(kube_common.METADATA_FIELD_SPEC),
),
type=dict(
type='str',
required=True,
),
data=dict(
type='dict',
subtype='str',
description='Values will be automatically base64-encoded as expected by the Kubernetes API',
),
string_data=dict(
type='dict',
),
),
}
}
def generate_output(self, app_vars):
# Basic structure
data = {
'apiVersion': 'v1',
'kind': 'Secret',
}
data['metadata'] = self.build_metadata(app_vars['APP']['metadata'])
for field in ('type',):
data[underscore_to_camelcase(field)] = app_vars['APP'][field]
if app_vars['APP']['string_data']:
data['stringData'] = app_vars['APP']['string_data']
if app_vars['APP']['data']:
tmp_data = dict()
for key in app_vars['APP']['data']:
# Values under 'data' should be base64-encoded
# We have to jump through the hoops of encoding/decoding it because the
# py3 b64encode() function expects a bytestring, and then the YAML encoder
# tries to double-encode the resulting bytestring, so we convert it back
# to a string
tmp_value = app_vars['APP']['data'][key].encode("utf-8")
tmp_data[key] = base64.b64encode(tmp_value).decode('utf-8')
data['data'] = tmp_data
data = self._template.render_template(data, app_vars)
output = yaml_dump(data)
return (output, self.get_output_filename_suffix(data))
| en | 0.707657 | # Basic structure # Values under 'data' should be base64-encoded # We have to jump through the hoops of encoding/decoding it because the # py3 b64encode() function expects a bytestring, and then the YAML encoder # tries to double-encode the resulting bytestring, so we convert it back # to a string | 2.231769 | 2 |
testproject/django_file_form_example/migrations/0002_auto_20200407_0814.py | kosior/django-file-form | 133 | 6614438 | <filename>testproject/django_file_form_example/migrations/0002_auto_20200407_0814.py<gh_stars>100-1000
# Generated by Django 3.0.4 on 2020-04-07 13:14
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: pin an absolute-path FileSystemStorage and set
    max_length=255 on the ``input_file`` fields of Example and ExampleFile.

    NOTE(review): the storage location is a developer-machine absolute path
    baked in by makemigrations -- confirm this migration is not expected to
    run on other environments.
    """
    dependencies = [
        ("django_file_form_example", "0001_initial"),
    ]
    operations = [
        migrations.AlterField(
            model_name="example",
            name="input_file",
            field=models.FileField(
                max_length=255,
                storage=django.core.files.storage.FileSystemStorage(
                    location="/Users/mbraak/django-file-form/testproject/media"
                ),
                upload_to="example",
            ),
        ),
        migrations.AlterField(
            model_name="examplefile",
            name="input_file",
            field=models.FileField(
                max_length=255,
                storage=django.core.files.storage.FileSystemStorage(
                    location="/Users/mbraak/django-file-form/testproject/media"
                ),
                upload_to="example",
            ),
        ),
    ]
| <filename>testproject/django_file_form_example/migrations/0002_auto_20200407_0814.py<gh_stars>100-1000
# Generated by Django 3.0.4 on 2020-04-07 13:14
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_file_form_example", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="example",
name="input_file",
field=models.FileField(
max_length=255,
storage=django.core.files.storage.FileSystemStorage(
location="/Users/mbraak/django-file-form/testproject/media"
),
upload_to="example",
),
),
migrations.AlterField(
model_name="examplefile",
name="input_file",
field=models.FileField(
max_length=255,
storage=django.core.files.storage.FileSystemStorage(
location="/Users/mbraak/django-file-form/testproject/media"
),
upload_to="example",
),
),
]
| en | 0.843675 | # Generated by Django 3.0.4 on 2020-04-07 13:14 | 1.59556 | 2 |
bin/sanitize-cosmosis-path.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 1 | 6614439 | from __future__ import print_function
import os
# Drop stale UPS gcc entries from DYLD_LIBRARY_PATH and print the cleaned
# value so a shell wrapper can re-export it.  Use .get() so an unset
# variable yields an empty path instead of a KeyError crash.
paths = os.environ.get('DYLD_LIBRARY_PATH', '').split(":")
paths = [p for p in paths if "/ups/gcc/v4_8_2/Darwin64bit+13/" not in p]
print(":".join(paths))
import os
paths = os.environ['DYLD_LIBRARY_PATH'].split(":")
paths = [p for p in paths if "/ups/gcc/v4_8_2/Darwin64bit+13/" not in p]
print(":".join(paths)) | none | 1 | 2.344683 | 2 | |
tools_box/tools_box/page/business_board/business_board.py | ansiabdo/tools_box | 0 | 6614440 | <reponame>ansiabdo/tools_box
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
###########################################################
# SALES BY MONTH
##########################################################
# Calendar months 1..12, used to aggregate sales figures per month.
months = list(range(1, 13))
@frappe.whitelist()
def get_chart_results(year1, year2):
    """Monthly Sales Invoice totals (scaled to millions) for two years,
    plus each year's grand total, for the business-board chart.

    Returns a 4-tuple of single-element lists:
    ([12 monthly totals for year1], [12 for year2],
     [year1 total rows], [year2 total rows]).
    """
    prev_year_result = [] # result arrays
    cur_year_result = []
    for x in months: #query the database for result by monthly
        # Each monthly sum is scaled to millions and rounded to 3 decimals;
        # NULL sums (no invoices that month) are coerced to 0.0 first.
        prev_year_result.append(round(filter_null(compute_year_month(f'{int(year1)}', f'{x}')[0]['sum(grand_total)'])/1000000, 3))
        cur_year_result.append(round(filter_null(compute_year_month(f'{int(year2)}', f'{x}')[0]['sum(grand_total)'])/1000000, 3))
    return [prev_year_result], [cur_year_result], [compute_year_total(int(year1))], [compute_year_total(int(year2))]#[year[0]['sum(grand_total)']]
def compute_year_month(year, month):
    """Sum Sales Invoice grand totals posted within the given month.

    Returns the raw frappe.db.sql result: a one-row list of dicts keyed
    'sum(grand_total)' (value is None when no invoices match).
    """
    y, m = int(year), int(month)
    # Use a half-open range [first of month, first of next month) instead of
    # '<= day 31': months without a 31st produced an invalid upper-bound date
    # literal, and unpadded month numbers compare unreliably as strings.
    if m == 12:
        next_y, next_m = y + 1, 1
    else:
        next_y, next_m = y, m + 1
    result = frappe.db.sql(
        f"""SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{y:04d}-{m:02d}-01' AND posting_date<'{next_y:04d}-{next_m:02d}-01'""",
        as_dict=True
    )
    return result
def compute_year_total(year):
    """Return frappe.db.sql rows with the summed grand_total for *year*."""
    yr = int(year)
    return frappe.db.sql(
        f"""SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{yr}-01-01' AND posting_date<='{yr}-12-31';""",
        as_dict=True
    )
def filter_null(data):
    """Coerce a missing SQL aggregate (NULL -> None) to 0.0.

    The original `type(data) != float` test also zeroed legitimate
    non-float numerics (int, and Decimal as returned by SQL SUM); only a
    genuinely absent value should be replaced.
    """
    if data is None:
        return 0.0
    return data
##################################################################
# territory_list
##################################################################
@frappe.whitelist()
def get_sales_by_territory(year):
    """Per-zone Sales Invoice totals for *year*.

    Returns (grand_sum, per_zone_totals): grand_sum is the raw sum of all
    zone totals; per_zone_totals holds each zone scaled to billions and
    rounded to 2 decimals, in the order Lagos, South West, South East,
    South South, North, International.
    """
    # NULL sums (zones with no matching invoices) are coerced to 0.0.
    lagos_result = filter_null(frappe.db.sql(filter_territory(lagos, year), as_dict=True)[0]['sum(grand_total)'])
    south_west_result = filter_null(frappe.db.sql(filter_territory(south_west, year), as_dict=True)[0]['sum(grand_total)'])
    south_east_result = filter_null(frappe.db.sql(filter_territory(south_east, year), as_dict=True)[0]['sum(grand_total)'])
    south_south_result = filter_null(frappe.db.sql(filter_territory(south_south, year), as_dict=True)[0]['sum(grand_total)'])
    north_zone_result = filter_null(frappe.db.sql(filter_territory(north_zone, year), as_dict=True)[0]['sum(grand_total)'])
    # nigeria_result = filter_null(frappe.db.sql(filter_territory(nigeria, 2019), as_dict=True)[0]['sum(grand_total)'])
    international_result = filter_null(frappe.db.sql(filter_territory(international, year), as_dict=True)[0]['sum(grand_total)'])
    #all_territories_result = filter_null(frappe.db.sql(filter_territory(all_territories, year), as_dict=True)[0]['sum(grand_total)'])
    sales_territory_result_set = [lagos_result, south_west_result, south_east_result, south_south_result, north_zone_result, international_result] #, all_territories_result]
    # Sum the raw values first, then rescale each entry to billions in place.
    sales_territory_result_sum = sum(sales_territory_result_set)
    for x, y in enumerate(sales_territory_result_set):
        sales_territory_result_set[x] = round(y/1000000000, 2)
    # print(lagos_result)
    return sales_territory_result_sum, sales_territory_result_set
lagos = ['Agege', 'Ajeromi-ifelodun', 'Alimosho', 'Amuwo-Odofin', 'Apapa', 'Badagry', 'Epe', 'Eti-Osa',
'Ibeju-Lekki', 'Ifako-Ijaye', 'Ikeja', 'Ikorodu', 'Kosofe', 'Lagos', 'Lagos Island', 'Lagos Zone',
'Lagos Mainland', 'MM Tier 1', 'MM Tier 2', 'Mushin', 'Ojo', 'Oshodi-Isolo', 'Shomolu', 'Surulere', 'Nigeria']
south_west = ['Akure', 'Ekiti', 'Ibadan', 'Ife', 'Kwara', 'Ogun', 'Okitipupa', 'Ondo', 'Osun', 'Oyo', 'Ilorin', 'South', 'West Zone', 'Tanke', 'South West Zone']
south_south = ['Benin', 'Rivers', 'Akwa Ibom', 'Bayelsa', 'Delta', 'South South Zone']
south_east = ['Abia', 'Anambra', 'Enugu', 'Imo', 'South East Zone']
north_zone = ['Abuja', 'Kaduna', 'Kano', 'Bauchi', 'Benue', 'Northern Zone', 'Plateau']
international = ['International', 'Canada', 'Ghana', 'UK', 'USA']
nigeria = ['Nigeria']
all_territories = ['All Territories']
def filter_territory(territory_list, year):
    """Build the SQL that sums Sales Invoice grand totals for a set of
    territories within calendar year *year*.

    NOTE: territory names come from the hard-coded zone lists in this
    module, so the string interpolation here is not exposed to user input.
    """
    territory_clause = " OR ".join("territory='%s'" % t for t in territory_list)
    query = "SELECT sum(grand_total) FROM `tabSales Invoice` WHERE (" + territory_clause + ")"
    query += f" AND (posting_date>='{int(year)}-01-01' and posting_date<='{int(year)}-12-31')"
    return query
# def get_territory_by_month(filter_territory, month):
# @frappe.whitelist()
# def get_2019_analytics():
# sum_pre_year = frappe.db.sql(
# """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31';""",
# as_dict=True
# )
# sum_cur_year = frappe.db.sql(
# """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31';""",
# as_dict=True
# )
# prev_year = frappe.db.sql(
# """SELECT grand_total, posting_date FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31' ORDER BY posting_date ASC;""",
# as_dict=True
# )
# cur_year = frappe.db.sql(
# """SELECT grand_total, posting_date FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31' ORDER BY posting_date ASC;""",
# as_dict=True
# )
# return {'sum_pre_year': sum_pre_year, 'sum_cur_year': sum_cur_year, 'prev_year': prev_year, 'cur_year': cur_year}
| from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
###########################################################
# SALES BY MONTH
##########################################################
months = [i for i in range(1, 13)] # 12 months calendar
@frappe.whitelist()
def get_chart_results(year1, year2):
prev_year_result = [] # result arrays
cur_year_result = []
for x in months: #query the database for result by monthly
prev_year_result.append(round(filter_null(compute_year_month(f'{int(year1)}', f'{x}')[0]['sum(grand_total)'])/1000000, 3))
cur_year_result.append(round(filter_null(compute_year_month(f'{int(year2)}', f'{x}')[0]['sum(grand_total)'])/1000000, 3))
return [prev_year_result], [cur_year_result], [compute_year_total(int(year1))], [compute_year_total(int(year2))]#[year[0]['sum(grand_total)']]
def compute_year_month(year, month):
year = frappe.db.sql(
f"""SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{int(year)}-{int(month)}-01' AND posting_date<='{int(year)}-{int(month)}-31'""",
as_dict=True
)
return year
def compute_year_total(year):
data = frappe.db.sql(
f"""SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{int(year)}-01-01' AND posting_date<='{int(year)}-12-31';""",
as_dict=True
)
return data
def filter_null(data):
if type(data)!=float:
value = 0.0
return value
return data
##################################################################
# territory_list
##################################################################
@frappe.whitelist()
def get_sales_by_territory(year):
    """Aggregate yearly sales per territory group.

    Returns a tuple of:
      * the overall sum of the raw per-region totals, and
      * a list of the per-region totals scaled to billions and rounded to
        2 d.p., ordered Lagos, South-West, South-East, South-South,
        North, International.
    """
    region_groups = [lagos, south_west, south_east, south_south, north_zone, international]
    raw_totals = [
        filter_null(frappe.db.sql(filter_territory(group, year), as_dict=True)[0]['sum(grand_total)'])
        for group in region_groups
    ]
    grand_sum = sum(raw_totals)
    rounded = [round(total / 1000000000, 2) for total in raw_totals]
    return grand_sum, rounded
# Territory groupings consumed by get_sales_by_territory().  Each list holds
# the literal `territory` values (as stored on Sales Invoice) that make up
# one reporting region; filter_territory() ORs them together in SQL.
# Lagos-area territories (LGAs/zones), plus the generic 'Nigeria' bucket.
lagos = ['Agege', 'Ajeromi-ifelodun', 'Alimosho', 'Amuwo-Odofin', 'Apapa', 'Badagry', 'Epe', 'Eti-Osa',
    'Ibeju-Lekki', 'Ifako-Ijaye', 'Ikeja', 'Ikorodu', 'Kosofe', 'Lagos', 'Lagos Island', 'Lagos Zone',
    'Lagos Mainland', 'MM Tier 1', 'MM Tier 2', 'Mushin', 'Ojo', 'Oshodi-Isolo', 'Shomolu', 'Surulere', 'Nigeria']
# South-West region.
south_west = ['Akure', 'Ekiti', 'Ibadan', 'Ife', 'Kwara', 'Ogun', 'Okitipupa', 'Ondo', 'Osun', 'Oyo', 'Ilorin', 'South', 'West Zone', 'Tanke', 'South West Zone']
# South-South region.
south_south = ['Benin', 'Rivers', 'Akwa Ibom', 'Bayelsa', 'Delta', 'South South Zone']
# South-East region.
south_east = ['Abia', 'Anambra', 'Enugu', 'Imo', 'South East Zone']
# Northern region.
north_zone = ['Abuja', 'Kaduna', 'Kano', 'Bauchi', 'Benue', 'Northern Zone', 'Plateau']
# Territories outside Nigeria.
international = ['International', 'Canada', 'Ghana', 'UK', 'USA']
# Defined but not referenced by any active code in this chunk.
nigeria = ['Nigeria']
all_territories = ['All Territories']
def filter_territory(territory_list, year):
    """Build the SQL that sums Sales Invoice grand_total for a set of
    territories within one calendar year.

    Args:
        territory_list: non-empty list of territory names (the module-level
            constants above).
        year: int or numeric string.

    Returns:
        The query string, e.g.
        "SELECT sum(grand_total) FROM `tabSales Invoice` WHERE
         (territory='A' OR territory='B') AND (posting_date>='2020-01-01'
         and posting_date<='2020-12-31')".

    NOTE(review): territory names are interpolated into the SQL unescaped;
    this is safe only because they come from the hard-coded lists above —
    do not call this with user-supplied names.
    """
    # " OR ".join replaces the old index()-based last-element bookkeeping,
    # which was O(n^2) and misfired when a list contained duplicate entries.
    territory_clause = " OR ".join(f"territory='{t}'" for t in territory_list)
    query = f"SELECT sum(grand_total) FROM `tabSales Invoice` WHERE ({territory_clause})"
    query += f" AND (posting_date>='{int(year)}-01-01' and posting_date<='{int(year)}-12-31')"
    return query
# def get_territory_by_month(filter_territory, month):
# @frappe.whitelist()
# def get_2019_analytics():
# sum_pre_year = frappe.db.sql(
# """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31';""",
# as_dict=True
# )
# sum_cur_year = frappe.db.sql(
# """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31';""",
# as_dict=True
# )
# prev_year = frappe.db.sql(
# """SELECT grand_total, posting_date FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31' ORDER BY posting_date ASC;""",
# as_dict=True
# )
# cur_year = frappe.db.sql(
# """SELECT grand_total, posting_date FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31' ORDER BY posting_date ASC;""",
# as_dict=True
# )
# return {'sum_pre_year': sum_pre_year, 'sum_cur_year': sum_cur_year, 'prev_year': prev_year, 'cur_year': cur_year} | en | 0.242384 | ########################################################### # SALES BY MONTH ########################################################## # 12 months calendar # result arrays #query the database for result by monthly #[year[0]['sum(grand_total)']] SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{int(year)}-{int(month)}-01' AND posting_date<='{int(year)}-{int(month)}-31' SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='{int(year)}-01-01' AND posting_date<='{int(year)}-12-31'; ################################################################## # territory_list ################################################################## # nigeria_result = filter_null(frappe.db.sql(filter_territory(nigeria, 2019), as_dict=True)[0]['sum(grand_total)']) #all_territories_result = filter_null(frappe.db.sql(filter_territory(all_territories, year), as_dict=True)[0]['sum(grand_total)']) #, all_territories_result] # print(lagos_result) SELECT sum(grand_total) FROM `tabSales Invoice` WHERE ( #territory='abia' OR territory='anambra' OR territory='abia' OR territory='enugu' OR territory='imo' OR territory='south east zone'; # def get_territory_by_month(filter_territory, month): # @frappe.whitelist() # def get_2019_analytics(): # sum_pre_year = frappe.db.sql( # """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31';""", # as_dict=True # ) # sum_cur_year = frappe.db.sql( # """SELECT sum(grand_total) FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31';""", # as_dict=True # ) # prev_year = frappe.db.sql( # """SELECT grand_total, posting_date FROM `tabSales Invoice` WHERE posting_date>='2019-01-01' AND posting_date<='2019-12-31' ORDER BY posting_date ASC;""", # as_dict=True # ) # cur_year = frappe.db.sql( # """SELECT grand_total, 
posting_date FROM `tabSales Invoice` WHERE posting_date>='2020-01-01' AND posting_date<='2020-12-31' ORDER BY posting_date ASC;""", # as_dict=True # ) # return {'sum_pre_year': sum_pre_year, 'sum_cur_year': sum_cur_year, 'prev_year': prev_year, 'cur_year': cur_year} | 2.275117 | 2 |
thesite/movies/models.py | Coolthulhu/moviedescriber | 0 | 6614441 | <filename>thesite/movies/models.py
from django.db import models
from django.urls import reverse
class Movie(models.Model):
# Values as strings because it's more convenient here
# Might be cleaner to have them nullable
title = models.CharField(max_length=1000, verbose_name="Title")
year = models.CharField(max_length=1000, verbose_name="Year")
rated = models.CharField(max_length=1000, verbose_name="Rated")
released = models.CharField(max_length=1000, verbose_name="Released")
runtime = models.CharField(max_length=1000, verbose_name="Runtime")
genre = models.CharField(max_length=1000, verbose_name="Genre")
director = models.CharField(max_length=1000, verbose_name="Director")
plot = models.CharField(max_length=10000, verbose_name="Plot")
language = models.CharField(max_length=1000, verbose_name="Language")
country = models.CharField(max_length=1000, verbose_name="Country")
awards = models.CharField(max_length=1000, verbose_name="Awards")
poster = models.CharField(max_length=1000, verbose_name="Poster URL")
metascore = models.CharField(max_length=1000, verbose_name="Metascore")
imdb_rating = models.CharField(max_length=1000, verbose_name="IMDB rating")
imdb_votes = models.CharField(max_length=1000, verbose_name="IMDB votes")
imdb_id = models.CharField(max_length=1000, verbose_name="IMDB ID")
movie_type = models.CharField(max_length=1000, verbose_name="Type")
dvd = models.CharField(max_length=1000, verbose_name="DVD")
box_office = models.CharField(max_length=1000, verbose_name="Box office")
production = models.CharField(max_length=1000, verbose_name="Production")
website = models.CharField(max_length=1000, verbose_name="Website")
writer = models.CharField(max_length=1000, verbose_name="Writer")
actors = models.CharField(max_length=1000, verbose_name="Actors")
class Meta:
indexes = [
models.Index(fields=['title'])
]
constraints = [
models.UniqueConstraint(fields=['title'], name="unique_title"),
]
def get_absolute_url(self):
return reverse('movies:details', kwargs={'pk': self.id})
def __str__(self):
return '{}, year {}'.format(self.title, self.year)
class Rating(models.Model):
movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
source = models.CharField(max_length=100)
value = models.CharField(max_length=100)
class Meta:
indexes = [
models.Index(fields=['movie'])
]
| <filename>thesite/movies/models.py
from django.db import models
from django.urls import reverse
class Movie(models.Model):
# Values as strings because it's more convenient here
# Might be cleaner to have them nullable
title = models.CharField(max_length=1000, verbose_name="Title")
year = models.CharField(max_length=1000, verbose_name="Year")
rated = models.CharField(max_length=1000, verbose_name="Rated")
released = models.CharField(max_length=1000, verbose_name="Released")
runtime = models.CharField(max_length=1000, verbose_name="Runtime")
genre = models.CharField(max_length=1000, verbose_name="Genre")
director = models.CharField(max_length=1000, verbose_name="Director")
plot = models.CharField(max_length=10000, verbose_name="Plot")
language = models.CharField(max_length=1000, verbose_name="Language")
country = models.CharField(max_length=1000, verbose_name="Country")
awards = models.CharField(max_length=1000, verbose_name="Awards")
poster = models.CharField(max_length=1000, verbose_name="Poster URL")
metascore = models.CharField(max_length=1000, verbose_name="Metascore")
imdb_rating = models.CharField(max_length=1000, verbose_name="IMDB rating")
imdb_votes = models.CharField(max_length=1000, verbose_name="IMDB votes")
imdb_id = models.CharField(max_length=1000, verbose_name="IMDB ID")
movie_type = models.CharField(max_length=1000, verbose_name="Type")
dvd = models.CharField(max_length=1000, verbose_name="DVD")
box_office = models.CharField(max_length=1000, verbose_name="Box office")
production = models.CharField(max_length=1000, verbose_name="Production")
website = models.CharField(max_length=1000, verbose_name="Website")
writer = models.CharField(max_length=1000, verbose_name="Writer")
actors = models.CharField(max_length=1000, verbose_name="Actors")
class Meta:
indexes = [
models.Index(fields=['title'])
]
constraints = [
models.UniqueConstraint(fields=['title'], name="unique_title"),
]
def get_absolute_url(self):
return reverse('movies:details', kwargs={'pk': self.id})
def __str__(self):
return '{}, year {}'.format(self.title, self.year)
class Rating(models.Model):
movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
source = models.CharField(max_length=100)
value = models.CharField(max_length=100)
class Meta:
indexes = [
models.Index(fields=['movie'])
]
| en | 0.968933 | # Values as strings because it's more convenient here # Might be cleaner to have them nullable | 2.435768 | 2 |
tests/share/models/test_rawdata.py | felliott/SHARE | 87 | 6614442 | import pytest
import hashlib
from django.core import exceptions
from django.db.utils import IntegrityError
from share.models import RawDatum
from share.harvest.base import FetchResult
@pytest.mark.django_db
class TestRawDatum:
def test_doesnt_mangle_data(self, suid):
rd = RawDatum(suid=suid, datum='This is just some data')
rd.save()
assert RawDatum.objects.first().datum == 'This is just some data'
def test_must_have_data(self, suid):
rd = RawDatum(suid)
with pytest.raises(exceptions.ValidationError) as e:
rd.clean_fields()
rd.save()
assert 'This field cannot be blank.' == e.value.message_dict['datum'][0]
def test_must_have_suid(self):
rd = RawDatum(datum='SomeData')
with pytest.raises(IntegrityError) as e:
rd.save()
assert 'null value in column "suid_id" violates not-null constraint' in e.value.args[0]
def test_store_data(self, source_config):
rd = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
assert rd.date_modified is not None
assert rd.date_created is not None
assert rd.datum == 'mydatums'
assert rd.suid.identifier == 'unique'
assert rd.suid.source_config == source_config
assert rd.sha256 == hashlib.sha256(b'mydatums').hexdigest()
def test_store_data_dedups_simple(self, source_config):
rd1 = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
rd2 = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
assert rd1.pk == rd2.pk
assert rd1.created is True
assert rd2.created is False
assert rd1.date_created == rd2.date_created
assert rd1.date_modified < rd2.date_modified
def test_store_data_dedups_complex(self, source_config):
data = '{"providerUpdatedDateTime":"2016-08-25T11:37:40Z","uris":{"canonicalUri":"https://provider.domain/files/7d2792031","providerUris":["https://provider.domain/files/7d2792031"]},"contributors":[{"name":"Person1","email":"<EMAIL>"},{"name":"Person2","email":"<EMAIL>"},{"name":"Person3","email":"<EMAIL>"},{"name":"Person4","email":"<EMAIL>"}],"title":"ReducingMorbiditiesinNeonatesUndergoingMRIScannig"}'
rd1 = RawDatum.objects.store_data(source_config, FetchResult('unique', data))
rd2 = RawDatum.objects.store_data(source_config, FetchResult('unique', data))
assert rd1.pk == rd2.pk
assert rd1.created is True
assert rd2.created is False
assert rd1.date_modified < rd2.date_modified
assert rd1.date_created == rd2.date_created
| import pytest
import hashlib
from django.core import exceptions
from django.db.utils import IntegrityError
from share.models import RawDatum
from share.harvest.base import FetchResult
@pytest.mark.django_db
class TestRawDatum:
def test_doesnt_mangle_data(self, suid):
rd = RawDatum(suid=suid, datum='This is just some data')
rd.save()
assert RawDatum.objects.first().datum == 'This is just some data'
def test_must_have_data(self, suid):
rd = RawDatum(suid)
with pytest.raises(exceptions.ValidationError) as e:
rd.clean_fields()
rd.save()
assert 'This field cannot be blank.' == e.value.message_dict['datum'][0]
def test_must_have_suid(self):
rd = RawDatum(datum='SomeData')
with pytest.raises(IntegrityError) as e:
rd.save()
assert 'null value in column "suid_id" violates not-null constraint' in e.value.args[0]
def test_store_data(self, source_config):
rd = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
assert rd.date_modified is not None
assert rd.date_created is not None
assert rd.datum == 'mydatums'
assert rd.suid.identifier == 'unique'
assert rd.suid.source_config == source_config
assert rd.sha256 == hashlib.sha256(b'mydatums').hexdigest()
def test_store_data_dedups_simple(self, source_config):
rd1 = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
rd2 = RawDatum.objects.store_data(source_config, FetchResult('unique', 'mydatums'))
assert rd1.pk == rd2.pk
assert rd1.created is True
assert rd2.created is False
assert rd1.date_created == rd2.date_created
assert rd1.date_modified < rd2.date_modified
def test_store_data_dedups_complex(self, source_config):
data = '{"providerUpdatedDateTime":"2016-08-25T11:37:40Z","uris":{"canonicalUri":"https://provider.domain/files/7d2792031","providerUris":["https://provider.domain/files/7d2792031"]},"contributors":[{"name":"Person1","email":"<EMAIL>"},{"name":"Person2","email":"<EMAIL>"},{"name":"Person3","email":"<EMAIL>"},{"name":"Person4","email":"<EMAIL>"}],"title":"ReducingMorbiditiesinNeonatesUndergoingMRIScannig"}'
rd1 = RawDatum.objects.store_data(source_config, FetchResult('unique', data))
rd2 = RawDatum.objects.store_data(source_config, FetchResult('unique', data))
assert rd1.pk == rd2.pk
assert rd1.created is True
assert rd2.created is False
assert rd1.date_modified < rd2.date_modified
assert rd1.date_created == rd2.date_created
| none | 1 | 2.268939 | 2 | |
doit_api/__init__.py | smarie/python-doit-api | 5 | 6614443 | from .main import why_am_i_running, title_with_actions, task, taskgen, pytask, cmdtask, doit_config
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from os import path as _path
__version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir))
__all__ = [
'__version__',
# submodules
'main',
# symbols
'task', 'taskgen', 'pytask', 'cmdtask', 'why_am_i_running', 'doit_config'
]
| from .main import why_am_i_running, title_with_actions, task, taskgen, pytask, cmdtask, doit_config
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from os import path as _path
__version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir))
__all__ = [
'__version__',
# submodules
'main',
# symbols
'task', 'taskgen', 'pytask', 'cmdtask', 'why_am_i_running', 'doit_config'
]
| en | 0.655623 | # -- Distribution mode -- # import from _version.py generated by setuptools_scm during release # -- Source mode -- # use setuptools_scm to get the current version from src using git # submodules # symbols | 1.178792 | 1 |
bugbane/tools/builder/builders/libfuzzer.py | gardatech/bugbane | 9 | 6614444 | # Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by <NAME> <<EMAIL>>
from typing import Dict
import logging
log = logging.getLogger(__name__)
from bugbane.modules.build_type import BuildType
from .base_builders import LLVMBuilder, UnsupportedBuildException
from .factory import FuzzBuilderFactory
@FuzzBuilderFactory.register("libFuzzer")
class LibFuzzerBuilder(LLVMBuilder):
"""
LLVMBuilder capable of passing libFuzzer env variables according to desired build type
This builder uses clang/++ compilers.
"""
EXTRA_ENV = {
"CFLAGS": "-gline-tables-only -fno-omit-frame-pointer -fsanitize=fuzzer-no-link -fno-sanitize-recover=all",
"CXXFLAGS": "-gline-tables-only -fno-omit-frame-pointer -fsanitize=fuzzer-no-link -fno-sanitize-recover=all",
"LDFLAGS": "-fsanitize=fuzzer-no-link",
"LIB_FUZZING_ENGINE": "-fsanitize=fuzzer",
}
REQUIRED_BUILDS = {
BuildType.BASIC,
BuildType.COVERAGE,
}
def create_build_env(self, bt: BuildType) -> Dict[str, str]:
extra_env = super().create_build_env(bt)
bt_mapping = {
BuildType.ASAN: "-fsanitize=address",
BuildType.UBSAN: "-fsanitize=undefined",
BuildType.CFISAN: "-fsanitize=cfi",
BuildType.MSAN: "-fsanitize=memory",
BuildType.TSAN: "-fsanitize=thread",
BuildType.LSAN: "-fsanitize=leak",
}
supported_bts = set(bt_mapping).union({BuildType.BASIC, BuildType.COVERAGE})
if bt not in supported_bts:
raise UnsupportedBuildException(
f"BuildType {bt.name.upper()} is not supported in builder {self.__class__.__name__}"
)
if bt in bt_mapping:
flags = " " + bt_mapping[bt]
extra_env["CFLAGS"] += flags + " -O1"
extra_env["CXXFLAGS"] += flags + " -O1"
extra_env["LDFLAGS"] += flags
if bt == BuildType.ASAN:
extra_env["CFLAGS"] += " -fsanitize-address-use-after-scope"
extra_env["CXXFLAGS"] += " -fsanitize-address-use-after-scope"
return extra_env
| # Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by <NAME> <<EMAIL>>
from typing import Dict
import logging
log = logging.getLogger(__name__)
from bugbane.modules.build_type import BuildType
from .base_builders import LLVMBuilder, UnsupportedBuildException
from .factory import FuzzBuilderFactory
@FuzzBuilderFactory.register("libFuzzer")
class LibFuzzerBuilder(LLVMBuilder):
"""
LLVMBuilder capable of passing libFuzzer env variables according to desired build type
This builder uses clang/++ compilers.
"""
EXTRA_ENV = {
"CFLAGS": "-gline-tables-only -fno-omit-frame-pointer -fsanitize=fuzzer-no-link -fno-sanitize-recover=all",
"CXXFLAGS": "-gline-tables-only -fno-omit-frame-pointer -fsanitize=fuzzer-no-link -fno-sanitize-recover=all",
"LDFLAGS": "-fsanitize=fuzzer-no-link",
"LIB_FUZZING_ENGINE": "-fsanitize=fuzzer",
}
REQUIRED_BUILDS = {
BuildType.BASIC,
BuildType.COVERAGE,
}
def create_build_env(self, bt: BuildType) -> Dict[str, str]:
extra_env = super().create_build_env(bt)
bt_mapping = {
BuildType.ASAN: "-fsanitize=address",
BuildType.UBSAN: "-fsanitize=undefined",
BuildType.CFISAN: "-fsanitize=cfi",
BuildType.MSAN: "-fsanitize=memory",
BuildType.TSAN: "-fsanitize=thread",
BuildType.LSAN: "-fsanitize=leak",
}
supported_bts = set(bt_mapping).union({BuildType.BASIC, BuildType.COVERAGE})
if bt not in supported_bts:
raise UnsupportedBuildException(
f"BuildType {bt.name.upper()} is not supported in builder {self.__class__.__name__}"
)
if bt in bt_mapping:
flags = " " + bt_mapping[bt]
extra_env["CFLAGS"] += flags + " -O1"
extra_env["CXXFLAGS"] += flags + " -O1"
extra_env["LDFLAGS"] += flags
if bt == BuildType.ASAN:
extra_env["CFLAGS"] += " -fsanitize-address-use-after-scope"
extra_env["CXXFLAGS"] += " -fsanitize-address-use-after-scope"
return extra_env
| en | 0.847409 | # Copyright 2022 Garda Technologies, LLC. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Originally written by <NAME> <<EMAIL>> LLVMBuilder capable of passing libFuzzer env variables according to desired build type This builder uses clang/++ compilers. | 2.002685 | 2 |
docker/wizard.py | wis-software/rocketchat-tests-based-on-splinter | 2 | 6614445 | <reponame>wis-software/rocketchat-tests-based-on-splinter
from argparse import ArgumentParser
from sys import stderr
from time import sleep
from rocketchat_API.rocketchat import RocketChat
from base import SplinterTestCase
LOCALHOST = 'http://127.0.0.1:8006'
class SplinterWizardInit(SplinterTestCase):
def __init__(self, addr, username, password, wait=10, **kwargs):
SplinterTestCase.__init__(self, addr, **kwargs)
self.addr = addr
self.username = username
self.password = password
self.wait = wait
self.bot_name = 'meeseeks'
self.bot_password = '<PASSWORD>'
def _wait_until_loading_is_completed(self, header, selector):
for _ in range(self.wait):
title = self.find_by_css(selector)
if title.text.lower() == header:
return True
sleep(1)
return False
def test_administrator_info(self):
# Admin info
header = self.find_by_css('.setup-wizard-forms__header-title')
assert header.text.lower() in 'admin info'
self.browser.fill('registration-name', self.username)
self.browser.fill('registration-username', self.username)
self.browser.fill(
'registration-email', <EMAIL>(self.<EMAIL>)
)
self.browser.fill('registration-pass', self.password)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_organisation_info(self):
assert self._wait_until_loading_is_completed(
'organization info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_information(self):
assert self._wait_until_loading_is_completed(
'server info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_registration(self):
assert self._wait_until_loading_is_completed(
'register server',
'.setup-wizard-forms__header-title'
)
tariff_plan = self.find_by_css(
'.setup-wizard-forms__content-register-radio'
)
assert tariff_plan
tariff_plan.last.click()
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_fin(self):
assert self._wait_until_loading_is_completed(
'your workspace is ready to use 🎉',
'.setup-wizard-info__content-title.setup-wizard-final__box-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.js-finish'
)
assert submit_btn
submit_btn.click()
def test_creating_bot_account(self):
options_btn = self.browser.find_by_css(
'.sidebar__toolbar-button.rc-tooltip.rc-tooltip--down.js-button'
)
options_btn.last.click()
administration_btn = self.browser.find_by_css('.rc-popover__item-text')
administration_btn.click()
users_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Users"]')
self.browser.driver.execute_script("arguments[0].click();",
users_btn[0])
add_user_btn = self.find_by_css('button[aria-label="Add User"]')
assert add_user_btn
add_user_btn.click()
input_name_el = self.find_by_css('input#name')
assert input_name_el
input_name_el.first.fill(self.bot_name)
input_username_el = self.find_by_css('input#username')
assert input_username_el
input_username_el.first.fill(self.bot_name)
input_email_el = self.find_by_css('input#email')
assert input_email_el
input_email_el.first.fill('{}<EMAIL>'.format(self.<EMAIL>))
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.first.click()
input_password_el = self.find_by_css('input#password')
assert input_password_el
input_password_el.first.fill(self.bot_password)
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.last.click()
role_option = self.find_by_css('option[value="bot"]')
assert role_option
role_option.first.click()
add_role_btn = self.find_by_css('button#addRole')
assert add_role_btn
add_role_btn.first.click()
# Do not send welcome email
welcome_ckbx = self.find_by_css('label[for="sendWelcomeEmail"]')
assert welcome_ckbx
welcome_ckbx.first.click()
save_btn = self.find_by_css('.rc-button.rc-button--primary.save')
assert save_btn
save_btn.first.click()
def test_adding_permissions_to_bot(self):
permissions = {
'view-full-other-user-info': True
}
perms_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Permissions"]'
)
assert perms_btn
self.browser.driver.execute_script("arguments[0].click();",
perms_btn[0])
for name in permissions:
checkbox = self.browser.driver.find_element_by_css_selector(
'input.role-permission[name="perm[bot][{}]"]'.format(name)
)
assert checkbox
if permissions[name] != bool(checkbox.get_attribute('checked')):
checkbox.click()
exit_btn = self.find_by_css(
'.sidebar-flex__close-button'
)
assert exit_btn
exit_btn.click()
def test_create_necessary_rooms(self):
groups = [
'hr',
'leave-coordination'
]
rocket = RocketChat(
self.username,
self.password,
server_url=self.addr
)
for name in groups:
rocket.groups_create(name, members=['meeseeks'])
def main():
parser = ArgumentParser(description='usage: %prog [options] arguments')
parser.add_argument('-a', '--host', dest='host', type=str,
help='allows specifying domain or IP '
'of the Rocket.Chat host')
parser.add_argument('-u', '--username', dest='username', type=str,
help='allows specifying admin username')
parser.add_argument('-p', '--password', dest='password', type=str,
help='allows specifying admin password')
parser.add_argument('-w', '--wait', dest='wait', type=int,
help='allows specifying time '
'for waiting loading of page(secs)')
options = parser.parse_args()
if not options.host:
options.host = LOCALHOST
stderr.write(
'Host is not specified. Defaults to {}.\n'.format(options.host)
)
if not options.username:
parser.error('Username is not specified')
if not options.password:
parser.error('Password is not specified')
if not options.wait:
options.wait = 100
stderr.write(
'Waiting time is not specified. Defaults to {}.\n'
.format(options.wait)
)
test_cases = SplinterWizardInit(
options.host,
options.username,
options.password,
wait=options.wait
)
test_cases.run()
if __name__ == "__main__":
main()
| from argparse import ArgumentParser
from sys import stderr
from time import sleep
from rocketchat_API.rocketchat import RocketChat
from base import SplinterTestCase
LOCALHOST = 'http://127.0.0.1:8006'
class SplinterWizardInit(SplinterTestCase):
def __init__(self, addr, username, password, wait=10, **kwargs):
SplinterTestCase.__init__(self, addr, **kwargs)
self.addr = addr
self.username = username
self.password = password
self.wait = wait
self.bot_name = 'meeseeks'
self.bot_password = '<PASSWORD>'
def _wait_until_loading_is_completed(self, header, selector):
for _ in range(self.wait):
title = self.find_by_css(selector)
if title.text.lower() == header:
return True
sleep(1)
return False
def test_administrator_info(self):
# Admin info
header = self.find_by_css('.setup-wizard-forms__header-title')
assert header.text.lower() in 'admin info'
self.browser.fill('registration-name', self.username)
self.browser.fill('registration-username', self.username)
self.browser.fill(
'registration-email', <EMAIL>(self.<EMAIL>)
)
self.browser.fill('registration-pass', self.password)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_organisation_info(self):
assert self._wait_until_loading_is_completed(
'organization info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_information(self):
assert self._wait_until_loading_is_completed(
'server info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_registration(self):
assert self._wait_until_loading_is_completed(
'register server',
'.setup-wizard-forms__header-title'
)
tariff_plan = self.find_by_css(
'.setup-wizard-forms__content-register-radio'
)
assert tariff_plan
tariff_plan.last.click()
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_fin(self):
assert self._wait_until_loading_is_completed(
'your workspace is ready to use 🎉',
'.setup-wizard-info__content-title.setup-wizard-final__box-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.js-finish'
)
assert submit_btn
submit_btn.click()
def test_creating_bot_account(self):
options_btn = self.browser.find_by_css(
'.sidebar__toolbar-button.rc-tooltip.rc-tooltip--down.js-button'
)
options_btn.last.click()
administration_btn = self.browser.find_by_css('.rc-popover__item-text')
administration_btn.click()
users_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Users"]')
self.browser.driver.execute_script("arguments[0].click();",
users_btn[0])
add_user_btn = self.find_by_css('button[aria-label="Add User"]')
assert add_user_btn
add_user_btn.click()
input_name_el = self.find_by_css('input#name')
assert input_name_el
input_name_el.first.fill(self.bot_name)
input_username_el = self.find_by_css('input#username')
assert input_username_el
input_username_el.first.fill(self.bot_name)
input_email_el = self.find_by_css('input#email')
assert input_email_el
input_email_el.first.fill('{}<EMAIL>'.format(self.<EMAIL>))
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.first.click()
input_password_el = self.find_by_css('input#password')
assert input_password_el
input_password_el.first.fill(self.bot_password)
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.last.click()
role_option = self.find_by_css('option[value="bot"]')
assert role_option
role_option.first.click()
add_role_btn = self.find_by_css('button#addRole')
assert add_role_btn
add_role_btn.first.click()
# Do not send welcome email
welcome_ckbx = self.find_by_css('label[for="sendWelcomeEmail"]')
assert welcome_ckbx
welcome_ckbx.first.click()
save_btn = self.find_by_css('.rc-button.rc-button--primary.save')
assert save_btn
save_btn.first.click()
def test_adding_permissions_to_bot(self):
    """Open the admin Permissions page and set the bot role's checkboxes."""
    wanted_permissions = {
        'view-full-other-user-info': True
    }
    permission_links = self.browser.driver.find_elements_by_css_selector(
        'a.sidebar-item__link[aria-label="Permissions"]'
    )
    assert permission_links
    # The sidebar link can be obscured, so trigger the click via JS.
    self.browser.driver.execute_script("arguments[0].click();",
                                       permission_links[0])
    for perm_name, should_be_set in wanted_permissions.items():
        box = self.browser.driver.find_element_by_css_selector(
            'input.role-permission[name="perm[bot][{}]"]'.format(perm_name)
        )
        assert box
        # Toggle only when the current state differs from the desired one.
        if bool(box.get_attribute('checked')) != should_be_set:
            box.click()
    close_button = self.find_by_css('.sidebar-flex__close-button')
    assert close_button
    close_button.click()
def test_create_necessary_rooms(self):
    """Create the private groups the bot needs, with 'meeseeks' as a member."""
    client = RocketChat(
        self.username,
        self.password,
        server_url=self.addr
    )
    for group_name in ('hr', 'leave-coordination'):
        client.groups_create(group_name, members=['meeseeks'])
def main():
    """Command-line entry point: parse options, apply defaults, run the suite."""
    parser = ArgumentParser(description='usage: %prog [options] arguments')
    parser.add_argument('-a', '--host', dest='host', type=str,
                        help='allows specifying domain or IP of the Rocket.Chat host')
    parser.add_argument('-u', '--username', dest='username', type=str,
                        help='allows specifying admin username')
    parser.add_argument('-p', '--password', dest='password', type=str,
                        help='allows specifying admin password')
    parser.add_argument('-w', '--wait', dest='wait', type=int,
                        help='allows specifying time for waiting loading of page(secs)')
    opts = parser.parse_args()
    # Host is optional: fall back to localhost with a notice on stderr.
    if not opts.host:
        opts.host = LOCALHOST
        stderr.write(
            'Host is not specified. Defaults to {}.\n'.format(opts.host)
        )
    # Credentials are mandatory.
    if not opts.username:
        parser.error('Username is not specified')
    if not opts.password:
        parser.error('Password is not specified')
    # Wait time is optional: default to 100 seconds with a notice.
    if not opts.wait:
        opts.wait = 100
        stderr.write(
            'Waiting time is not specified. Defaults to {}.\n'
            .format(opts.wait)
        )
    suite = SplinterWizardInit(
        opts.host,
        opts.username,
        opts.password,
        wait=opts.wait
    )
    suite.run()
# Run the wizard test suite when this file is executed as a script.
if __name__ == "__main__":
    main()
backend/userapp/admin.py | Lenend-KPU/LBS-Platform | 15 | 6614446 | <filename>backend/userapp/admin.py
from userapp.models import User
from django.contrib import admin
class UserAdmin(admin.ModelAdmin):
list_display = ["id", "user_name", "user_password", "user_email", "user_address"]
admin.site.register(User, UserAdmin)
| <filename>backend/userapp/admin.py
from userapp.models import User
from django.contrib import admin
class UserAdmin(admin.ModelAdmin):
list_display = ["id", "user_name", "user_password", "user_email", "user_address"]
admin.site.register(User, UserAdmin)
| none | 1 | 1.756334 | 2 | |
theano/sparse/sandbox/truedot.py | bridgeland/Theano-PyMC | 0 | 6614447 | <gh_stars>0
import theano
import numpy as np
import scipy.sparse as sp
from theano import sparse
from theano import gof, tensor, compile
from theano.sparse.basic import (
_is_sparse_variable,
_is_dense_variable,
as_sparse_variable,
_is_sparse,
_mtypes,
_mtype_to_str,
)
from theano.sparse import SparseType, dense_from_sparse, transpose
from tests import unittest_tools as utt
from tests.sparse.test_basic import eval_outputs
from tests.theano.sparse.test_basic import sparse_random_inputs
# To maintain compatibility
from theano.sparse.basic import TrueDot, true_dot
| import theano
import numpy as np
import scipy.sparse as sp
from theano import sparse
from theano import gof, tensor, compile
from theano.sparse.basic import (
_is_sparse_variable,
_is_dense_variable,
as_sparse_variable,
_is_sparse,
_mtypes,
_mtype_to_str,
)
from theano.sparse import SparseType, dense_from_sparse, transpose
from tests import unittest_tools as utt
from tests.sparse.test_basic import eval_outputs
from tests.theano.sparse.test_basic import sparse_random_inputs
# To maintain compatibility
from theano.sparse.basic import TrueDot, true_dot | en | 0.493978 | # To maintain compatibility | 1.952742 | 2 |
video.py | kostaskaragiorgos/Python-Opencv-Learning-Path | 0 | 6614448 | <reponame>kostaskaragiorgos/Python-Opencv-Learning-Path<gh_stars>0
import cv2
vid = cv2.VideoCapture() #your video
while True:
ret, frame = vid.read()
cv2.imshow("VIDEO",frame)
key = cv2.waitKey(1)
if key == 27:
break
vid.release()
cv2.destroyAllWindows()
| import cv2
vid = cv2.VideoCapture() #your video
while True:
ret, frame = vid.read()
cv2.imshow("VIDEO",frame)
key = cv2.waitKey(1)
if key == 27:
break
vid.release()
cv2.destroyAllWindows() | en | 0.229393 | #your video | 2.97636 | 3 |
ooobuild/lo/ui/dialogs/x_file_picker2.py | Amourspirit/ooo_uno_tmpl | 0 | 6614449 | <reponame>Amourspirit/ooo_uno_tmpl
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ui.dialogs
import typing
from abc import abstractmethod
from .x_file_picker import XFilePicker as XFilePicker_ec3e0d2d
class XFilePicker2(XFilePicker_ec3e0d2d):
"""
extends file picker interface to workaround some design problems.
See Also:
`API XFilePicker2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ui_1_1dialogs_1_1XFilePicker2.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ui.dialogs'
__ooo_full_ns__: str = 'com.sun.star.ui.dialogs.XFilePicker2'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.ui.dialogs.XFilePicker2'
@abstractmethod
def getSelectedFiles(self) -> 'typing.Tuple[str, ...]':
"""
Returns a sequence of the selected files including path information in URL format, conforming to Rfc1738.
If the user closed the dialog with cancel an empty sequence will be returned.
If the user closed the dialog with OK a list of all selected files will be returned.
Instead to the method getFiles() of base interface XFilePicker the new method return full qualified URLs for every selected file.
A list of all selected file as complete URLs.
Notes for the implementation of a FileSave dialog:If there exists a checkbox \"Automatic File Extension\" which is checked and a valid filter is currently selected the dialog may automatically add an extension to the selected file name(s).
"""
__all__ = ['XFilePicker2']
| # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ui.dialogs
import typing
from abc import abstractmethod
from .x_file_picker import XFilePicker as XFilePicker_ec3e0d2d
class XFilePicker2(XFilePicker_ec3e0d2d):
"""
extends file picker interface to workaround some design problems.
See Also:
`API XFilePicker2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ui_1_1dialogs_1_1XFilePicker2.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ui.dialogs'
__ooo_full_ns__: str = 'com.sun.star.ui.dialogs.XFilePicker2'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.ui.dialogs.XFilePicker2'
@abstractmethod
def getSelectedFiles(self) -> 'typing.Tuple[str, ...]':
"""
Returns a sequence of the selected files including path information in URL format, conforming to Rfc1738.
If the user closed the dialog with cancel an empty sequence will be returned.
If the user closed the dialog with OK a list of all selected files will be returned.
Instead to the method getFiles() of base interface XFilePicker the new method return full qualified URLs for every selected file.
A list of all selected file as complete URLs.
Notes for the implementation of a FileSave dialog:If there exists a checkbox \"Automatic File Extension\" which is checked and a valid filter is currently selected the dialog may automatically add an extension to the selected file name(s).
"""
__all__ = ['XFilePicker2'] | en | 0.828452 | # coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.ui.dialogs extends file picker interface to workaround some design problems. See Also: `API XFilePicker2 <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ui_1_1dialogs_1_1XFilePicker2.html>`_ Returns a sequence of the selected files including path information in URL format, conforming to Rfc1738. If the user closed the dialog with cancel an empty sequence will be returned. If the user closed the dialog with OK a list of all selected files will be returned. Instead to the method getFiles() of base interface XFilePicker the new method return full qualified URLs for every selected file. A list of all selected file as complete URLs. Notes for the implementation of a FileSave dialog:If there exists a checkbox \"Automatic File Extension\" which is checked and a valid filter is currently selected the dialog may automatically add an extension to the selected file name(s). | 1.785713 | 2 |
src/models/net/functions/mixture_density_outputs.py | kristofbc/handwriting-synthesis | 0 | 6614450 | <gh_stars>0
from __future__ import print_function
import numpy
import six
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _mat_ptrs(a):
    """Build a GPU array holding the device address of each matrix in *a*.

    Args:
        a: A batch of matrices on GPU.
    Returns:
        GPU array of pointers to matrices (``numpy.intp``), one per batch
        element.
    """
    base = a[0].data.ptr
    if a.shape[0] == 1:
        # Single matrix: no stride to derive, just emit its address.
        return cuda.cupy.full((1,), base, dtype=numpy.intp)
    # Consecutive matrices are equally spaced in memory; derive the byte
    # step from the first two and enumerate the rest arithmetically.
    step = a[1].data.ptr - base
    return cuda.cupy.arange(
        base,
        base + step * a.shape[0],
        step,
        dtype=numpy.intp)
def _as_batch_mat(x):
return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x
def _get_ld(a):
shape = a.shape[-2:]
strides = a.strides[-2:]
trans = numpy.argmin(strides)
return trans, int(max(shape[trans], max(strides) // a.itemsize))
def _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):
    """Batched matrix multiply on GPU via cuBLAS ``sgemmBatched``.

    Computes ``out[i] = op(a[i]) @ op(b[i])`` for every batch index ``i``,
    where ``op`` is an optional transpose controlled by the ``trans*``
    flags.  2-D inputs are promoted to batches of column matrices.
    """
    a = _as_batch_mat(a)
    b = _as_batch_mat(b)
    trans_axis = (0, 2, 1)
    if transout:
        out = out.transpose(trans_axis)
    needtrans, _ = _get_ld(out)
    if needtrans == 1:
        # cuBLAS wants a column-major output; rewrite the product using
        # (A B)^T = B^T A^T
        a, b = b, a
        transa, transb = not transb, not transa
        out = out.transpose(trans_axis)
    if transa:
        a = a.transpose(trans_axis)
    if transb:
        b = b.transpose(trans_axis)
    # Re-derive the actual transpose flags and leading dimensions from the
    # (possibly transposed) memory layouts.
    transa, lda = _get_ld(a)
    transb, ldb = _get_ld(b)
    transout, ldout = _get_ld(out)
    la, n, ka = a.shape
    lb, kb, m = b.shape
    # Inner dimensions must agree, output must now be column-major and of
    # the expected batched shape.
    assert ka == kb
    assert transout == 0 or ldout == 1
    assert out.shape == (la, n, m)
    # Arrays of per-matrix device pointers, as sgemmBatched expects.
    ap = _mat_ptrs(a)
    bp = _mat_ptrs(b)
    outp = _mat_ptrs(out)
    cuda.cublas.sgemmBatched(
        cuda.Device().cublas_handle,
        transa,
        transb,
        n, m, ka, 1.0,
        ap.data.ptr, lda,
        bp.data.ptr, ldb,
        0.0, outp.data.ptr, ldout, la)
class MixtureDensityOutputs(function.Function):
"""Mixture-Densiy-Outputs unit for handwriting prediction/synthesis (Graves 2013).
This function outputs Pr(x[t+1]|y[t]) where x[t] is a 3-dimensional vector (eg., x[t] = (x, y, z)).
It has five inputs (e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat),
and five outputs (e, pi, mux, muy, sgmx, sgmy, rho), where e_hat and e are scalar,
pi_hat, pi, mux_hat, muy_hat, sgmx_hat, sgmy_hat, mux, muy, sgmx, sgmy,
rho_hat and rho are M-length, 1 dimensional vectors.
"""
def check_type_forward(self, in_types):
    """Validate the nine input arrays of :meth:`forward`.

    ``in_types`` is ``(x, eow, e_hat, pi_hat, mux_hat, muy_hat,
    sgmx_hat, sgmy_hat, rho_hat)``.  Every checked array must be
    float32 and at least 2-dimensional; all must share the batch
    dimension (axis 0) and any trailing axes (>= 2) with ``x``, and the
    per-mixture parameters must share the mixture dimension (axis 1)
    with ``pi_hat``.  ``eow`` is deliberately left unchecked, matching
    the original behaviour.  The original copy-pasted expect blocks are
    folded into two equivalent loops.
    """
    type_check.expect(in_types.size() == 9)
    (x_type, eow_type, e_type, pi_type, mux_type, muy_type,
     sgmx_type, sgmy_type, rho_type) = in_types
    # Each non-x input: float32, >= 2-d, batch-aligned with x, and equal
    # to x on every axis beyond the second.
    for t in (e_type, pi_type, mux_type, muy_type,
              sgmx_type, sgmy_type, rho_type):
        type_check.expect(
            x_type.dtype == numpy.float32,
            t.dtype == numpy.float32,
            x_type.ndim >= 2,
            t.ndim >= 2,
            x_type.shape[0] == t.shape[0],
        )
        for i in range(2, type_check.eval(x_type.ndim)):
            type_check.expect(t.shape[i] == x_type.shape[i])
    # Each per-mixture parameter must expose the same mixture size M
    # (axis 1) as pi, and agree with pi on every axis beyond the second.
    for t in (mux_type, muy_type, sgmx_type, sgmy_type, rho_type):
        type_check.expect(
            pi_type.dtype == numpy.float32,
            t.dtype == numpy.float32,
            pi_type.ndim >= 2,
            t.ndim >= 2,
            pi_type.shape[1] == t.shape[1],
        )
        for i in range(2, type_check.eval(x_type.ndim)):
            type_check.expect(t.shape[i] == pi_type.shape[i])
def forward(self, inputs):
    """Compute the mixture-density loss and the next pen point.

    ``inputs`` is ``(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat,
    sgmx_hat, sgmy_hat, rho_hat)``.  Dispatches on the array module:
    a NumPy (CPU) implementation and a CuPy/raw-CUDA-kernel (GPU)
    implementation.  Within each, ``x3.sum() >= 0`` selects the
    training/validation path (negative log-likelihood loss plus a
    no-backprop sampled prediction), otherwise the pure sampling
    (prediction) path.  Samples are drawn by rejection sampling over
    the bounding box of the mixture means.

    Returns ``(loss_t, xnext, eos, pi, mux, muy, sgmx, sgmy, rho)``.
    Side effects: caches ``self.pi_``, ``self.gammas``, ``self.Zs``,
    ``self.update_or_not`` etc. for :meth:`backward`.
    """
    xp = cuda.get_array_module(*inputs)
    xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat = inputs
    batchsize, M = pi_hat.shape
    # Split the target point into pen offsets (x1, x2) and eos flag (x3).
    x1 = xnext[:,0].reshape((batchsize, 1))
    x2 = xnext[:,1].reshape((batchsize, 1))
    x3 = xnext[:,2].reshape((batchsize, 1))
    if isinstance(mux_hat, numpy.ndarray):
        # ---------------- CPU (NumPy) path ----------------
        self.x = xnext
        # NOTE(review): this is sigmoid(-e_hat), per Graves 2013 eq. (18).
        self.eos = 1./(1. + numpy.exp(e_hat)) #_sigmoid(e_hat)
        # Softmax over mixture weights; exp/tanh link functions for the rest.
        self.pi_ = numpy.exp(pi_hat)/numpy.exp(pi_hat).sum(axis=1).reshape((batchsize,1))
        self.mux = mux_hat
        self.muy = muy_hat
        self.sgmx = numpy.exp(sgmx_hat)
        self.sgmy = numpy.exp(sgmy_hat)
        self.rho_ = numpy.tanh(rho_hat)
        if x3.sum() >= 0.0: #xnext is not None: # training & validation
            #x1 = xnext[:,0].reshape((batchsize, 1))
            #x2 = xnext[:,1].reshape((batchsize, 1))
            #x3 = xnext[:,2].reshape((batchsize, 1))
            # Bivariate Gaussian density of the target under each component.
            dx1 = (x1 - self.mux)/self.sgmx
            dx2 = (x2 - self.muy)/self.sgmy
            self.Zs = dx1*dx1 + dx2*dx2 - 2.*self.rho_*dx1*dx2
            Ns = numpy.exp(- 0.5*self.Zs/ (1.-self.rho_**2))/(2.* 3.1415927 * self.sgmx * self.sgmy * numpy.sqrt(1. - self.rho_**2)+1e-10)
            gamma_hats = self.pi_*Ns
            sum_gamma_hats = gamma_hats.sum(axis=1).reshape((batchsize, 1)) + 1e-10
            # Component responsibilities, reused by backward().
            self.gammas = gamma_hats/sum_gamma_hats
            # Negative log-likelihood of offsets + Bernoulli eos term.
            loss_t = -numpy.log(sum_gamma_hats) - x3*numpy.log(self.eos) - (1. - x3)*numpy.log(1. - self.eos)
            # Rows whose eos flag equals 2 are padding: mask them out.
            idx = numpy.where(x3==2)[0]
            self.update_or_not = numpy.ones_like(x3)
            self.update_or_not[idx,0] = 0.0
            loss_t = loss_t * self.update_or_not
            self.xnext = xnext
            # Prediction in training
            xnext_h = numpy.copy(xnext)
            with chainer.no_backprop_mode():
                # Rejection-sampling bounding box spans the mixture means.
                myux_min_h = mux_hat.min(axis=1).reshape((batchsize, 1))
                myux_max_h = mux_hat.max(axis=1).reshape((batchsize, 1))
                myuy_min_h = muy_hat.min(axis=1).reshape((batchsize, 1))
                myuy_max_h = muy_hat.max(axis=1).reshape((batchsize, 1))
                protect_mask = numpy.ones((batchsize, 1))
                # Keep drawing until every batch row has accepted a sample.
                while protect_mask.sum() > 0:
                    z1_h = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                    z2_ = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                    x1_h = myux_min_h + (myux_max_h - myux_min_h) * z1_h
                    x2_h = myuy_min_h + (myuy_max_h - myuy_min_h) * z2_
                    dx1_h = (x1_h - self.mux)/self.sgmx
                    dx2_h = (x2_h - self.muy)/self.sgmy
                    self.Zs_h = dx1_h*dx1_h + dx2_h*dx2_h - 2.*self.rho_*dx1_h*dx2_h
                    Ns = numpy.exp(- 0.5*self.Zs_h/ (1.-self.rho_**2))/(2.* 3.1415927 * self.sgmx * self.sgmy * numpy.sqrt(1. - self.rho_**2)+1e-10)
                    gamma_hats_h = self.pi_*Ns
                    sum_gamma_hats = gamma_hats_h.sum(axis=1) # Pr(x|ys)
                    us_h = numpy.random.uniform(size=batchsize)
                    idx = numpy.where(sum_gamma_hats > us_h)[0]
                    xnext_h[idx, 0] += (x1_h*protect_mask)[idx, 0]
                    xnext_h[idx, 1] += (x2_h*protect_mask)[idx, 0]
                    protect_mask[idx, 0] = 0.0
                #xnext[:, 2] = self.eos[:, 0]
                #xnext[:, 2] = numpy.where(eow < 0, xnext[:, 2], 2.)
                #xnext_h[:, 2] = self.eos[:, 0]
                #mask = eow < 0
                #if not mask.all():
                #    xnext_h[:, 2] = 2.0
                #xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.)
                # Threshold the eos probability into a hard 0/1 pen-up flag.
                xnext_h[:, 2] = xp.where(self.eos[:, 0] > 0.10, 1.0, 0.0)
            self.xnext = xnext_h
        else: # prediction
            xnext = numpy.zeros((batchsize, 3))
            myux_min = mux_hat.min(axis=1).reshape((batchsize, 1))
            myux_max = mux_hat.max(axis=1).reshape((batchsize, 1))
            myuy_min = muy_hat.min(axis=1).reshape((batchsize, 1))
            myuy_max = muy_hat.max(axis=1).reshape((batchsize, 1))
            protect_mask = numpy.ones((batchsize, 1))
            # Rejection sampling over the box spanned by the mixture means.
            while protect_mask.sum() >0:
                z1 = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                z2 = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                x1 = myux_min + (myux_max - myux_min) * z1
                x2 = myuy_min + (myuy_max - myuy_min) * z2
                dx1 = (x1 - self.mux)/self.sgmx
                dx2 = (x2 - self.muy)/self.sgmy
                self.Zs = dx1*dx1 + dx2*dx2 - 2.*self.rho_*dx1*dx2
                Ns = numpy.exp(- 0.5*self.Zs/ (1.-self.rho_**2))/(2.* 3.1415927 * self.sgmx * self.sgmy * numpy.sqrt(1. - self.rho_**2)+1e-10)
                gamma_hats = self.pi_*Ns
                sum_gamma_hats = gamma_hats.sum(axis=1) # Pr(x|ys)
                us = numpy.random.uniform(size=batchsize)
                idx = numpy.where(sum_gamma_hats > us)[0]
                xnext[idx, 0] += (x1*protect_mask)[idx, 0]
                xnext[idx, 1] += (x2*protect_mask)[idx, 0]
                protect_mask[idx, 0] = 0.0
            #xnext[:, 2] = self.eos[:, 0]
            #xnext[:, 2] = numpy.where(eow < 0, xnext[:, 2], 2.)
            xnext[:, 2] = self.eos[:, 0]
            # eow >= 0 anywhere marks end-of-word: emit sentinel flag 2.
            mask = eow < 0
            if not mask.all():
                xnext[:, 2] = 2.0
            #xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.)
            self.xnext = xnext
            #loss_t = None
            # No target available at prediction time: loss is zero.
            loss_t = xp.zeros((batchsize, 1)).astype(xp.float32)
            self.Zs = None
    else:
        # ---------------- GPU (CuPy / CUDA kernel) path ----------------
        self.mux = mux_hat
        self.muy = muy_hat
        # Subtract the row max before exp() for a numerically stable softmax.
        self.pi_hat = pi_hat - pi_hat.max(axis=1).reshape(batchsize, 1)
        sum_exp_pi = cuda.reduce(
            'T x', # input params
            'T y', # output params
            'exp(x)', # map
            'a+b', # reduce
            'y=a', # post-reduction map
            '1e-10', # identity value
            'mdout_sumexp' # kernel name
        )(self.pi_hat, axis=1)
        self.eos = 1./(1. + cuda.cupy.exp(e_hat))
        if x3.sum() >= 0.0: #xnext is not None: # training & validation
            # Fused kernel: link functions + per-component density in one pass.
            gamma_hats, self.Zs, self.pi_, self.sgmx, self.sgmy, self.rho_ = cuda.elementwise(
                'T x1, T x2, T pi_hat, T mux_, T muy_, T sgmx_hat, T sgmy_hat, T rho_hat, T sum_exp_pi', # input
                'T gammas, T Zs, T pi_, T sgmx_, T sgmy_, T rho_', # output
                '''
                pi_ = exp(pi_hat)/sum_exp_pi;
                sgmx_ = exp(sgmx_hat) + 1e-10;
                sgmy_ = exp(sgmy_hat) + 1e-10;
                rho_ = tanh(rho_hat);
                T rho2 = 1. - rho_*rho_ + 1e-10;
                T dx1 = (x1 - mux_)/sgmx_;
                T dx2 = (x2 - muy_)/sgmy_;
                Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                gammas = pi_ * Ns;
                ''',
                'mdout_fwd1',
            )(x1, x2, self.pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat, sum_exp_pi.reshape((batchsize, 1)))
            sum_gamma_hats = gamma_hats.sum(axis=1).reshape((batchsize, 1)) + 1e-10
            self.gammas = gamma_hats/sum_gamma_hats
            # Negative log-likelihood + Bernoulli eos term (same as CPU path).
            loss_t = cuda.elementwise(
                'T sum_, T x3, T eos',
                'T loss',
                '''
                loss = -log(sum_) - x3 * log(eos) - (1. - x3) * log(1.-eos);
                ''',
                'mdout_fwd2',
            )(sum_gamma_hats, x3, self.eos)
            # Mask out padded rows (eos flag == 2).
            self.update_or_not = xp.where(x3==2., 0.0, 1.0).astype(xp.float32)
            loss_t = loss_t * self.update_or_not
            self.xnext = xnext
            # Prediction in training
            with chainer.no_backprop_mode():
                # Clamp tiny std-devs so rejection sampling stays feasible.
                self.sgmx_h = xp.where( self.sgmx < 0.0015, 0.0015, self.sgmx)
                self.sgmy_h = xp.where( self.sgmy < 0.0015, 0.0015, self.sgmy)
                # Broadcast the means into (batch, M, M) grids so the density
                # at every component center can be evaluated in one kernel.
                muxs = xp.empty((batchsize, M, M)).astype(xp.float32)
                muys = xp.empty((batchsize, M, M)).astype(xp.float32)
                _batch_matmul_gpu(mux_hat.reshape((batchsize, M, 1)), xp.ones((batchsize, 1, M)).astype(xp.float32), out=muxs)
                _batch_matmul_gpu(muy_hat.reshape((batchsize, M, 1)), xp.ones((batchsize, 1, M)).astype(xp.float32), out=muys)
                gamma_hats_at_components = cuda.elementwise(
                    'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_', # input
                    'T gammas', # output
                    '''
                    T rho2 = 1. - rho_*rho_ + 1e-10;
                    T dx1 = (x1 - mux_)/sgmx_;
                    T dx2 = (x2 - muy_)/sgmy_;
                    T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                    T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                    gammas = pi_ * Ns;
                    ''',
                    'mdout_fwd5',
                )(muxs,
                  muys,
                  self.pi_.reshape((batchsize, 1, M)),
                  mux_hat.reshape((batchsize, 1, M)),
                  muy_hat.reshape((batchsize, 1, M)),
                  self.sgmx_h.reshape((batchsize, 1, M)),
                  self.sgmy_h.reshape((batchsize, 1, M)),
                  self.rho_.reshape((batchsize, 1, M))
                )
                sum_gamma_hats_at_components = gamma_hats_at_components.sum(axis=2) # (batchsize, M)
                # Approximate density upper bound = max density over centers.
                p_maxs = sum_gamma_hats_at_components.max(axis=1).reshape((batchsize, 1)) # (batchsize, 1)
                myux_min_h = mux_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
                myux_max_h = mux_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
                myuy_min_h = muy_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
                myuy_max_h = muy_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
                xnext_h = xp.zeros((batchsize, 3)).astype(xp.float32)
                protect_mask = xp.ones((batchsize, 1)).astype(xp.float32)
                n_samples = 32768 * 2 #16384 #8192 #4096 #2048 #1024 #512
                x1_h = xp.copy(x1)
                x2_h = xp.copy(x2)
                while protect_mask.sum() >0:
                    # sampling n (=n_samples) samples in parallel at a step
                    z1_h = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples, 1))
                    z2_h = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples, 1))
                    x1__h = (myux_min_h + (myux_max_h - myux_min_h) * z1_h).astype(xp.float32) # (batchsize, n_samples, 1)
                    x2__h = (myuy_min_h + (myuy_max_h - myuy_min_h) * z2_h).astype(xp.float32) # (batchsize, n_samples, 1)
                    gamma_hats_h = cuda.elementwise(
                        'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_', # input
                        'T gammas', # output
                        '''
                        T rho2 = 1. - rho_*rho_ + 1e-10;
                        T dx1 = (x1 - mux_)/sgmx_;
                        T dx2 = (x2 - muy_)/sgmy_;
                        T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                        T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                        gammas = pi_ * Ns;
                        ''',
                        'mdout_fwd4',
                    )(
                        x1__h, x2__h,
                        self.pi_.reshape(( batchsize, 1, M)),
                        mux_hat.reshape(( batchsize, 1, M)),
                        muy_hat.reshape(( batchsize, 1, M)),
                        self.sgmx_h.reshape((batchsize, 1, M)),
                        self.sgmy_h.reshape((batchsize, 1, M)),
                        self.rho_.reshape((batchsize, 1, M))
                    )
                    sum_gamma_hats_h = gamma_hats_h.sum(axis=2)
                    us_h = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples)) * p_maxs
                    # Accept the first candidate per row whose density beats u.
                    update_mask__h = xp.where(sum_gamma_hats_h > us_h, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples))
                    update_mask_h = update_mask__h.max(axis=1).reshape((batchsize, 1))
                    sample_idx_h = update_mask__h.argmax(axis=1).reshape((batchsize, 1))
                    for bb in xrange(batchsize):
                        this_midx = sample_idx_h[bb, 0]
                        x1_h[bb:bb+1, 0] = x1__h[bb:bb+1, this_midx:this_midx+1, 0]
                        x2_h[bb:bb+1, 0] = x2__h[bb:bb+1, this_midx:this_midx+1, 0]
                    xnext_h[:, 0] += (x1_h*protect_mask*update_mask_h)[:, 0]
                    xnext_h[:, 1] += (x2_h*protect_mask*update_mask_h)[:, 0]
                    protect_mask -= protect_mask * update_mask_h
                # Threshold the eos probability into a hard 0/1 pen-up flag.
                xnext_h[:, 2:] = xp.where(self.eos[:, 0:1] > 0.10, 1.0, 0.0)
                #xnext_h[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.)
                self.xnext = xnext_h
                #loss_t = xp.zeros((batchsize, 1)).astype(xp.float32)
                #self.Zs = None
        else: # prediction (sampling from probability distribution)
            # pi, sgmx, sgmy, rho <-- pi_hat, sgmx_hat, sgmy_hat, rho_hat
            self.pi_, self.sgmx, self.sgmy, self.rho_ = cuda.elementwise(
                'T pi_hat, T sgmx_hat, T sgmy_hat, T rho_hat, T sum_exp_pi', # input
                'T pi_, T sgmx_, T sgmy_, T rho_', # output
                '''
                pi_ = exp(pi_hat)/sum_exp_pi;
                sgmx_ = exp(sgmx_hat) + 1e-10;
                sgmy_ = exp(sgmy_hat) + 1e-10;
                rho_ = tanh(rho_hat);
                ''',
                'mdout_fwd3',
            )(self.pi_hat, sgmx_hat, sgmy_hat, rho_hat, sum_exp_pi.reshape((batchsize, 1)))
            # because variances of gaussians are very small, sampling is virtually impossible, we set lower boundary for variances!
            self.sgmx = xp.where( self.sgmx < 0.0015, 0.0015, self.sgmx)
            self.sgmy = xp.where( self.sgmy < 0.0015, 0.0015, self.sgmy)
            #print(self.sgmx.min(), self.sgmy.min())
            # get the (aproximated) maximum p value of M-mixture gaussian distributions.
            # Here I assume that the maximum p value is taken at a center of a gaussian component in the mixture.
            # First, calculate p-values at each center of gaussian components,
            # and the maximum of these p-values is considered as the upper boundary of the M-mixture gaussian distributions
            # prepare x1 and x2 matrices like
            # [ [mux0, mux0, ...., mux0],
            #   [mux1, mux1, ...., mux1],
            #   ...
            #   [muxn, muxn, ...., muxn]] where n = batchsize
            muxs = xp.empty((batchsize, M, M)).astype(xp.float32)
            muys = xp.empty((batchsize, M, M)).astype(xp.float32)
            _batch_matmul_gpu(mux_hat.reshape((batchsize, M, 1)), xp.ones((batchsize, 1, M)).astype(xp.float32), out=muxs)
            _batch_matmul_gpu(muy_hat.reshape((batchsize, M, 1)), xp.ones((batchsize, 1, M)).astype(xp.float32), out=muys)
            # N_i((mux[j], muy[j])) for i = 0, 1, ..., M and j = 0, 1, ..., M
            gamma_hats_at_components = cuda.elementwise(
                'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_', # input
                'T gammas', # output
                '''
                T rho2 = 1. - rho_*rho_ + 1e-10;
                T dx1 = (x1 - mux_)/sgmx_;
                T dx2 = (x2 - muy_)/sgmy_;
                T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                gammas = pi_ * Ns;
                ''',
                'mdout_fwd5',
            )(muxs,
              muys,
              self.pi_.reshape((batchsize, 1, M)),
              mux_hat.reshape((batchsize, 1, M)),
              muy_hat.reshape((batchsize, 1, M)),
              self.sgmx.reshape((batchsize, 1, M)),
              self.sgmy.reshape((batchsize, 1, M)),
              self.rho_.reshape((batchsize, 1, M))
            )
            # p[j] = sum(N_i((mux[j], muy[j])) for i = 0, 1, ..., M
            sum_gamma_hats_at_components = gamma_hats_at_components.sum(axis=2) # (batchsize, M)
            # max(p[0], p[1], ..., p[M]) for each batch
            p_maxs = sum_gamma_hats_at_components.max(axis=1).reshape((batchsize, 1)) # (batchsize, 1)
            #print(p_maxs.reshape((1, batchsize)))
            myux_min = mux_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
            myux_max = mux_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
            myuy_min = muy_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
            myuy_max = muy_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
            xnext = xp.zeros((batchsize, 3)).astype(xp.float32)
            protect_mask = xp.ones((batchsize, 1)).astype(xp.float32)
            n_samples = 32768 * 2 #16384 #8192 #4096 #2048 #1024 #512
            while protect_mask.sum() >0:
                # sampling n (=n_samples) samples in parallel at a step
                z1 = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples, 1))
                z2 = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples, 1))
                x1_ = (myux_min + (myux_max - myux_min) * z1).astype(xp.float32) # (batchsize, n_samples, 1)
                x2_ = (myuy_min + (myuy_max - myuy_min) * z2).astype(xp.float32) # (batchsize, n_samples, 1)
                gamma_hats = cuda.elementwise(
                    'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_', # input
                    'T gammas', # output
                    '''
                    T rho2 = 1. - rho_*rho_ + 1e-10;
                    T dx1 = (x1 - mux_)/sgmx_;
                    T dx2 = (x2 - muy_)/sgmy_;
                    T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                    T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                    gammas = pi_ * Ns;
                    ''',
                    'mdout_fwd4',
                )(
                    x1_, x2_,
                    self.pi_.reshape(( batchsize, 1, M)),
                    mux_hat.reshape(( batchsize, 1, M)),
                    muy_hat.reshape(( batchsize, 1, M)),
                    self.sgmx.reshape((batchsize, 1, M)),
                    self.sgmy.reshape((batchsize, 1, M)),
                    self.rho_.reshape((batchsize, 1, M))
                )
                sum_gamma_hats_ = gamma_hats.sum(axis=2)
                """
                sum_gamma_hats = sum_gamma_hats_.max(axis=1).reshape((batchsize, 1))
                sample_idx = sum_gamma_hats_.argmax(axis=1).reshape((batchsize, 1))
                for bb in xrange(batchsize):
                    this_midx = sample_idx[bb, 0]
                    x1[bb:bb+1, 0] = x1_[bb:bb+1, this_midx:this_midx+1, 0]
                    x2[bb:bb+1, 0] = x2_[bb:bb+1, this_midx:this_midx+1, 0]
                us = xp.random.uniform(size=batchsize).reshape((batchsize, 1)) * p_maxs
                update_mask = xp.where(sum_gamma_hats > us, 1.0, 0.0).astype(xp.float32).reshape((batchsize, 1))
                xnext[:, 0] += (x1*protect_mask*update_mask)[:, 0]
                xnext[:, 1] += (x2*protect_mask*update_mask)[:, 0]
                protect_mask -= protect_mask * update_mask
                """
                """
                us_ = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples)) * p_maxs
                update_mask_ = xp.where(sum_gamma_hats_ > us_, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples))
                x1 = x1_.reshape((batchsize, n_samples)) * update_mask_
                x2 = x2_.reshape((batchsize, n_samples)) * update_mask_
                for i in xrange(n_samples):
                    xnext[:, 0] += (x1_[:,i, :]*protect_mask)[:, 0]
                    xnext[:, 1] += (x2_[:,i, :]*protect_mask)[:, 0]
                    #print(protect_mask.shape, update_mask_[:, i:(i+1)].shape)
                    protect_mask -= protect_mask * update_mask_[:, i:(i+1)]
                """
                # Accept the first candidate per row whose density beats u.
                us_ = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples)) * p_maxs
                update_mask_ = xp.where(sum_gamma_hats_ > us_, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples))
                update_mask = update_mask_.max(axis=1).reshape((batchsize, 1))
                sample_idx = update_mask_.argmax(axis=1).reshape((batchsize, 1))
                for bb in xrange(batchsize):
                    this_midx = sample_idx[bb, 0]
                    x1[bb:bb+1, 0] = x1_[bb:bb+1, this_midx:this_midx+1, 0]
                    x2[bb:bb+1, 0] = x2_[bb:bb+1, this_midx:this_midx+1, 0]
                xnext[:, 0] += (x1*protect_mask*update_mask)[:, 0]
                xnext[:, 1] += (x2*protect_mask*update_mask)[:, 0]
                protect_mask -= protect_mask * update_mask
            # eow >= 0 anywhere marks end-of-word: emit sentinel flag 2.
            xnext[:, 2:] = self.eos[:, 0:1]
            xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.)
            self.xnext = xnext
            loss_t = xp.zeros((batchsize, 1)).astype(xp.float32)
            self.Zs = None
    return loss_t, self.xnext, self.eos, self.pi_, self.mux, self.muy, self.sgmx, self.sgmy, self.rho_,
def backward(self, inputs, grad_outputs):
    """Backward pass: analytic gradients of the mixture-density loss.

    Uses the quantities cached by ``forward()`` (``self.pi_``,
    ``self.gammas``, ``self.Zs``, ``self.eos``, ``self.update_or_not``).
    ``grad_outputs`` is not used (the unpacking is commented out), so the
    gradients are those of the loss itself.  No gradient is propagated to
    ``xnext`` or ``eow`` (returned as ``None``).
    """
    xp = cuda.get_array_module(*inputs)
    xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat = inputs
    batchsize, M = pi_hat.shape
    # Target point: pen offsets (x1, x2) and end-of-stroke flag (x3).
    x1 = xnext[:,0].reshape((batchsize, 1))
    x2 = xnext[:,1].reshape((batchsize, 1))
    x3 = xnext[:,2].reshape((batchsize, 1))
    #gpi, = grad_outputs
    gpi = xp.empty_like(pi_hat)
    gmux = xp.empty_like(mux_hat)
    gmuy = xp.empty_like(muy_hat)
    gsgmx = xp.empty_like(sgmx_hat)
    gsgmy = xp.empty_like(sgmy_hat)
    grho = xp.empty_like(rho_hat)
    geos = xp.empty_like(e_hat)
    gxs = xp.zeros_like(xnext)
    # Consider the case that either gradient is not given
    # NOTE(review): these None checks can never trigger -- the arrays were
    # just allocated with empty_like above.
    if gpi is None:
        gpi = 0
    if gmux is None:
        gmux = 0
    if gmuy is None:
        gmuy = 0
    if gsgmx is None:
        gsgmx = 0
    if gsgmy is None:
        gsgmy = 0
    if grho is None:
        grho = 0
    if geos is None:
        geos = 0
    if xp is numpy:
        # CPU path: vectorized numpy version of the same formulas as the
        # CUDA kernel below.  `update_or_not` masks padded rows (x3 == 2).
        #update_or_not = xp.ones_like(x3)
        #idx = numpy.where(x3==2)[0]
        #update_or_not[idx,0] = 0.0
        C_ = 1./(1. - self.rho_*self.rho_)
        gpi = (self.pi_ - self.gammas) * self.update_or_not
        dx1 = (x1 - self.mux)/self.sgmx
        dx2 = (x2 - self.muy)/self.sgmy
        CA1_ = C_*( dx1 - self.rho_*dx2 ) /self.sgmx
        CA2_ = C_*( dx2 - self.rho_*dx1 ) /self.sgmy
        gmux = - self.gammas * CA1_ * self.update_or_not
        gmuy = - self.gammas * CA2_ * self.update_or_not
        gsgmx = - self.gammas * ( CA1_ * (x1 - self.mux) - 1.) * self.update_or_not
        gsgmy = - self.gammas * ( CA2_ * (x2 - self.muy) - 1.) * self.update_or_not
        grho = - self.gammas * ( dx1*dx2 + self.rho_ * (1. - C_ * self.Zs) ) * self.update_or_not
        geos = (x3 - self.eos) * self.update_or_not
    else:
        # GPU path: one fused elementwise kernel computes all six
        # mixture-parameter gradients per (row, component).
        #update_or_not = xp.where(x3==2., 0.0, 1.0).astype(xp.float32)
        gpi, gmux, gmuy, gsgmx, gsgmy, grho = cuda.elementwise(
            'T x1, T x2, T gammas, T pi_, T mux, T muy, T sgmx, T sgmy, T rho_, T Zs, T un',
            'T gpi, T gmux, T gmuy, T gsgmx, T gsgmy, T grho',
            '''
            T C_ = 1. / (1. - rho_ * rho_ + 1e-10);
            T dx1 = (x1 - mux)/sgmx;
            T dx2 = (x2 - muy)/sgmy;
            T CA1 = C_ * ( dx1 - rho_*dx2 ) /sgmx;
            T CA2 = C_ * ( dx2 - rho_*dx1 ) /sgmy;
            gpi = (pi_ - gammas) * un;
            gmux = - gammas * CA1 * un;
            gmuy = - gammas * CA2 * un;
            gsgmx = - gammas * ( CA1 * (x1 - mux) - 1.) * un;
            gsgmy = - gammas * ( CA2 * (x2 - muy) - 1.) * un;
            grho = - gammas * ( dx1*dx2 + rho_*(1. - C_ * Zs)) * un;
            ''',
            'mdout_bwd',
        )(x1, x2, self.gammas, self.pi_, self.mux, self.muy, self.sgmx, self.sgmy, self.rho_, self.Zs, self.update_or_not)
        geos = (x3 - self.eos) * self.update_or_not #* 4.0 #* 1000.0
    # Per-row gradient rescaling: if the largest |component| of a row
    # exceeds th_max, the whole row is scaled down so its max equals
    # th_max (direction-preserving clipping).  th_min is unused here.
    th_min = -100.0
    th_max = 100.0
    geos_max = xp.max(xp.absolute(geos), axis=1).reshape((batchsize, 1))
    rate = xp.where(geos_max > th_max, th_max/geos_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    geos *= rate
    gpi_max = xp.max(xp.absolute(gpi), axis=1).reshape((batchsize, 1))
    rate = xp.where(gpi_max > th_max, th_max/gpi_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gpi *= rate
    gmux_max = xp.max(xp.absolute(gmux), axis=1).reshape((batchsize, 1))
    rate = xp.where(gmux_max > th_max, th_max/gmux_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gmux *= rate
    gmuy_max = xp.max(xp.absolute(gmuy), axis=1).reshape((batchsize, 1))
    rate = xp.where(gmuy_max > th_max, th_max/gmuy_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gmuy *= rate
    gsgmx_max = xp.max(xp.absolute(gsgmx), axis=1).reshape((batchsize, 1))
    rate = xp.where(gsgmx_max > th_max, th_max/gsgmx_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gsgmx *= rate
    gsgmy_max = xp.max(xp.absolute(gsgmy), axis=1).reshape((batchsize, 1))
    rate = xp.where(gsgmy_max > th_max, th_max/gsgmy_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gsgmy *= rate
    grho_max = xp.max(xp.absolute(grho), axis=1).reshape((batchsize, 1))
    rate = xp.where(grho_max > th_max, th_max/grho_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    grho *= rate
    #return gxs, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max),
    #print('mdn', geos, gpi, gmux, gmuy, gsgmx, gsgmy, grho)
    #return None, None, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max),
    return None, None, geos, gpi, gmux, gmuy, gsgmx, gsgmy, grho,
def mixture_density_outputs(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat):
    """Functional wrapper: apply a fresh MixtureDensityOutputs to the inputs."""
    mdo = MixtureDensityOutputs()
    return mdo(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat)
| from __future__ import print_function
import numpy
import six
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _mat_ptrs(a):
    """Create a GPU vector holding one device pointer per matrix in batch *a*.

    Args:
        a: A batch of matrices on GPU (first axis is the batch axis).
    Returns:
        GPU array of ``intp`` device pointers, one per matrix.
    """
    head = a[0].data.ptr
    n_mats = a.shape[0]
    if n_mats == 1:
        return cuda.cupy.full((1,), head, dtype=numpy.intp)
    # Matrices are evenly spaced in memory, so pointer i is head + i*stride.
    stride = a[1].data.ptr - head
    return cuda.cupy.arange(head, head + stride * n_mats, stride, dtype=numpy.intp)
def _as_batch_mat(x):
return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x
def _get_ld(a):
shape = a.shape[-2:]
strides = a.strides[-2:]
trans = numpy.argmin(strides)
return trans, int(max(shape[trans], max(strides) // a.itemsize))
def _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):
    """Batched matrix multiply ``out = a @ b`` on GPU via cuBLAS sgemmBatched.

    ``a``/``b`` may be 2-D (batches of vectors, promoted to column
    matrices) or 3-D batches of matrices.  ``transa``/``transb``/``transout``
    request (per-operand) transposition.  Results are written into *out*
    in place.  All memory-layout handling (leading dimensions, implicit
    transposes) is derived from the arrays' strides.
    """
    a = _as_batch_mat(a)
    b = _as_batch_mat(b)
    trans_axis = (0, 2, 1)
    if transout:
        out = out.transpose(trans_axis)
    # If the output buffer is column-major-like, compute the transposed
    # product instead so cuBLAS can still write it contiguously.
    needtrans, _ = _get_ld(out)
    if needtrans == 1:
        # (A B)^T = B^T A^T
        a, b = b, a
        transa, transb = not transb, not transa
        out = out.transpose(trans_axis)
    if transa:
        a = a.transpose(trans_axis)
    if transb:
        b = b.transpose(trans_axis)
    # Re-derive transpose flags / leading dimensions from actual strides.
    transa, lda = _get_ld(a)
    transb, ldb = _get_ld(b)
    transout, ldout = _get_ld(out)
    la, n, ka = a.shape
    lb, kb, m = b.shape
    assert ka == kb
    assert transout == 0 or ldout == 1
    assert out.shape == (la, n, m)
    ap = _mat_ptrs(a)
    bp = _mat_ptrs(b)
    outp = _mat_ptrs(out)
    # alpha=1.0, beta=0.0: plain product, no accumulation into out.
    cuda.cublas.sgemmBatched(
        cuda.Device().cublas_handle,
        transa,
        transb,
        n, m, ka, 1.0,
        ap.data.ptr, lda,
        bp.data.ptr, ldb,
        0.0, outp.data.ptr, ldout, la)
class MixtureDensityOutputs(function.Function):
"""Mixture-Densiy-Outputs unit for handwriting prediction/synthesis (Graves 2013).
This function outputs Pr(x[t+1]|y[t]) where x[t] is a 3-dimensional vector (eg., x[t] = (x, y, z)).
It has five inputs (e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat),
and five outputs (e, pi, mux, muy, sgmx, sgmy, rho), where e_hat and e are scalar,
pi_hat, pi, mux_hat, muy_hat, sgmx_hat, sgmy_hat, mux, muy, sgmx, sgmy,
rho_hat and rho are M-length, 1 dimensional vectors.
"""
def check_type_forward(self, in_types):
    """Validate the nine inputs' dtypes and shapes.

    Every distribution-parameter input must be float32, at least 2-D,
    share the batch size (axis 0) and trailing axes with ``xnext``; the
    per-mixture inputs must additionally share axis 1 with ``pi_hat``.
    """
    type_check.expect(in_types.size() == 9)
    (x_type, eow_type, e_type, pi_type, mux_type,
     muy_type, sgmx_type, sgmy_type, rho_type) = in_types

    trailing_axes = range(2, type_check.eval(x_type.ndim))

    # Each parameter input agrees with xnext on dtype, rank, batch size
    # and any trailing axes.
    for other in (e_type, pi_type, mux_type, muy_type,
                  sgmx_type, sgmy_type, rho_type):
        type_check.expect(
            x_type.dtype == numpy.float32,
            other.dtype == numpy.float32,
            x_type.ndim >= 2,
            other.ndim >= 2,
            x_type.shape[0] == other.shape[0],
        )
        for i in trailing_axes:
            type_check.expect(other.shape[i] == x_type.shape[i])

    # Each per-mixture input agrees with pi_hat on the mixture axis.
    for other in (mux_type, muy_type, sgmx_type, sgmy_type, rho_type):
        type_check.expect(
            pi_type.dtype == numpy.float32,
            other.dtype == numpy.float32,
            pi_type.ndim >= 2,
            other.ndim >= 2,
            pi_type.shape[1] == other.shape[1],
        )
        for i in trailing_axes:
            type_check.expect(other.shape[i] == pi_type.shape[i])
def forward(self, inputs):
    """Forward pass: mixture-density loss plus a sampled next point.

    Inputs are ``(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat,
    sgmy_hat, rho_hat)``.  Pre-activations are mapped to distribution
    parameters (softmax for pi, exp for sigmas, tanh for rho, sigmoid-like
    for eos).  When ``x3.sum() >= 0`` (targets available) the per-step
    negative log-likelihood is computed and, as a side product, a point is
    rejection-sampled from the predicted mixture; otherwise (prediction
    mode) only sampling is done and the loss is zero.  Rows whose
    end-of-stroke flag equals 2 are treated as padding and masked out.
    Returns ``(loss_t, xnext, eos, pi, mux, muy, sgmx, sgmy, rho)``.

    Fixes vs. the previous revision: ``xrange`` (Python-2-only) replaced
    by ``six.moves.range``; a dead store to ``xnext[:, 2:]`` and two large
    string-quoted dead-code blocks removed.
    """
    xp = cuda.get_array_module(*inputs)
    xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat = inputs
    batchsize, M = pi_hat.shape
    # Target point: pen offsets (x1, x2) and end-of-stroke flag (x3).
    x1 = xnext[:, 0].reshape((batchsize, 1))
    x2 = xnext[:, 1].reshape((batchsize, 1))
    x3 = xnext[:, 2].reshape((batchsize, 1))
    if isinstance(mux_hat, numpy.ndarray):
        # ------------------------- CPU path -------------------------
        self.x = xnext
        self.eos = 1. / (1. + numpy.exp(e_hat))
        self.pi_ = numpy.exp(pi_hat) / numpy.exp(pi_hat).sum(axis=1).reshape((batchsize, 1))
        self.mux = mux_hat
        self.muy = muy_hat
        self.sgmx = numpy.exp(sgmx_hat)
        self.sgmy = numpy.exp(sgmy_hat)
        self.rho_ = numpy.tanh(rho_hat)
        if x3.sum() >= 0.0:  # training & validation (targets provided)
            dx1 = (x1 - self.mux) / self.sgmx
            dx2 = (x2 - self.muy) / self.sgmy
            self.Zs = dx1 * dx1 + dx2 * dx2 - 2. * self.rho_ * dx1 * dx2
            Ns = numpy.exp(-0.5 * self.Zs / (1. - self.rho_ ** 2)) / (
                2. * 3.1415927 * self.sgmx * self.sgmy
                * numpy.sqrt(1. - self.rho_ ** 2) + 1e-10)
            gamma_hats = self.pi_ * Ns
            sum_gamma_hats = gamma_hats.sum(axis=1).reshape((batchsize, 1)) + 1e-10
            self.gammas = gamma_hats / sum_gamma_hats
            loss_t = (-numpy.log(sum_gamma_hats) - x3 * numpy.log(self.eos)
                      - (1. - x3) * numpy.log(1. - self.eos))
            # Rows whose eos flag is the padding value 2 are masked out.
            idx = numpy.where(x3 == 2)[0]
            self.update_or_not = numpy.ones_like(x3)
            self.update_or_not[idx, 0] = 0.0
            loss_t = loss_t * self.update_or_not
            self.xnext = xnext
            # Prediction in training: rejection-sample a point from the
            # predicted mixture (no gradients through this part).
            xnext_h = numpy.copy(xnext)
            with chainer.no_backprop_mode():
                myux_min_h = mux_hat.min(axis=1).reshape((batchsize, 1))
                myux_max_h = mux_hat.max(axis=1).reshape((batchsize, 1))
                myuy_min_h = muy_hat.min(axis=1).reshape((batchsize, 1))
                myuy_max_h = muy_hat.max(axis=1).reshape((batchsize, 1))
                protect_mask = numpy.ones((batchsize, 1))
                while protect_mask.sum() > 0:
                    z1_h = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                    z2_ = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                    x1_h = myux_min_h + (myux_max_h - myux_min_h) * z1_h
                    x2_h = myuy_min_h + (myuy_max_h - myuy_min_h) * z2_
                    dx1_h = (x1_h - self.mux) / self.sgmx
                    dx2_h = (x2_h - self.muy) / self.sgmy
                    self.Zs_h = dx1_h * dx1_h + dx2_h * dx2_h - 2. * self.rho_ * dx1_h * dx2_h
                    Ns = numpy.exp(-0.5 * self.Zs_h / (1. - self.rho_ ** 2)) / (
                        2. * 3.1415927 * self.sgmx * self.sgmy
                        * numpy.sqrt(1. - self.rho_ ** 2) + 1e-10)
                    gamma_hats_h = self.pi_ * Ns
                    sum_gamma_hats = gamma_hats_h.sum(axis=1)  # Pr(x|ys)
                    us_h = numpy.random.uniform(size=batchsize)
                    idx = numpy.where(sum_gamma_hats > us_h)[0]
                    xnext_h[idx, 0] += (x1_h * protect_mask)[idx, 0]
                    xnext_h[idx, 1] += (x2_h * protect_mask)[idx, 0]
                    protect_mask[idx, 0] = 0.0
                # Threshold the eos probability into a hard 0/1 flag.
                xnext_h[:, 2] = xp.where(self.eos[:, 0] > 0.10, 1.0, 0.0)
                self.xnext = xnext_h
        else:  # prediction: sample the next point from the mixture
            xnext = numpy.zeros((batchsize, 3))
            myux_min = mux_hat.min(axis=1).reshape((batchsize, 1))
            myux_max = mux_hat.max(axis=1).reshape((batchsize, 1))
            myuy_min = muy_hat.min(axis=1).reshape((batchsize, 1))
            myuy_max = muy_hat.max(axis=1).reshape((batchsize, 1))
            protect_mask = numpy.ones((batchsize, 1))
            while protect_mask.sum() > 0:
                z1 = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                z2 = numpy.random.uniform(size=batchsize).reshape((batchsize, 1))
                x1 = myux_min + (myux_max - myux_min) * z1
                x2 = myuy_min + (myuy_max - myuy_min) * z2
                dx1 = (x1 - self.mux) / self.sgmx
                dx2 = (x2 - self.muy) / self.sgmy
                self.Zs = dx1 * dx1 + dx2 * dx2 - 2. * self.rho_ * dx1 * dx2
                Ns = numpy.exp(-0.5 * self.Zs / (1. - self.rho_ ** 2)) / (
                    2. * 3.1415927 * self.sgmx * self.sgmy
                    * numpy.sqrt(1. - self.rho_ ** 2) + 1e-10)
                gamma_hats = self.pi_ * Ns
                sum_gamma_hats = gamma_hats.sum(axis=1)  # Pr(x|ys)
                us = numpy.random.uniform(size=batchsize)
                idx = numpy.where(sum_gamma_hats > us)[0]
                xnext[idx, 0] += (x1 * protect_mask)[idx, 0]
                xnext[idx, 1] += (x2 * protect_mask)[idx, 0]
                protect_mask[idx, 0] = 0.0
            xnext[:, 2] = self.eos[:, 0]
            # If any end-of-word marker fired, force the padding flag.
            mask = eow < 0
            if not mask.all():
                xnext[:, 2] = 2.0
            self.xnext = xnext
            loss_t = xp.zeros((batchsize, 1)).astype(xp.float32)
            self.Zs = None
    else:
        # ------------------------- GPU path -------------------------
        self.mux = mux_hat
        self.muy = muy_hat
        # Shift logits by the row max for a numerically stable softmax.
        self.pi_hat = pi_hat - pi_hat.max(axis=1).reshape(batchsize, 1)
        sum_exp_pi = cuda.reduce(
            'T x',           # input params
            'T y',           # output params
            'exp(x)',        # map
            'a+b',           # reduce
            'y=a',           # post-reduction map
            '1e-10',         # identity value
            'mdout_sumexp'   # kernel name
        )(self.pi_hat, axis=1)
        self.eos = 1. / (1. + cuda.cupy.exp(e_hat))
        if x3.sum() >= 0.0:  # training & validation
            # Fused kernel: parameter activations + per-component density.
            gamma_hats, self.Zs, self.pi_, self.sgmx, self.sgmy, self.rho_ = cuda.elementwise(
                'T x1, T x2, T pi_hat, T mux_, T muy_, T sgmx_hat, T sgmy_hat, T rho_hat, T sum_exp_pi',
                'T gammas, T Zs, T pi_, T sgmx_, T sgmy_, T rho_',
                '''
                pi_ = exp(pi_hat)/sum_exp_pi;
                sgmx_ = exp(sgmx_hat) + 1e-10;
                sgmy_ = exp(sgmy_hat) + 1e-10;
                rho_ = tanh(rho_hat);
                T rho2 = 1. - rho_*rho_ + 1e-10;
                T dx1 = (x1 - mux_)/sgmx_;
                T dx2 = (x2 - muy_)/sgmy_;
                Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                gammas = pi_ * Ns;
                ''',
                'mdout_fwd1',
            )(x1, x2, self.pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat,
              sum_exp_pi.reshape((batchsize, 1)))
            sum_gamma_hats = gamma_hats.sum(axis=1).reshape((batchsize, 1)) + 1e-10
            self.gammas = gamma_hats / sum_gamma_hats
            loss_t = cuda.elementwise(
                'T sum_, T x3, T eos',
                'T loss',
                '''
                loss = -log(sum_) - x3 * log(eos) - (1. - x3) * log(1.-eos);
                ''',
                'mdout_fwd2',
            )(sum_gamma_hats, x3, self.eos)
            # Mask padded rows (eos flag == 2).
            self.update_or_not = xp.where(x3 == 2., 0.0, 1.0).astype(xp.float32)
            loss_t = loss_t * self.update_or_not
            self.xnext = xnext
            # Prediction in training (no gradients through sampling).
            with chainer.no_backprop_mode():
                # Lower-bound the variances so rejection sampling can hit.
                self.sgmx_h = xp.where(self.sgmx < 0.0015, 0.0015, self.sgmx)
                self.sgmy_h = xp.where(self.sgmy < 0.0015, 0.0015, self.sgmy)
                # Evaluate the mixture at every component center; the max
                # over centers approximates the density's upper bound.
                muxs = xp.empty((batchsize, M, M)).astype(xp.float32)
                muys = xp.empty((batchsize, M, M)).astype(xp.float32)
                _batch_matmul_gpu(mux_hat.reshape((batchsize, M, 1)),
                                  xp.ones((batchsize, 1, M)).astype(xp.float32), out=muxs)
                _batch_matmul_gpu(muy_hat.reshape((batchsize, M, 1)),
                                  xp.ones((batchsize, 1, M)).astype(xp.float32), out=muys)
                gamma_hats_at_components = cuda.elementwise(
                    'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_',
                    'T gammas',
                    '''
                    T rho2 = 1. - rho_*rho_ + 1e-10;
                    T dx1 = (x1 - mux_)/sgmx_;
                    T dx2 = (x2 - muy_)/sgmy_;
                    T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                    T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                    gammas = pi_ * Ns;
                    ''',
                    'mdout_fwd5',
                )(muxs,
                  muys,
                  self.pi_.reshape((batchsize, 1, M)),
                  mux_hat.reshape((batchsize, 1, M)),
                  muy_hat.reshape((batchsize, 1, M)),
                  self.sgmx_h.reshape((batchsize, 1, M)),
                  self.sgmy_h.reshape((batchsize, 1, M)),
                  self.rho_.reshape((batchsize, 1, M))
                  )
                sum_gamma_hats_at_components = gamma_hats_at_components.sum(axis=2)  # (batchsize, M)
                p_maxs = sum_gamma_hats_at_components.max(axis=1).reshape((batchsize, 1))
                myux_min_h = mux_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
                myux_max_h = mux_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
                myuy_min_h = muy_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
                myuy_max_h = muy_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
                xnext_h = xp.zeros((batchsize, 3)).astype(xp.float32)
                protect_mask = xp.ones((batchsize, 1)).astype(xp.float32)
                n_samples = 32768 * 2
                x1_h = xp.copy(x1)
                x2_h = xp.copy(x2)
                while protect_mask.sum() > 0:
                    # Draw n_samples candidates per row in parallel.
                    z1_h = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples, 1))
                    z2_h = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples, 1))
                    x1__h = (myux_min_h + (myux_max_h - myux_min_h) * z1_h).astype(xp.float32)
                    x2__h = (myuy_min_h + (myuy_max_h - myuy_min_h) * z2_h).astype(xp.float32)
                    gamma_hats_h = cuda.elementwise(
                        'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_',
                        'T gammas',
                        '''
                        T rho2 = 1. - rho_*rho_ + 1e-10;
                        T dx1 = (x1 - mux_)/sgmx_;
                        T dx2 = (x2 - muy_)/sgmy_;
                        T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                        T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                        gammas = pi_ * Ns;
                        ''',
                        'mdout_fwd4',
                    )(
                        x1__h, x2__h,
                        self.pi_.reshape((batchsize, 1, M)),
                        mux_hat.reshape((batchsize, 1, M)),
                        muy_hat.reshape((batchsize, 1, M)),
                        self.sgmx_h.reshape((batchsize, 1, M)),
                        self.sgmy_h.reshape((batchsize, 1, M)),
                        self.rho_.reshape((batchsize, 1, M))
                    )
                    sum_gamma_hats_h = gamma_hats_h.sum(axis=2)
                    us_h = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples)) * p_maxs
                    update_mask__h = xp.where(sum_gamma_hats_h > us_h, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples))
                    update_mask_h = update_mask__h.max(axis=1).reshape((batchsize, 1))
                    sample_idx_h = update_mask__h.argmax(axis=1).reshape((batchsize, 1))
                    # six.moves.range: works on both Python 2 and 3
                    # (original used the Python-2-only xrange).
                    for bb in six.moves.range(batchsize):
                        this_midx = sample_idx_h[bb, 0]
                        x1_h[bb:bb + 1, 0] = x1__h[bb:bb + 1, this_midx:this_midx + 1, 0]
                        x2_h[bb:bb + 1, 0] = x2__h[bb:bb + 1, this_midx:this_midx + 1, 0]
                    xnext_h[:, 0] += (x1_h * protect_mask * update_mask_h)[:, 0]
                    xnext_h[:, 1] += (x2_h * protect_mask * update_mask_h)[:, 0]
                    protect_mask -= protect_mask * update_mask_h
                xnext_h[:, 2:] = xp.where(self.eos[:, 0:1] > 0.10, 1.0, 0.0)
                self.xnext = xnext_h
        else:  # prediction (sampling from the probability distribution)
            # pi, sgmx, sgmy, rho <-- pi_hat, sgmx_hat, sgmy_hat, rho_hat
            self.pi_, self.sgmx, self.sgmy, self.rho_ = cuda.elementwise(
                'T pi_hat, T sgmx_hat, T sgmy_hat, T rho_hat, T sum_exp_pi',
                'T pi_, T sgmx_, T sgmy_, T rho_',
                '''
                pi_ = exp(pi_hat)/sum_exp_pi;
                sgmx_ = exp(sgmx_hat) + 1e-10;
                sgmy_ = exp(sgmy_hat) + 1e-10;
                rho_ = tanh(rho_hat);
                ''',
                'mdout_fwd3',
            )(self.pi_hat, sgmx_hat, sgmy_hat, rho_hat, sum_exp_pi.reshape((batchsize, 1)))
            # Variances can be tiny, making rejection sampling virtually
            # impossible -- enforce a lower bound.
            self.sgmx = xp.where(self.sgmx < 0.0015, 0.0015, self.sgmx)
            self.sgmy = xp.where(self.sgmy < 0.0015, 0.0015, self.sgmy)
            # Approximate the mixture's max density: evaluate it at every
            # component center and take the per-row maximum.
            muxs = xp.empty((batchsize, M, M)).astype(xp.float32)
            muys = xp.empty((batchsize, M, M)).astype(xp.float32)
            _batch_matmul_gpu(mux_hat.reshape((batchsize, M, 1)),
                              xp.ones((batchsize, 1, M)).astype(xp.float32), out=muxs)
            _batch_matmul_gpu(muy_hat.reshape((batchsize, M, 1)),
                              xp.ones((batchsize, 1, M)).astype(xp.float32), out=muys)
            gamma_hats_at_components = cuda.elementwise(
                'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_',
                'T gammas',
                '''
                T rho2 = 1. - rho_*rho_ + 1e-10;
                T dx1 = (x1 - mux_)/sgmx_;
                T dx2 = (x2 - muy_)/sgmy_;
                T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                gammas = pi_ * Ns;
                ''',
                'mdout_fwd5',
            )(muxs,
              muys,
              self.pi_.reshape((batchsize, 1, M)),
              mux_hat.reshape((batchsize, 1, M)),
              muy_hat.reshape((batchsize, 1, M)),
              self.sgmx.reshape((batchsize, 1, M)),
              self.sgmy.reshape((batchsize, 1, M)),
              self.rho_.reshape((batchsize, 1, M))
              )
            sum_gamma_hats_at_components = gamma_hats_at_components.sum(axis=2)  # (batchsize, M)
            p_maxs = sum_gamma_hats_at_components.max(axis=1).reshape((batchsize, 1))  # (batchsize, 1)
            myux_min = mux_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
            myux_max = mux_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
            myuy_min = muy_hat.min(axis=1).reshape((batchsize, 1, 1)) - 0.01
            myuy_max = muy_hat.max(axis=1).reshape((batchsize, 1, 1)) + 0.01
            xnext = xp.zeros((batchsize, 3)).astype(xp.float32)
            protect_mask = xp.ones((batchsize, 1)).astype(xp.float32)
            n_samples = 32768 * 2
            while protect_mask.sum() > 0:
                # Draw n_samples candidates per row in parallel.
                z1 = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples, 1))
                z2 = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples, 1))
                x1_ = (myux_min + (myux_max - myux_min) * z1).astype(xp.float32)  # (batchsize, n_samples, 1)
                x2_ = (myuy_min + (myuy_max - myuy_min) * z2).astype(xp.float32)  # (batchsize, n_samples, 1)
                gamma_hats = cuda.elementwise(
                    'T x1, T x2, T pi_, T mux_, T muy_, T sgmx_, T sgmy_, T rho_',
                    'T gammas',
                    '''
                    T rho2 = 1. - rho_*rho_ + 1e-10;
                    T dx1 = (x1 - mux_)/sgmx_;
                    T dx2 = (x2 - muy_)/sgmy_;
                    T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2;
                    T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2));
                    gammas = pi_ * Ns;
                    ''',
                    'mdout_fwd4',
                )(
                    x1_, x2_,
                    self.pi_.reshape((batchsize, 1, M)),
                    mux_hat.reshape((batchsize, 1, M)),
                    muy_hat.reshape((batchsize, 1, M)),
                    self.sgmx.reshape((batchsize, 1, M)),
                    self.sgmy.reshape((batchsize, 1, M)),
                    self.rho_.reshape((batchsize, 1, M))
                )
                sum_gamma_hats_ = gamma_hats.sum(axis=2)
                us_ = xp.random.uniform(size=batchsize * n_samples).reshape((batchsize, n_samples)) * p_maxs
                update_mask_ = xp.where(sum_gamma_hats_ > us_, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples))
                update_mask = update_mask_.max(axis=1).reshape((batchsize, 1))
                sample_idx = update_mask_.argmax(axis=1).reshape((batchsize, 1))
                for bb in six.moves.range(batchsize):
                    this_midx = sample_idx[bb, 0]
                    x1[bb:bb + 1, 0] = x1_[bb:bb + 1, this_midx:this_midx + 1, 0]
                    x2[bb:bb + 1, 0] = x2_[bb:bb + 1, this_midx:this_midx + 1, 0]
                xnext[:, 0] += (x1 * protect_mask * update_mask)[:, 0]
                xnext[:, 1] += (x2 * protect_mask * update_mask)[:, 0]
                protect_mask -= protect_mask * update_mask
            # eos probability while strokes continue; padding value 2 once
            # the end-of-word marker has fired.
            xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.)
            self.xnext = xnext
            loss_t = xp.zeros((batchsize, 1)).astype(xp.float32)
            self.Zs = None
    return loss_t, self.xnext, self.eos, self.pi_, self.mux, self.muy, self.sgmx, self.sgmy, self.rho_,
def backward(self, inputs, grad_outputs):
    """Backward pass: analytic gradients of the mixture-density loss.

    Uses the quantities cached by ``forward()`` (``self.pi_``,
    ``self.gammas``, ``self.Zs``, ``self.eos``, ``self.update_or_not``).
    ``grad_outputs`` is not used (the unpacking is commented out), so the
    gradients are those of the loss itself.  No gradient is propagated to
    ``xnext`` or ``eow`` (returned as ``None``).
    """
    xp = cuda.get_array_module(*inputs)
    xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat = inputs
    batchsize, M = pi_hat.shape
    # Target point: pen offsets (x1, x2) and end-of-stroke flag (x3).
    x1 = xnext[:,0].reshape((batchsize, 1))
    x2 = xnext[:,1].reshape((batchsize, 1))
    x3 = xnext[:,2].reshape((batchsize, 1))
    #gpi, = grad_outputs
    gpi = xp.empty_like(pi_hat)
    gmux = xp.empty_like(mux_hat)
    gmuy = xp.empty_like(muy_hat)
    gsgmx = xp.empty_like(sgmx_hat)
    gsgmy = xp.empty_like(sgmy_hat)
    grho = xp.empty_like(rho_hat)
    geos = xp.empty_like(e_hat)
    gxs = xp.zeros_like(xnext)
    # Consider the case that either gradient is not given
    # NOTE(review): these None checks can never trigger -- the arrays were
    # just allocated with empty_like above.
    if gpi is None:
        gpi = 0
    if gmux is None:
        gmux = 0
    if gmuy is None:
        gmuy = 0
    if gsgmx is None:
        gsgmx = 0
    if gsgmy is None:
        gsgmy = 0
    if grho is None:
        grho = 0
    if geos is None:
        geos = 0
    if xp is numpy:
        # CPU path: vectorized numpy version of the same formulas as the
        # CUDA kernel below.  `update_or_not` masks padded rows (x3 == 2).
        #update_or_not = xp.ones_like(x3)
        #idx = numpy.where(x3==2)[0]
        #update_or_not[idx,0] = 0.0
        C_ = 1./(1. - self.rho_*self.rho_)
        gpi = (self.pi_ - self.gammas) * self.update_or_not
        dx1 = (x1 - self.mux)/self.sgmx
        dx2 = (x2 - self.muy)/self.sgmy
        CA1_ = C_*( dx1 - self.rho_*dx2 ) /self.sgmx
        CA2_ = C_*( dx2 - self.rho_*dx1 ) /self.sgmy
        gmux = - self.gammas * CA1_ * self.update_or_not
        gmuy = - self.gammas * CA2_ * self.update_or_not
        gsgmx = - self.gammas * ( CA1_ * (x1 - self.mux) - 1.) * self.update_or_not
        gsgmy = - self.gammas * ( CA2_ * (x2 - self.muy) - 1.) * self.update_or_not
        grho = - self.gammas * ( dx1*dx2 + self.rho_ * (1. - C_ * self.Zs) ) * self.update_or_not
        geos = (x3 - self.eos) * self.update_or_not
    else:
        # GPU path: one fused elementwise kernel computes all six
        # mixture-parameter gradients per (row, component).
        #update_or_not = xp.where(x3==2., 0.0, 1.0).astype(xp.float32)
        gpi, gmux, gmuy, gsgmx, gsgmy, grho = cuda.elementwise(
            'T x1, T x2, T gammas, T pi_, T mux, T muy, T sgmx, T sgmy, T rho_, T Zs, T un',
            'T gpi, T gmux, T gmuy, T gsgmx, T gsgmy, T grho',
            '''
            T C_ = 1. / (1. - rho_ * rho_ + 1e-10);
            T dx1 = (x1 - mux)/sgmx;
            T dx2 = (x2 - muy)/sgmy;
            T CA1 = C_ * ( dx1 - rho_*dx2 ) /sgmx;
            T CA2 = C_ * ( dx2 - rho_*dx1 ) /sgmy;
            gpi = (pi_ - gammas) * un;
            gmux = - gammas * CA1 * un;
            gmuy = - gammas * CA2 * un;
            gsgmx = - gammas * ( CA1 * (x1 - mux) - 1.) * un;
            gsgmy = - gammas * ( CA2 * (x2 - muy) - 1.) * un;
            grho = - gammas * ( dx1*dx2 + rho_*(1. - C_ * Zs)) * un;
            ''',
            'mdout_bwd',
        )(x1, x2, self.gammas, self.pi_, self.mux, self.muy, self.sgmx, self.sgmy, self.rho_, self.Zs, self.update_or_not)
        geos = (x3 - self.eos) * self.update_or_not #* 4.0 #* 1000.0
    # Per-row gradient rescaling: if the largest |component| of a row
    # exceeds th_max, the whole row is scaled down so its max equals
    # th_max (direction-preserving clipping).  th_min is unused here.
    th_min = -100.0
    th_max = 100.0
    geos_max = xp.max(xp.absolute(geos), axis=1).reshape((batchsize, 1))
    rate = xp.where(geos_max > th_max, th_max/geos_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    geos *= rate
    gpi_max = xp.max(xp.absolute(gpi), axis=1).reshape((batchsize, 1))
    rate = xp.where(gpi_max > th_max, th_max/gpi_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gpi *= rate
    gmux_max = xp.max(xp.absolute(gmux), axis=1).reshape((batchsize, 1))
    rate = xp.where(gmux_max > th_max, th_max/gmux_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gmux *= rate
    gmuy_max = xp.max(xp.absolute(gmuy), axis=1).reshape((batchsize, 1))
    rate = xp.where(gmuy_max > th_max, th_max/gmuy_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gmuy *= rate
    gsgmx_max = xp.max(xp.absolute(gsgmx), axis=1).reshape((batchsize, 1))
    rate = xp.where(gsgmx_max > th_max, th_max/gsgmx_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gsgmx *= rate
    gsgmy_max = xp.max(xp.absolute(gsgmy), axis=1).reshape((batchsize, 1))
    rate = xp.where(gsgmy_max > th_max, th_max/gsgmy_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    gsgmy *= rate
    grho_max = xp.max(xp.absolute(grho), axis=1).reshape((batchsize, 1))
    rate = xp.where(grho_max > th_max, th_max/grho_max, 1.0).astype(xp.float32).reshape((batchsize, 1))
    grho *= rate
    #return gxs, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max),
    #print('mdn', geos, gpi, gmux, gmuy, gsgmx, gsgmy, grho)
    #return None, None, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max),
    return None, None, geos, gpi, gmux, gmuy, gsgmx, gsgmy, grho,
def mixture_density_outputs(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat):
return MixtureDensityOutputs()(xnext, eow, e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat) | en | 0.431415 | Creates an array of pointers to matrices Args: a: A batch of matrices on GPU Returns: GPU array of pointers to matrices # (A B)^T = B^T A^T Mixture-Densiy-Outputs unit for handwriting prediction/synthesis (Graves 2013). This function outputs Pr(x[t+1]|y[t]) where x[t] is a 3-dimensional vector (eg., x[t] = (x, y, z)). It has five inputs (e_hat, pi_hat, mux_hat, muy_hat, sgmx_hat, sgmy_hat, rho_hat), and five outputs (e, pi, mux, muy, sgmx, sgmy, rho), where e_hat and e are scalar, pi_hat, pi, mux_hat, muy_hat, sgmx_hat, sgmy_hat, mux, muy, sgmx, sgmy, rho_hat and rho are M-length, 1 dimensional vectors. #_sigmoid(e_hat) #xnext is not None: # training & validation #x1 = xnext[:,0].reshape((batchsize, 1)) #x2 = xnext[:,1].reshape((batchsize, 1)) #x3 = xnext[:,2].reshape((batchsize, 1)) # Prediction in training # Pr(x|ys) #xnext[:, 2] = self.eos[:, 0] #xnext[:, 2] = numpy.where(eow < 0, xnext[:, 2], 2.) #xnext_h[:, 2] = self.eos[:, 0] #mask = eow < 0 #if not mask.all(): # xnext_h[:, 2] = 2.0 #xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.) # prediction # Pr(x|ys) #xnext[:, 2] = self.eos[:, 0] #xnext[:, 2] = numpy.where(eow < 0, xnext[:, 2], 2.) #xnext[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.) #loss_t = None # input params # output params # map # reduce # post-reduction map # identity value # kernel name #xnext is not None: # training & validation # input # output pi_ = exp(pi_hat)/sum_exp_pi; sgmx_ = exp(sgmx_hat) + 1e-10; sgmy_ = exp(sgmy_hat) + 1e-10; rho_ = tanh(rho_hat); T rho2 = 1. - rho_*rho_ + 1e-10; T dx1 = (x1 - mux_)/sgmx_; T dx2 = (x2 - muy_)/sgmy_; Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2; T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2)); gammas = pi_ * Ns; loss = -log(sum_) - x3 * log(eos) - (1. - x3) * log(1.-eos); # Prediction in training # input # output T rho2 = 1. 
- rho_*rho_ + 1e-10; T dx1 = (x1 - mux_)/sgmx_; T dx2 = (x2 - muy_)/sgmy_; T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2; T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2)); gammas = pi_ * Ns; # (batchsize, M) # (batchsize, 1) #16384 #8192 #4096 #2048 #1024 #512 # sampling n (=n_samples) samples in parallel at a step # (batchsize, n_samples, 1) # (batchsize, n_samples, 1) # input # output T rho2 = 1. - rho_*rho_ + 1e-10; T dx1 = (x1 - mux_)/sgmx_; T dx2 = (x2 - muy_)/sgmy_; T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2; T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2)); gammas = pi_ * Ns; #xnext_h[:, 2:] = xp.where(eow < 0, self.eos[:, 0:1], 2.) #loss_t = xp.zeros((batchsize, 1)).astype(xp.float32) #self.Zs = None # prediction (sampling from probability distribution) # pi, sgmx, sgmy, rho <-- pi_hat, sgmx_hat, sgmy_hat, rho_hat # input # output pi_ = exp(pi_hat)/sum_exp_pi; sgmx_ = exp(sgmx_hat) + 1e-10; sgmy_ = exp(sgmy_hat) + 1e-10; rho_ = tanh(rho_hat); # because variances of gaussians are very small, sampling is virtually impossible, we set lower boundary for variances! #print(self.sgmx.min(), self.sgmy.min()) # get the (aproximated) maximum p value of M-mixture gaussian distributions. # Here I assume that the maximum p value is taken at a center of a gaussian component in the mixture. # First, calculate p-values at each center of gaussian components, # and the maximum of these p-values is considered as the upper boundary of the M-mixture gaussian distributions # prepare x1 and x2 matrices like # [ [mux0, mux0, ...., mux0], # [mux1, mux1, ...., mux1], # ... # [muxn, muxn, ...., muxn]] where n = batchsize # N_i((mux[j], muy[j])) for i = 0, 1, ..., M and j = 0, 1, ..., M # input # output T rho2 = 1. - rho_*rho_ + 1e-10; T dx1 = (x1 - mux_)/sgmx_; T dx2 = (x2 - muy_)/sgmy_; T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2; T Ns = exp( -0.5*Zs /rho2)/(2. 
* 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2)); gammas = pi_ * Ns; # p[j] = sum(N_i((mux[j], muy[j])) for i = 0, 1, ..., M # (batchsize, M) # max(p[0], p[1], ..., p[M]) for each batch # (batchsize, 1) #print(p_maxs.reshape((1, batchsize))) #16384 #8192 #4096 #2048 #1024 #512 # sampling n (=n_samples) samples in parallel at a step # (batchsize, n_samples, 1) # (batchsize, n_samples, 1) # input # output T rho2 = 1. - rho_*rho_ + 1e-10; T dx1 = (x1 - mux_)/sgmx_; T dx2 = (x2 - muy_)/sgmy_; T Zs = dx1*dx1 + dx2*dx2- 2.*rho_*dx1*dx2; T Ns = exp( -0.5*Zs /rho2)/(2. * 3.1415927 * sgmx_ * sgmy_ * sqrt(rho2)); gammas = pi_ * Ns; sum_gamma_hats = sum_gamma_hats_.max(axis=1).reshape((batchsize, 1)) sample_idx = sum_gamma_hats_.argmax(axis=1).reshape((batchsize, 1)) for bb in xrange(batchsize): this_midx = sample_idx[bb, 0] x1[bb:bb+1, 0] = x1_[bb:bb+1, this_midx:this_midx+1, 0] x2[bb:bb+1, 0] = x2_[bb:bb+1, this_midx:this_midx+1, 0] us = xp.random.uniform(size=batchsize).reshape((batchsize, 1)) * p_maxs update_mask = xp.where(sum_gamma_hats > us, 1.0, 0.0).astype(xp.float32).reshape((batchsize, 1)) xnext[:, 0] += (x1*protect_mask*update_mask)[:, 0] xnext[:, 1] += (x2*protect_mask*update_mask)[:, 0] protect_mask -= protect_mask * update_mask us_ = xp.random.uniform(size=batchsize* n_samples).reshape((batchsize, n_samples)) * p_maxs update_mask_ = xp.where(sum_gamma_hats_ > us_, 1.0, 0.0).astype(xp.float32).reshape((batchsize, n_samples)) x1 = x1_.reshape((batchsize, n_samples)) * update_mask_ x2 = x2_.reshape((batchsize, n_samples)) * update_mask_ for i in xrange(n_samples): xnext[:, 0] += (x1_[:,i, :]*protect_mask)[:, 0] xnext[:, 1] += (x2_[:,i, :]*protect_mask)[:, 0] #print(protect_mask.shape, update_mask_[:, i:(i+1)].shape) protect_mask -= protect_mask * update_mask_[:, i:(i+1)] #gpi, = grad_outputs # Consider the case that either gradient is not given #update_or_not = xp.ones_like(x3) #idx = numpy.where(x3==2)[0] #update_or_not[idx,0] = 0.0 #update_or_not = xp.where(x3==2., 
0.0, 1.0).astype(xp.float32) T C_ = 1. / (1. - rho_ * rho_ + 1e-10); T dx1 = (x1 - mux)/sgmx; T dx2 = (x2 - muy)/sgmy; T CA1 = C_ * ( dx1 - rho_*dx2 ) /sgmx; T CA2 = C_ * ( dx2 - rho_*dx1 ) /sgmy; gpi = (pi_ - gammas) * un; gmux = - gammas * CA1 * un; gmuy = - gammas * CA2 * un; gsgmx = - gammas * ( CA1 * (x1 - mux) - 1.) * un; gsgmy = - gammas * ( CA2 * (x2 - muy) - 1.) * un; grho = - gammas * ( dx1*dx2 + rho_*(1. - C_ * Zs)) * un; #* 4.0 #* 1000.0 #return gxs, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max), #print('mdn', geos, gpi, gmux, gmuy, gsgmx, gsgmy, grho) #return None, None, geos.clip(th_min, th_max), gpi.clip(th_min, th_max), gmux.clip(th_min, th_max), gmuy.clip(th_min, th_max), gsgmx.clip(th_min, th_max), gsgmy.clip(th_min, th_max), grho.clip(th_min, th_max), | 2.629047 | 3 |