text stringlengths 38 1.54M |
|---|
import boto3
from typing import Dict
import traceback
from search_task import search_task_by_id
from schema import UpdateTask
from datetime import datetime
def update_task(user_id: str, task: UpdateTask) -> bool:
    """Update an existing task item in the DynamoDB table.

    Looks the task up first; when it exists, overwrites its name,
    description and updated_at attributes.

    :param user_id: partition key of the task owner
    :param task: payload carrying task_id plus the new field values
    :return: True when the task existed and was updated, False otherwise
    """
    # Guard clause: nothing to update when the task does not exist.
    if not search_task_by_id(user_id, task.task_id):
        return False

    table = boto3.resource("dynamodb").Table("python-todoapi")
    table.update_item(
        Key={"user_id": user_id, "task_id": task.task_id},
        UpdateExpression="""
            set
            task_name = :task_name,
            description = :description,
            updated_at = :updated_at
        """,
        ExpressionAttributeValues={
            ":task_name": task.task_name,
            ":description": task.description,
            # NOTE(review): "%Y%m%d" keeps date-only resolution — confirm
            # discarding the time of day is intentional for updated_at.
            ":updated_at": datetime.now().strftime("%Y%m%d"),
        },
    )
    return True
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
import blosc
from rest_framework.test import APITestCase, APIRequestFactory
from rest_framework.test import force_authenticate
from rest_framework import status
from bossspatialdb.views import Cutout
from bosscore.test.setup_db import SetupTestDB
from bosscore.error import BossError
import numpy as np
from unittest.mock import patch
from fakeredis import FakeStrictRedis
import spdb
import bossutils
import os
import unittest
# Active Boss API version string, used to build the request URLs below.
version = settings.BOSS_VERSION

# Module-level cache so every MockSpatialDB instance shares one fake-redis
# KVIO engine for the whole test run.
_test_globals = {'kvio_engine': None}
class MockBossConfig(bossutils.configuration.BossConfig):
    """BossConfig stub that redirects redis to the 'test' databases.

    Forces database index "1" for both cache settings so unit tests never
    touch database 0, where real data can live, and ignores on-disk config
    files entirely.
    """
    def __init__(self):
        super().__init__()
        # Point both redis-backed caches at test database 1.
        for option in ("cache-db", "cache-state-db"):
            self.config["aws"][option] = "1"

    def read(self, filename):
        # Never load configuration from disk during tests.
        pass

    def __getitem__(self, key):
        return self.config[key]
class MockSpatialDB(spdb.spatialdb.SpatialDB):
    """SpatialDB stub whose redis kvio is a process-wide fakeredis instance.

    The engine is created once and cached in _test_globals so every mock
    shares the same in-memory store instead of hitting a real server.
    """
    @patch('bossutils.configuration.BossConfig', MockBossConfig)
    @patch('redis.StrictRedis', FakeStrictRedis)
    def __init__(self):
        super().__init__()
        cached = _test_globals['kvio_engine']
        if not cached:
            # First construction: build the (fake) redis engine and cache it.
            cached = spdb.spatialdb.KVIO.get_kv_engine('redis')
            _test_globals['kvio_engine'] = cached
        self.kvio = cached
class CutoutInterfaceViewUint16TestMixin(object):
    """Cutout-view tests for uint16 channels.

    Mixed into an APITestCase that provides self.user. Exercises error
    handling (wrong dtype, wrong dimensions, oversized request) and
    write/read round-trips through both the raw-blosc and blosc-numpy
    interfaces.

    Fixes vs the original:
    - np.fromstring is deprecated (removed for binary data in modern NumPy);
      replaced with np.frombuffer everywhere.
    - three GET requests passed accepts=..., which only sets a meaningless
      'accepts' WSGI environ key; replaced with HTTP_ACCEPT=... so the Accept
      header is actually sent, consistent with the other tests in this class.
    """

    def test_channel_uint16_wrong_data_type(self):
        """ Test posting the wrong bitdepth data """
        test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint8)
        h = test_mat.tobytes()
        # NOTE(review): blosc's typesize is in *bytes*; 8 (and 16 below) look
        # like bit counts. Harmless — only affects compression ratio — but
        # worth confirming.
        bb = blosc.compress(h, typesize=8)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_channel_uint16_wrong_data_type_numpy(self):
        """ Test posting the wrong bitdepth data using the blosc-numpy interface"""
        test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint8)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_channel_uint16_wrong_dimensions(self):
        """ Test posting with the wrong xyz dims"""
        test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        h = test_mat.tobytes()
        bb = blosc.compress(h, typesize=16)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:100/0:128/0:16/', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:100', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_channel_uint16_wrong_dimensions_numpy(self):
        """ Test posting with the wrong xyz dims using the numpy interface"""
        test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:100/0:128/0:16/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:100', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_channel_uint16_get_too_big(self):
        """ Test getting a cutout that is over 1GB uncompressed"""
        # Create request
        factory = APIRequestFactory()
        # Create Request to get data you posted
        # FIX: was accepts=... (never sets the Accept header); use HTTP_ACCEPT.
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/0:100000/0:100000/0:10000/',
                              HTTP_ACCEPT='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:100000', y_range='0:100000', z_range='0:10000', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)

    def test_channel_uint16_cuboid_aligned_no_offset_no_time_blosc(self):
        """ Test uint16 data, cuboid aligned, no offset, no time samples"""
        test_mat = np.random.randint(1, 2**16-1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        h = test_mat.tobytes()
        bb = blosc.compress(h, typesize=16)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/',
                              HTTP_ACCEPT='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress (np.frombuffer replaces the deprecated np.fromstring)
        raw_data = blosc.decompress(response.content)
        data_mat = np.frombuffer(raw_data, dtype=np.uint16)
        data_mat = np.reshape(data_mat, (16, 128, 128), order='C')
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    def test_channel_uint16_cuboid_aligned_offset_no_time_blosc(self):
        """ Test uint16 data, cuboid aligned, offset, no time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        h = test_mat.tobytes()
        bb = blosc.compress(h, typesize=16)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/128:256/256:384/16:32/', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='128:256', y_range='256:384', z_range='16:32', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/128:256/256:384/16:32/',
                              HTTP_ACCEPT='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='128:256', y_range='256:384', z_range='16:32', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        raw_data = blosc.decompress(response.content)
        data_mat = np.frombuffer(raw_data, dtype=np.uint16)
        data_mat = np.reshape(data_mat, (16, 128, 128), order='C')
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    def test_channel_uint16_cuboid_unaligned_offset_no_time_blosc(self):
        """ Test uint16 data, not cuboid aligned, offset, no time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (17, 300, 500))
        test_mat = test_mat.astype(np.uint16)
        h = test_mat.tobytes()
        bb = blosc.compress(h, typesize=16)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/',
                              HTTP_ACCEPT='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        raw_data = blosc.decompress(response.content)
        data_mat = np.frombuffer(raw_data, dtype=np.uint16)
        data_mat = np.reshape(data_mat, (17, 300, 500), order='C')
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    @unittest.skipUnless(settings.RUN_HIGH_MEM_TESTS, "Test Requires >2.5GB of Memory")
    def test_channel_uint16_cuboid_unaligned_offset_time_blosc(self):
        """ Test uint16 data, not cuboid aligned, offset, time samples, blosc interface

        Test Requires >=2GB of memory!
        """
        test_mat = np.random.randint(1, 2**16-1, (3, 17, 300, 500))
        test_mat = test_mat.astype(np.uint16)
        h = test_mat.tobytes()
        bb = blosc.compress(h, typesize=16)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/0:3', bb,
                               content_type='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range='0:3')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/0:3',
                              HTTP_ACCEPT='application/blosc')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range='0:3').render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        raw_data = blosc.decompress(response.content)
        data_mat = np.frombuffer(raw_data, dtype=np.uint16)
        data_mat = np.reshape(data_mat, (3, 17, 300, 500), order='C')
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    def test_channel_uint16_cuboid_aligned_no_offset_no_time_blosc_numpy(self):
        """ Test uint16 data, cuboid aligned, no offset, no time samples"""
        test_mat = np.random.randint(1, 2**16-1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/0:128/0:128/0:16/',
                              HTTP_ACCEPT='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='0:128', y_range='0:128', z_range='0:16', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        data_mat = blosc.unpack_array(response.content)
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    def test_channel_uint16_cuboid_aligned_offset_no_time_blosc_numpy(self):
        """ Test uint16 data, cuboid aligned, offset, no time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (16, 128, 128))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/128:256/256:384/16:32/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='128:256', y_range='256:384', z_range='16:32', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/128:256/256:384/16:32/',
                              HTTP_ACCEPT='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='128:256', y_range='256:384', z_range='16:32', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        data_mat = blosc.unpack_array(response.content)
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    def test_channel_uint16_cuboid_unaligned_offset_no_time_blosc_numpy(self):
        """ Test uint16 data, not cuboid aligned, offset, no time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (17, 300, 500))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range=None)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/',
                              HTTP_ACCEPT='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range=None).render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        data_mat = blosc.unpack_array(response.content)
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    @unittest.skipUnless(settings.RUN_HIGH_MEM_TESTS, "Test Requires >2.5GB of Memory")
    def test_channel_uint16_cuboid_unaligned_offset_time_blosc_numpy(self):
        """ Test uint16 data, not cuboid aligned, offset, time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (3, 17, 300, 500))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/0:3', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range='0:3')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/100:600/450:750/20:37/0:3',
                              HTTP_ACCEPT='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:600', y_range='450:750', z_range='20:37', t_range='0:3').render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        data_mat = blosc.unpack_array(response.content)
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)

    @unittest.skipUnless(settings.RUN_HIGH_MEM_TESTS, "Test Requires >2.5GB of Memory")
    def test_channel_uint16_cuboid_unaligned_offset_time_offset_blosc_numpy(self):
        """ Test uint16 data, not cuboid aligned, offset, time samples, blosc interface"""
        test_mat = np.random.randint(1, 2**16-1, (3, 17, 225, 200))
        test_mat = test_mat.astype(np.uint16)
        bb = blosc.pack_array(test_mat)
        # Create request
        factory = APIRequestFactory()
        request = factory.post('/' + version + '/cutout/col1/exp1/channel2/0/100:300/450:675/20:37/203:206/', bb,
                               content_type='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:300', y_range='450:675', z_range='20:37', t_range='203:206')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Create Request to get data you posted
        request = factory.get('/' + version + '/cutout/col1/exp1/channel2/0/100:300/450:675/20:37/203:206/',
                              HTTP_ACCEPT='application/blosc-python')
        # log in user
        force_authenticate(request, user=self.user)
        # Make request
        response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='channel2',
                                    resolution='0', x_range='100:300', y_range='450:675', z_range='20:37', t_range='203:206').render()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Decompress
        data_mat = blosc.unpack_array(response.content)
        # Test for data equality (what you put in is what you got back!)
        np.testing.assert_array_equal(data_mat, test_mat)
# Class-level patches: fakeredis instead of a real redis server, and the
# mock config / kvio so no production backends are touched during tests.
@patch('redis.StrictRedis', FakeStrictRedis)
@patch('bossutils.configuration.BossConfig', MockBossConfig)
@patch('spdb.spatialdb.kvio.KVIO', MockSpatialDB)
class TestCutoutInterfaceView(CutoutInterfaceViewUint16TestMixin, APITestCase):
    def setUp(self):
        """
        Initialize the database
        :return:
        """
        # Create a user
        dbsetup = SetupTestDB()
        self.user = dbsetup.create_user('testuser')
        # Populate DB
        dbsetup.insert_spatialdb_test_data()
        # Mock config parser so dummy params get loaded (redis is also mocked)
        self.patcher = patch('bossutils.configuration.BossConfig', MockBossConfig)
        self.mock_tests = self.patcher.start()
        # Also stub SpatialDB itself so the views use the fake kvio engine.
        self.spdb_patcher = patch('spdb.spatialdb.SpatialDB', MockSpatialDB)
        self.mock_spdb = self.spdb_patcher.start()

    def tearDown(self):
        # Stop mocking (patcher.stop() returns None; the reassignment just
        # clears the references)
        self.mock_tests = self.patcher.stop()
        self.mock_spdb = self.spdb_patcher.stop()
|
import os
import sys
import pylons
import pylons.configuration as configuration
from beaker.cache import CacheManager
from beaker.middleware import SessionMiddleware
from paste.fixture import TestApp
from paste.registry import RegistryManager
from paste.deploy.converters import asbool
from pylons import url
from pylons.decorators import jsonify
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes import Mapper
from routes.middleware import RoutesMiddleware
from routes.util import URLGenerator
from nose.tools import raises
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
    """Build the sample Pylons WSGI application used by these tests.

    Configures the sample_controllers package, installs routing and session
    middleware and, when full_stack is truthy, the error-handling stack.
    Returns the fully wrapped WSGI app with its config attached.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    paths = dict(
        root=os.path.join(root, 'sample_controllers'),
        controllers=os.path.join(root, 'sample_controllers', 'controllers'),
    )
    sys.path.append(root)

    config = configuration.pylons_config
    config.init_app(global_conf, app_conf, package='sample_controllers', paths=paths)

    # Route every /{controller}/{action} URL (renamed from `map`, which
    # shadowed the builtin).
    url_map = Mapper(directory=config['pylons.paths']['controllers'])
    url_map.connect('/{controller}/{action}')
    config['routes.map'] = url_map

    class AppGlobals(object):
        pass

    config['pylons.app_globals'] = AppGlobals()

    # Core app plus the standard middleware stack.
    app = PylonsApp(config=config)
    app = RoutesMiddleware(app, config['routes.map'], singleton=False)
    app = SessionMiddleware(app, config)
    if asbool(full_stack):
        app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
        if asbool(config['debug']):
            app = StatusCodeRedirect(app)
        else:
            app = StatusCodeRedirect(app, [401, 403, 404, 500])
    app = RegistryManager(app)
    app.config = config
    return app
class TestJsonifyDecorator(object):
    # Functional tests for the sample Pylons app: basic response, config
    # identity, error handling, and i18n language selection.

    def setUp(self):
        self.app = TestApp(make_app({}))
        # Register a URL generator so url(...) works outside a request.
        url._push_object(URLGenerator(configuration.pylons_config['routes.map'], {}))

    def test_basic_response(self):
        response = self.app.get('/hello/index')
        assert 'Hello World' in response

    def test_config(self):
        # The global pylons.config proxy must be the same object as the
        # configuration module's config.
        assert pylons.config == configuration.config

    @raises(AssertionError)
    def test_eval(self):
        # With debug on, the error page is interactive; reaching it through
        # TestApp raises AssertionError (hence the @raises).
        app = TestApp(make_app(dict(debug='True')))
        app.get('/hello/oops', status=500, extra_environ={'paste.throw_errors': False})

    def test_set_lang(self):
        self._test_set_lang('set_lang')

    def test_set_lang_pylonscontext(self):
        self._test_set_lang('set_lang_pylonscontext')

    def _test_set_lang(self, action):
        # Japanese translation exists; French does not.
        response = self.app.get(url(controller='i18nc', action=action, lang='ja'))
        assert u'\u8a00\u8a9e\u8a2d\u5b9a\u3092\u300cja\u300d\u306b\u5909\u66f4\u3057\u307e\u3057\u305f'.encode('utf-8') in response
        response = self.app.get(url(controller='i18nc', action=action, lang='fr'))
        assert 'Could not set language to "fr"' in response

    def test_detect_lang(self):
        response = self.app.get(url(controller='i18nc', action='i18n_index'), headers={
            'Accept-Language':'fr;q=0.6, en;q=0.1, ja;q=0.3'})
        # expect japanese fallback for nonexistent french.
        assert u'\u6839\u672c\u30a4\u30f3\u30c7\u30af\u30b9\u30da\u30fc\u30b8'.encode('utf-8') in response

    def test_no_lang(self):
        response = self.app.get(url(controller='i18nc', action='no_lang'))
        assert 'No language' in response
        assert 'No languages' in response

    def test_langs(self):
        # Languages should be ordered by Accept-Language quality, with the
        # defaults appended.
        response = self.app.get(url(controller='i18nc', action='langs'), headers={
            'Accept-Language':'fr;q=0.6, en;q=0.1, ja;q=0.3'})
        assert "['fr', 'ja', 'en', 'en-us']" in response
|
"""
Created on Oct 20, 2013
@author: Ofra
"""
from util import Pair
class PropositionLayer(object):
    """
    A class for a PropositionLayer in a level of the graph.

    The layer contains a list of propositions (Proposition objects) and a
    list of mutex propositions (Pair objects).
    """
    def __init__(self):
        """
        Constructor
        """
        self.propositions = []       # list of all the propositions in the layer
        self.mutexPropositions = []  # list of pairs of propositions that are mutex in the layer

    def addProposition(self, proposition):
        """Add a proposition to the layer."""
        self.propositions.append(proposition)

    def removePropositions(self, proposition):
        """Remove a single proposition from the layer (name kept for API compatibility)."""
        self.propositions.remove(proposition)

    def getPropositions(self):
        return self.propositions

    def addMutexProp(self, p1, p2):
        """Record that propositions p1 and p2 are mutex at this layer."""
        self.mutexPropositions.append(Pair(p1, p2))

    def isMutex(self, p1, p2):
        """
        Return True if proposition p1 and proposition p2 are mutex at this layer.
        (This text was previously a stray, no-op string literal in the class body.)
        """
        return Pair(p1, p2) in self.mutexPropositions

    def getMutexProps(self):
        return self.mutexPropositions

    def allPrecondsInLayer(self, action):
        """
        Return True if all propositions that are preconditions of the action
        exist in this layer and no two of them are mutex (i.e. the action
        can be applied).
        """
        preconds = action.getPre()  # hoisted: was re-evaluated three times
        for pre in preconds:
            if pre not in self.propositions:
                return False
        for pre1 in preconds:
            for pre2 in preconds:
                if Pair(pre1, pre2) in self.mutexPropositions:
                    return False
        return True

    def __eq__(self, other):
        # Value equality: same class and identical attribute dictionaries.
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)
|
import numpy as np
import cv2
# Draw demo primitives (line, arrow, rectangle, circle, text) on a black
# canvas and display the result until a key is pressed.
#
# BUG FIX: the original first loaded "lena.jpg" with cv2.imread and then
# immediately overwrote the result with the zeros canvas, so the read was
# dead code; it has been removed.
img = np.zeros([512, 512, 3], np.uint8)  # 512x512 3-channel (BGR) black image
img = cv2.line(img, (0, 0), (255, 255), (0, 0, 255), 10)
img = cv2.arrowedLine(img, (0, 255), (380, 55), (0, 180, 255), 10)
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 0, 255), 5)
img = cv2.circle(img, (447, 63), 63, (0, 255, 0), -1)
font = cv2.FONT_HERSHEY_COMPLEX
img = cv2.putText(img, "OpenCV", (5, 450), font, 4, (0, 255, 255), 10, cv2.LINE_AA)
cv2.imshow("frame", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Swap two values via tuple packing / unpacking — no temporary needed.
x = 10
y = 11
x, y = y, x
print("x", x)
print("y", y)
import odsh_parser
import odsh_lib
import json
# Parse a small shell-like program (covers &&, || and a pipeline) into a
# block structure.
blk = odsh_parser.parse('''
ls / && echo "OK"
ls /niwf3w4y3nxif4 || echo "Failed"
ls / | grep etc
''')
# Dump the compiled block as JSON, then execute it with the odsh engine.
print(json.dumps(blk.build()))
exec_blk = odsh_lib.Block(blk.build())
engine = odsh_lib.Engine()
engine.eval_block(exec_blk)
|
import sys
sys.path.append('..')
from winnie.util.singletons import Singleton
class Singletest:
    # Demo class for the Singleton metaclass: constructing it repeatedly is
    # expected to yield the shared instance.
    # NOTE: __metaclass__ only takes effect on Python 2 (this file uses the
    # Python 2 print statement); on Python 3 it is an ignored attribute.
    __metaclass__ = Singleton
    def __init__(self, text):
        self.text = text
    def write(self):
        # Echo the stored text to stdout (Python 2 print statement).
        print self.text
|
def even_number(start, end):
    """Interactively count the even numbers in a range.

    Prompts the user for "From" (makes `end` inclusive) or "Between"
    (shrinks the range), then returns a formatted summary of the even
    numbers found in range(start, end).
    """
    count = 0
    number_list = []
    # `mode` renamed from `type`, which shadowed the builtin.
    mode = input("From OR Between : ").lower()
    if mode == "from":
        end += 1  # include the end value itself
    elif mode == "between":
        # NOTE(review): shrinking only `end` by 2 looks suspicious for
        # "between" semantics (start is still included) — confirm intent.
        end -= 2
    # BUG FIX: in the original this loop was fused onto the `end -= 2` line
    # ("end -= 2 for number in range(...)"), which is a syntax error.
    for number in range(start, end):
        if number % 2 == 0:
            number_list.append(number)
            count += 1
    output = (f'''
Even Numbers : {count}
List Of Numbers : {number_list}
''')
    return output
# Script entry point: read the start / end bounds from stdin and print the
# even-number summary.
if __name__ == "__main__":
    start = int(input("Start: "))
    end = int(input("End: "))
    print(even_number(start, end))
|
import string
import sys
# PCG-style LCG multiplier (Python 2 long literal — note the trailing L).
N=0x5851F42D4C957F2DL
mask=2**64-1    # wrap mask for the 64-bit LCG state
mask32=2**32-1  # mask for the 32-bit generator output
def swap(arr, a, b):
    """Exchange the elements at indices a and b of arr, in place."""
    tmp = arr[a]
    arr[a] = arr[b]
    arr[b] = tmp
# Rotate right: 0b1001 --> 0b1100
def ror(val, r_bits, max_bits):
    """Rotate val right by r_bits within a max_bits-wide word."""
    shift = r_bits % max_bits
    word = 2 ** max_bits - 1  # all-ones mask for the word width
    return ((val & word) >> shift) | ((val << (max_bits - shift)) & word)
class Random:
    """PCG-style generator: 64-bit LCG state, 32-bit rotated output.

    Relies on the module-level constants N (LCG multiplier), mask (64-bit
    wrap mask) and mask32 (32-bit output mask) plus the ror() helper.
    """
    def __init__(self, seed):
        # Seed scrambling: one multiply step plus an N+1 offset, wrapped to
        # 64 bits.
        self.state = ((seed * N&mask)+N+1)&mask
    def generate(self):
        # XSH-RR style output: xorshift the state, take the high 32 bits and
        # rotate them right by the top 5 bits of the state; then advance the
        # LCG (same multiplier as N, increment 1).
        a=self.state
        data=ror(((a ^ (a >> 18)) >> 27)&mask32, a >> 59, 32)
        self.state=((0x5851F42D4C957F2D * self.state&mask)+1)&mask
        return data
# --- Pass 1: encrypt the known plaintext under the fixed key ----------------
# Each of the 16 rounds draws a Fisher-Yates-style shuffle of 0..255 from the
# PRNG (with rejection sampling for non-power-of-two bounds), takes a slice
# the length of the buffer, sorts it descending, XORs it in and reverses the
# buffer.  Python 2 code (print statement, str/bytearray mixing).
input=bytearray('flag{cuxnvyrsuy}')
keyval=34895
R=Random(keyval);
round = 0;
for round in range(16):
    state = range(256)
    curState = state[:]
    for v46 in range(len(state), 1, -1):
        # r is the rejection threshold for an unbiased draw modulo v46;
        # r == 0 means v46 divides 2**32 and any draw is acceptable.
        r=(mask32+1-v46)%v46
        if r:
            r=mask32+1-r
            while True:
                v53=R.generate()
                if v53<r:
                    break
        else:
            v53 = R.generate()
        swap(curState, (v53 % v46), v46 - 1)
    sliced = curState[:len(input)]
    # Two extra draws per round (a discarded 64-bit value) keep the stream
    # aligned with the reference implementation.
    r64 = R.generate();
    r64 |= R.generate() << 32
    sliced=sorted(sliced, key=lambda x: -x)
    input = [x^y for x, y in zip(sliced, input)]
    input = input[::-1]
input=str(bytearray(input))
print input.encode('hex')
# --- Pass 2: regenerate the 16 key-stream slices and undo the cipher --------
# The keyval loop is a leftover brute-force harness, currently pinned to the
# single known key.  Decryption replays pass 1's PRNG stream to rebuild each
# round's slice, then applies reverse + XOR in reverse round order.
# orig='04dd5a70faea88b76e4733d0fa346b086e2c0efd7d2815e3b6ca118ab945719970642b2929b18a71b28d87855796e344d8'.decode('hex')
orig=input
orig=list(bytearray(orig))
lol = [x<=128 for x in range(256)]  # unused leftover
slices=[None]*16
# for keyval in range(0, 65536):
for keyval in [keyval]:
    input=orig[:]
    R=Random(keyval)
    if keyval & 0xff == 0:
        # brute-force progress indicator (one line per 256 keys)
        print keyval
    for round in range(16):
        state = range(256)
        curState = state[:]
        for v46 in range(256, 1, -1):
            r=(mask32+1-v46)%v46
            if r:
                r=mask32+1-r
                while True:
                    v53 = R.generate()
                    if v53<r:
                        break
                    # rejection happened — noisy debug marker
                    print keyval, '!'
            else:
                v53 = R.generate()
            # print '0x%x'%(v53%v46),
            curState[v53%v46],curState[v46-1]=curState[v46-1],curState[v53%v46]
            # swap(curState, (v53 % v46), v46 - 1)
        sliced = curState[:len(input)]
        r64 = R.generate();
        r64 |= R.generate() << 32
        slices[round]=sorted(sliced, key=lambda x: -x)
    # Undo the rounds in reverse order: reverse first, then XOR the matching
    # round's slice back out.
    for round in range(16):
        input = input[::-1]
        input = [x^y for x, y in zip(slices[15-round], input)]
        # print input
    input=str(bytearray(input))
    if 1:
        print input
        # break
# open('payload','wb').write(input)
|
# Generated by Django 3.0.4 on 2020-04-11 06:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops the time_slot FK from
    # ConsumerTimeSlotMapping, adds booking status / home delivery fields,
    # and introduces the Notifications and AppVersion models.

    dependencies = [
        ('users', '0016_auto_20200403_0628'),
        ('services_manager', '0008_auto_20200406_1259'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='consumertimeslotmapping',
            name='time_slot',
        ),
        migrations.AddField(
            model_name='consumertimeslotmapping',
            name='booking_status',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Pending'), (2, 'Failed'), (3, 'Completed'), (4, 'Canceled')], default=1, null=True),
        ),
        migrations.AddField(
            model_name='providerstimeslot',
            name='home_delivery',
            field=models.BooleanField(default=False),
        ),
        migrations.CreateModel(
            name='Notifications',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('msg', models.TextField(blank=True, null=True)),
                ('color', models.CharField(blank=True, max_length=32, null=True)),
                ('is_sent', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(blank=True, null=True)),
                ('deleted', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.User')),
            ],
        ),
        migrations.CreateModel(
            name='AppVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.DecimalField(decimal_places=5, default=0.0, max_digits=12)),
                ('last_updated', models.DateTimeField(blank=True, null=True)),
                ('force_update', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('deleted', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.User')),
            ],
        ),
    ]
|
import json
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
def pushData(value):
    """Append one detection event to data.json.

    `value` is used as the JSON key of the event (e.g. "WithMask" or
    "NoMask"); the event also records a "%d/%m/%Y, %H:%M:%S" timestamp.
    Requires data.json to already exist with a {"data": [...]} structure.
    """
    timeNow = datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
    with open("data.json", "r+") as file:
        data = json.load(file)
        data["data"].append({value: 1, "date": timeNow})
        file.seek(0)
        json.dump(data, file)
        # BUG FIX: rewriting an "r+" file in place must truncate, otherwise a
        # previously longer file leaves trailing garbage after the new JSON.
        file.truncate()
def erase_data():
    """Reset data.json to its baseline structure.

    Writes a fresh document containing zeroed "NoMask" / "WithMask" entries
    stamped with the current time. The original implementation first read
    and .clear()-ed the old JSON for no effect (the file was then opened in
    "w" mode anyway) and therefore crashed when data.json was missing or
    corrupt; this version simply overwrites/creates the file.
    """
    timeNow = datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
    baseline = {"data": [{"NoMask": 0, "date": timeNow}, {"WithMask": 0, "date": timeNow}]}
    with open('data.json', 'w') as data_file:
        json.dump(baseline, data_file)
def plot_data():
    """Read data.json, aggregate WithMask/NoMask counts per day, and show a bar chart."""
    with open("data.json", "r") as file:
        data = json.load(file)
    table = pd.DataFrame(data['data'])
    # Each entry carries only one of the two keys, so the other column is NaN;
    # also guard against a column missing entirely when one class was never
    # logged (the original fillna raised KeyError in that case).
    for col in ('WithMask', 'NoMask'):
        if col not in table.columns:
            table[col] = 0
        table[col] = table[col].fillna(0)
    # FIX: timestamps are written as "dd/mm/YYYY, HH:MM:SS" (see pushData);
    # give to_datetime the explicit format so day/month are never swapped.
    table['date'] = pd.to_datetime(table['date'], format="%d/%m/%Y, %H:%M:%S")
    table = table.groupby([table['date'].dt.strftime('%m/%d/%Y')]).sum().reset_index()
    # Plot grouped totals by date.
    labels = table['date']
    x = np.arange(len(labels))  # the label locations
    width = 0.2  # the width of the bars
    fig, ax = plt.subplots()
    # FIX: FigureCanvas.set_window_title was deprecated in matplotlib 3.4 and
    # removed in 3.6; the window title now belongs to the figure manager.
    fig.canvas.manager.set_window_title(
        'Mask detector plot at %s' % datetime.now().strftime("%d/%m/%Y, %H:%M:%S"))
    rects1 = ax.bar(x - width/2, table['WithMask'], width, color='#02bf61', label='With Mask')
    rects2 = ax.bar(x + width/2, table['NoMask'], width, color='#b00000', label='Without Mask')
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Total')
    ax.set_title('Total by date')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    def autolabel(rects):
        # Attach a text label above each bar in *rects*, displaying its height.
        for rect in rects:
            height = rect.get_height()
            ax.annotate('{}'.format(height),
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')

    autolabel(rects1)
    autolabel(rects2)
    fig.tight_layout()
    plt.show()
def export_data():
    """Convert data.json to an Excel sheet in the user's Documents folder."""
    home = os.path.expanduser('~')
    folder_path = os.path.join(home, 'Documents')
    if os.path.isdir(folder_path):
        try:
            with open("data.json", "r") as file:
                data = json.load(file)
            table = pd.DataFrame(data['data'])
            # Missing columns/NaNs handled as in plot_data.
            for col in ('WithMask', 'NoMask'):
                if col not in table.columns:
                    table[col] = 0
                table[col] = table[col].fillna(0)
            # Timestamps are written by pushData as "dd/mm/YYYY, HH:MM:SS".
            table['date'] = pd.to_datetime(table['date'], format="%d/%m/%Y, %H:%M:%S")
            # FIX: the original concatenated folder_path + "\mask-detector.xlsx",
            # which only works on Windows (and relied on "\m" not being a
            # recognized escape sequence). Build the path portably instead.
            table.to_excel(os.path.join(folder_path, "mask-detector.xlsx"), index=False)
        except OSError:
            print("Error: could not read data.json or write mask-detector.xlsx")
    else:
        print("Error: Documents folder not found at %s" % folder_path)
|
from TruthTable import TruthTable
def get_extra_cols():
    """Prompt until the user supplies a non-negative integer column count.

    Returns the number of additional (derived) columns requested.
    """
    num_extra_cols = -1
    while num_extra_cols < 0:
        try:
            num_extra_cols = int(input("How many additional columns do you need? "))
            while num_extra_cols < 0:
                num_extra_cols = int(input("The number of additional columns cannot be negative, please enter in a new number:"))
        # FIX: the original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only a non-numeric entry should re-prompt.
        except ValueError:
            print('Please only enter numbers.')
    return num_extra_cols
def main():
    """Prompt for variable names and extra columns, then write the truth table to output.tex."""
    # Get the names of the pure (input) variables; at least one is required.
    vars = input("Enter the names of the variables (Space Separated):").split()
    while len(vars) <= 0:
        vars = input("You must supply at least one variable name, try again:").split()
    table = TruthTable(vars)
    # See how many extra columns to generate.
    num_extra_cols = get_extra_cols()
    table.make_columns(num_extra_cols)
    # NOTE(review): output path is hard-coded; presumably LaTeX given the extension.
    table.write_to_file('output.tex')
if __name__ == '__main__':
main() |
# -*- coding=gbk -*-
import sys, getopt
import re
import os
from os.path import isfile, join, isdir, getsize
def is_video(filename):
    """Return True if `filename` ends with a known video extension (case-sensitive)."""
    video_extensions = (".mp4", ".avi", ".mkv", ".flv", ".rmvb", ".wmv")
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return filename.endswith(video_extensions)
def get_url_list(file_path, url_list):
    """Recursively collect relative URLs for every video file under file_path.

    Appends to url_list in place. Paths are split on Windows separators and the
    first two components are dropped (drive + top-level folder of the scan
    root), then re-joined with '/' to form a web-style relative URL.
    """
    if isfile(file_path):
        if is_video(file_path):
            # assumes file_path uses backslash separators (see work()'s input_path)
            url_list.append("/".join(file_path.split("\\")[2:]))
    elif isdir(file_path):
        for filename in os.listdir(file_path):
            get_url_list(join(file_path, filename), url_list)
def work(ff = sys.stdin, fout = sys.stdout):
    """Fill a template read from ff and write it to fout.

    Lines ending in '###urls###' are replaced by `url[i] = "..."` assignments,
    one per video found under input_path; all other lines are copied through.
    Output is transcoded GBK -> UTF-8 (Python 2 only: str.decode does not exist
    on Python 3 str).
    """
    # NOTE(review): scan root is hard-coded; the "\\" split in get_url_list
    # assumes this Windows-style path layout.
    input_path="F:\\aaa"
    url_list = []
    get_url_list(input_path, url_list)
    s = ""
    for line in ff:
        line = line.rstrip("\n")
        if line.endswith("###urls###"):
            for i, url in enumerate(url_list):
                fout.write(("url[%i] = \"%s\"\n" %(i, url)).decode("gbk").encode("utf8"))
        else:
            fout.write(("%s\n" %(line)).decode("gbk").encode("utf8"))
# for line in ff:
# line = line.rstrip("\n")
# splits = line.split("\t")
def main(args):
    """Parse -f <input> / -o <output> options and run work().

    Defaults to stdin/stdout when an option is absent.
    """
    ff = sys.stdin
    fout = sys.stdout
    opts, args = getopt.getopt(args, "f:o:")
    for op, value in opts:
        if op == '-f':
            ff = open(value, 'r')
        if op == '-o':
            fout = open(value, 'w')
    # NOTE(review): opened files are never closed; tolerable for a one-shot script.
    work(ff, fout)
if __name__ == '__main__':
main(sys.argv[1:]) |
import argparse
import time
import math
import numpy as np
import sklearn.metrics as sk
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import data
import model
from utils_lm import batchify, get_batch, repackage_hidden
# go through rigamaroo to do ..utils.display_results import show_performance
# When run as a script (no package context), add the grandparent directory to
# sys.path so the absolute `utils.*` imports below resolve.
if __package__ is None:
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.display_results import show_performance
from utils.log_sum_exp import log_sum_exp
# Command-line options (mirrors the awd-lstm-lm training script's interface).
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
                    help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
                    help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=30,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=80, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=70,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
                    help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.65,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
                    help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
# FIX: help string was a copy-paste of --seed's ('random seed').
parser.add_argument('--nonmono', type=int, default=5,
                    help='non-monotone interval (epochs) used to trigger ASGD')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
randomhash = ''.join(str(time.time()).split('.'))
parser.add_argument('--save', type=str, default=randomhash+'.pt',
                    help='path to save the final model')
parser.add_argument('--alpha', type=float, default=2,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
                    help='weight decay applied to all weights')
parser.add_argument('--resume', type=str, default='',
                    help='path of model to resume')
parser.add_argument('--optimizer', type=str, default='sgd',
                    help='optimizer to use (sgd, adam)')
parser.add_argument('--when', nargs="+", type=int, default=[-1],
                    help='When (which epochs) to divide the learning rate by 10 - accepts multiple')
parser.add_argument('--character_level', action='store_true', help="Use this flag to evaluate character-level models.")
args = parser.parse_args()
# Weight tying between embedding and softmax is always on for this evaluation.
args.tied = True

# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
def model_save(fn):
    """Serialize the module-level model, criterion and optimizer to file fn."""
    with open(fn, 'wb') as f:
        torch.save([model, criterion, optimizer], f)
def model_load(fn):
    """Restore model, criterion and optimizer (module globals) from checkpoint fn."""
    global model, criterion, optimizer
    with open(fn, 'rb') as f:
        model, criterion, optimizer = torch.load(f)
import os
import hashlib
# Cache the tokenized corpus on disk, keyed by a hash of the data path.
fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())
if os.path.exists(fn):
    print('Loading cached dataset...')
    corpus = torch.load(fn)
else:
    print('Producing dataset...')
    corpus = data.Corpus(args.data)
    torch.save(corpus, fn)
eval_batch_size = 10
test_batch_size = 1 # DON'T CHANGE THIS
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
# Out-of-distribution evaluation sets built from English Web Treebank splits,
# tokenized with the in-distribution corpus dictionary.
print('Producing ood datasets...')
answers_corpus = data.OODCorpus('eng_web_tbk/answers/conll/answers_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
answers_data = batchify(answers_corpus.data, test_batch_size, args)
email_corpus = data.OODCorpus('eng_web_tbk/email/conll/email_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
email_data = batchify(email_corpus.data, test_batch_size, args)
newsgroup_corpus = data.OODCorpus('eng_web_tbk/newsgroup/conll/newsgroup_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
newsgroup_data = batchify(newsgroup_corpus.data, test_batch_size, args)
reviews_corpus = data.OODCorpus('eng_web_tbk/reviews/conll/reviews_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
reviews_data = batchify(reviews_corpus.data, test_batch_size, args)
weblog_corpus = data.OODCorpus('eng_web_tbk/weblog/conll/weblog_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
weblog_data = batchify(weblog_corpus.data, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
from splitcross import SplitCrossEntropyLoss
criterion = None
ntokens = len(corpus.dictionary)
# NOTE(review): this freshly constructed model is immediately replaced by
# model_load() below; building it here only mirrors the training script.
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied)
###
assert args.resume, 'must provide a --resume argument'
print('Resuming model ...')
model_load(args.resume)
optimizer.param_groups[0]['lr'] = args.lr
# Override the checkpoint's dropout settings with the command-line values.
model.dropouti, model.dropouth, model.dropout, args.dropoute = args.dropouti, args.dropouth, args.dropout, args.dropoute
if args.wdrop:
    from weight_drop import WeightDrop
    for rnn in model.rnns:
        if type(rnn) == WeightDrop: rnn.dropout = args.wdrop
        elif rnn.zoneout > 0: rnn.zoneout = args.wdrop
###
# criterion may already have been restored by model_load(); build it only if not.
if not criterion:
    splits = []
    if ntokens > 500000:
        # One Billion
        # This produces fairly even matrix mults for the buckets:
        # 0: 11723136, 1: 10854630, 2: 11270961, 3: 11219422
        splits = [4200, 35000, 180000]
    elif ntokens > 75000:
        # WikiText-103
        splits = [2800, 20000, 76000]
    print('Using', splits)
    criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False)
###
if args.cuda:
    model = model.cuda()
    criterion = criterion.cuda()
###
params = list(model.parameters()) + list(criterion.parameters())
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size())
print('Args:', args)
print('Model total parameters:', total_params)
###############################################################################
# Eval code
###############################################################################
# Score a fifth as many OOD chunks as there are test positions; expected_ap is
# the base-rate average precision of a random detector at that mix.
ood_num_examples = test_data.size(0) // 5
expected_ap = ood_num_examples / (ood_num_examples + test_data.size(0))
recall_level = 0.9
def get_base_rates():
    """Compute unigram (base-rate) token probabilities over the training data.

    Returns an array of length ntokens with counts normalized to sum to 1.
    """
    batch, i = 0, 0
    seq_len = args.bptt
    ntokens = len(corpus.dictionary)
    token_counts = np.zeros(ntokens)
    total_count = 0
    for i in range(0, train_data.size(0), args.bptt): # Assume OE dataset is larger. It is, because we're using wikitext-2.
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)
        for j in range(targets.numel()):
            # .data.cpu().numpy()[0] is the scalar-extraction idiom of pre-0.4 PyTorch.
            token_counts[targets[j].data.cpu().numpy()[0]] += 1
            total_count += 1
        batch += 1
    return token_counts / total_count
print('Getting base rates...')
# base_rates = get_base_rates()
# np.save('./base_rates.npy', base_rates)
# The unigram distribution is precomputed by get_base_rates() and cached on disk.
base_rates = Variable(torch.from_numpy(np.load('./base_rates.npy').astype(np.float32))).cuda().float().squeeze()
# Uniform reference distribution over the vocabulary (normalized below).
uniform_base_rates = Variable(torch.from_numpy(np.ones(len(corpus.dictionary)).astype(np.float32))).cuda().float().squeeze()
uniform_base_rates /= uniform_base_rates.numel()
print('Done.')
def evaluate(data_source, corpus, batch_size=10, ood=False):
    """Score `data_source` with the restored language model.

    Returns (avg_loss, anomaly_scores): avg_loss is the mean negative log-prob
    per bptt chunk; anomaly_scores holds one score per chunk (a negative
    KL-to-uniform surrogate computed from the softmax outputs). When `ood` is
    True only ~ood_num_examples chunks are scored.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    loss_accum = 0
    losses = []
    ntokens = len(corpus.dictionary)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        if (i >= ood_num_examples // test_batch_size) and (ood is True):
            break
        # Fresh hidden state per chunk so scores do not leak across chunks.
        hidden = model.init_hidden(batch_size)
        hidden = repackage_hidden(hidden)
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, hidden = model(data, hidden)
        logits = model.decoder(output)
        # Subtract the per-row max for numerical stability before softmax.
        smaxes = F.softmax(logits - torch.max(logits, dim=1, keepdim=True)[0], dim=1)
        tmp = smaxes[range(targets.size(0)), targets]
        log_prob = torch.log(tmp).mean(0)  # divided by seq len, so this is the negative nats per char
        loss = -log_prob.data.cpu().numpy()[0]
        loss_accum += loss
        # losses.append(loss)
        # Experimental!
        # anomaly_score = -torch.max(smaxes, dim=1)[0].mean() # negative MSP
        anomaly_score = ((smaxes).add(1e-18).log() * uniform_base_rates.unsqueeze(0)).sum(1).mean(0) # negative KL to uniform
        losses.append(anomaly_score.data.cpu().numpy()[0])
        #
    return loss_accum / (len(data_source) // args.bptt), losses
# Run on test data, then on each OOD split. The five copy-pasted report blocks
# are folded into one helper plus a loop; printed output is unchanged.
def _eval_and_report(title, dataset, dataset_corpus, ood=False):
    """Evaluate one dataset, print the loss/ppl/bpc banner, return anomaly scores."""
    print('\n' + title)
    loss, losses = evaluate(dataset, dataset_corpus, test_batch_size, ood=ood)
    print('=' * 89)
    print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
        loss, math.exp(loss), loss / math.log(2)))
    print('=' * 89)
    return losses

# In-distribution scores on the PTB test set form the baseline for OOD detection.
test_losses = _eval_and_report('PTB', test_data, corpus)

# Each OOD split is scored identically and compared against the PTB baseline.
for _title, _data, _corpus in (
        ('Answers (OOD)', answers_data, answers_corpus),
        ('Email (OOD)', email_data, email_corpus),
        ('Newsgroup (OOD)', newsgroup_data, newsgroup_corpus),
        ('Reviews (OOD)', reviews_data, reviews_corpus),
        ('Weblog (OOD)', weblog_data, weblog_corpus)):
    ood_losses = _eval_and_report(_title, _data, _corpus, ood=True)
    show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
|
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include, re_path
from django.contrib.auth import views as auth_views
from core.views import *
# URL routes: CRUD-style views for enderecos/lojas/areas plus GPS validation
# and the TURF map views; AJAX endpoints are grouped under ajax/.
urlpatterns = [
    # path('admin/', admin.site.urls),
    path('', index.as_view(), name='index'),
    path('account/', include('django.contrib.auth.urls'), name='account'),
    path('enderecos/', enderecos.as_view(), name='enderecos'),
    path('add_endereco/', add_endereco.as_view(), name='add_endereco'),
    path('del_endereco/<int:id_endereco>/', del_endereco, name='del_endereco'),
    path('ajax/new_endereco/', new_endereco, name='new_endereco'),
    path('lojas/', lojas.as_view(), name='lojas'),
    path('add_loja/', add_loja.as_view(), name='add_loja'),
    path('del_loja/<int:id_loja>/', del_loja, name='del_loja'),
    path('ajax/new_loja/', new_loja, name='new_loja'),
    path('areas/', areas.as_view(), name='areas'),
    path('add_area/', add_area.as_view(), name='add_area'),
    path('del_area/<int:id_area>/', del_area, name='del_area'),
    path('ajax/new_area/', new_area, name='new_area'),
    path('gps/', gps.as_view(), name='gps'),
    path('ajax/valid_gps/', valid_gps, name='valid_gps'),
    ################################################################################
    # TURF ########################################################################
    path('endereco_turf/', endereco_turf.as_view(), name='endereco_turf'),
    path('loja_turf/', loja_turf.as_view(), name='loja_turf'),
    path('area_turf/', area_turf.as_view(), name='area_turf'),
    path('gps_turf/', gps_turf.as_view(), name='gps_turf'),
    path('usuarios/', usuarios.as_view(), name='usuarios'),
    path('new_usuario/', new_usuario, name='new_usuario'),
]
|
from django.contrib import admin
# Register your models here.
from .models import Motif, CommentLike, Comment
# Expose the motif/comment models in the Django admin with the default ModelAdmin.
admin.site.register(Motif)
admin.site.register(Comment)
admin.site.register(CommentLike)
import cv2 as cv
import json
import os
import pickle as pkl
import ffmpeg
import argparse
# Command-line interface: pick the dataset split and an optional cleanup pass.
parser = argparse.ArgumentParser(
    description="Collect the frames from video.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('--mode',
                    default='train',
                    help='train/val/test',
                    required=True)
parser.add_argument('--clean',
                    action='store_true',
                    help='remove deleted videos entries from the map.')
# Read the video metadata and return the correct rotation
def get_rotation_correct(path):
    """Map a video's rotation metadata tag to the matching cv2 rotation flag.

    Returns None when the stream carries no 'rotate' tag (or an unhandled
    angle) — the original raised KeyError on videos without rotation metadata.
    """
    meta_dict = ffmpeg.probe(path)
    rotate_tag = meta_dict['streams'][0].get('tags', {}).get('rotate')
    if rotate_tag is None:
        return None
    # Dictionary dispatch replaces the triple int()/if chain.
    rotation_by_angle = {
        90: cv.ROTATE_90_CLOCKWISE,
        180: cv.ROTATE_180,
        270: cv.ROTATE_90_COUNTERCLOCKWISE,
    }
    return rotation_by_angle.get(int(rotate_tag))
if __name__ == '__main__':
    args = parser.parse_args()
    mode = args.mode
    json_dir = f'../../info/{mode}/'
    vid_dir = f'../../videos/{mode}/'
    image_dir = f'../../images/{mode}/'
    image_map_file = os.path.join(image_dir, f'image_map_{mode}.pkl')
    # Try to read in the image map (video key -> [(prev_frame, current_frame, speed)]).
    image_map = {} if not os.path.isfile(image_map_file) else pkl.load(open(image_map_file, 'rb'))
    # (video key -> video_file_path)
    vid_map = {}
    # (video key -> json_file_path)
    json_map = {}
    if args.clean:
        # BUG FIX: the original iterated `for k, v in image_map:`, which walks
        # dict KEYS only and raises ValueError on unpacking. Iterate a snapshot
        # of items() because entries are popped during the loop.
        for k, v in list(image_map.items()):
            if not os.path.isfile(os.path.join(vid_dir, f'{k}.mov')):
                for (prev, current, _) in v:
                    os.remove(os.path.join(image_dir, k, prev))
                    os.remove(os.path.join(image_dir, k, current))
                image_map.pop(k, None)
    else:
        # Populate the json and video maps from the info/video directories.
        for filename in os.listdir(json_dir):
            if filename.endswith('.json'):
                json_map[filename[:-5]] = os.path.join(json_dir, filename)
        for filename in os.listdir(vid_dir):
            if filename.endswith('.mov'):
                vid_map[filename[:-4]] = os.path.join(vid_dir, filename)
        for i, key in enumerate(vid_map.keys()):
            print(f'Processing {key} | {i+1}/{len(vid_map)}')
            if key in image_map:
                print('Already extracted frames for video..')
                continue
            if key not in json_map:
                print('No JSON file found for video..')
                continue
            try:
                json_obj = json.load(open(json_map[key]))
                if json_obj['gps'] is None or json_obj['startTime'] is None:
                    print('No GPS or startTime data found..')
                    continue
                start_time = json_obj['startTime']
                vid = cv.VideoCapture(vid_map[key])
                rotation = get_rotation_correct(vid_map[key])
                # BUG FIX: a misplaced parenthesis made the original width test
                # `int(vid.get(...) != 720)` — int(bool) — instead of comparing
                # the width itself (same truth value by luck, but misleading).
                if int(vid.get(cv.CAP_PROP_FRAME_HEIGHT)) != 1280 or int(vid.get(cv.CAP_PROP_FRAME_WIDTH)) != 720:
                    print('Video not high res..')
                    continue
                image_counter = 0
                for gps_entry in json_obj['gps']:
                    timestamp = gps_entry['timestamp']
                    speed = gps_entry['speed']
                    if speed < 0:
                        print('Speed is negative..')
                        continue
                    # Seek to the GPS timestamp and grab the current frame.
                    vid.set(cv.CAP_PROP_POS_MSEC, int(timestamp - start_time))
                    current_frame = vid.get(cv.CAP_PROP_POS_FRAMES)
                    try:
                        success, current_image = vid.read()
                        if rotation is not None:
                            current_image = cv.rotate(current_image, rotation)
                    except Exception:
                        continue
                    if not success or current_frame < 1:
                        continue
                    # Step back one frame so (prev, current) are consecutive.
                    vid.set(cv.CAP_PROP_POS_FRAMES, current_frame - 1)
                    try:
                        success, prev_image = vid.read()
                        if rotation is not None:
                            prev_image = cv.rotate(prev_image, rotation)
                    except Exception:
                        continue
                    if not success:
                        continue
                    parent_folder = os.path.join(image_dir, key)
                    os.makedirs(parent_folder, exist_ok=True)
                    # Save the frame pair to the per-video folder.
                    prev_filename = f'{key}-{image_counter}-prev.png'
                    current_filename = f'{key}-{image_counter}-current.png'
                    cv.imwrite(os.path.join(parent_folder, prev_filename), prev_image)
                    cv.imwrite(os.path.join(parent_folder, current_filename), current_image)
                    image_map.setdefault(key, []).append((prev_filename, current_filename, speed))
                    image_counter += 1
                vid.release()
            except Exception:
                # NOTE(review): narrowed from a bare `except:`; this broad handler
                # still hides video-processing errors, not just JSON failures.
                print('Problem loading JSON..')
    # Save the image map to file.
    pkl.dump(image_map, open(image_map_file, 'wb'))
import FWCore.ParameterSet.Config as cms
########################################
# Command line argument parsing
########################################
import FWCore.ParameterSet.VarParsing as VarParsing
# Register and parse the cmsRun command-line options (Python 2 CMSSW config).
options = VarParsing.VarParsing ('analysis')
options.register ('crab',
                  0, # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.int,          # string, int, or float
                  "Set to 1 to run on CRAB.")
options.register ('data',
                  0, # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.int,          # string, int, or float
                  "Set to 1 for data.")
sample_options = ['signal', 'background', 'wjet', 'gjet'] # Valid options for sample
options.register ('sample',
                  'signal', # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.string,          # string, int, or float
                  "Specify type of sample. Valid values: %s" % sample_options)
steps_options = ['skim', 'analyze'] # Valid options for steps
options.register ('steps',
                  [],
                  VarParsing.VarParsing.multiplicity.list, # singleton or list
                  VarParsing.VarParsing.varType.string,          # string, int, or float
                  "Steps to execute. Possible values: skim, analyze.")
options.steps = ['skim', 'analyze'] # default value
options.register ('doHLT',
                  0, # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.int,          # string, int, or float
                  "Set to 1 to turn on HLT selection for skim step.")
options.register ('doJetFilter',
                  1, # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.int,          # string, int, or float
                  "Set to 1 to turn on JetFilter for skim step.")
# options.register ('ntupleFile',
#                   'ntuple.root', # default value
#                   VarParsing.VarParsing.multiplicity.singleton, # singleton or list
#                   VarParsing.VarParsing.varType.string,          # string, int, or float
#                   "Specify plain root output file created by TFileService")
options.register ('outputLabel',
                  '', # default value
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.string,          # string, int, or float
                  "Specify label for both PoolOutputModule and TFileService output files.")
# Get and parse the command line arguments
options.parseArguments()
print ''
print 'Printing options:'
print options
print 'Only the following options are used: crab, data, sample, steps, doHLT, doJetFilter'
print ''
# Check validity of command line arguments
if options.sample not in sample_options:
    print 'Invalid sample type. Setting to sample type to signal.'
    options.sample = 'signal'
# NOTE(review): removing items from options.steps while iterating it can skip
# the element following each removed entry.
for step in options.steps:
    if step not in steps_options:
        print "Skipping invalid steps: %s" % step
        options.steps.remove(step)
print options.steps
from EmergingJetAnalysis.Configuration.emjetTools import *
# Create the CMS process; renamed to SKIM when only the skim step runs.
process = cms.Process('TEST')
if 'skim' in options.steps and len(options.steps)==1:
    # If only running skim, change process name
    process.setName_('SKIM')
########################################
# Stable configuration
########################################
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
## Geometry and Detector Conditions (needed for a few patTuple production steps)
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
## Options and Output Report
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False),
                                      # SkipEvent = cms.untracked.vstring('ProductNotFound')
                                      )
# Unscheduled execution
#process.options.allowUnscheduled = cms.untracked.bool(False)
process.options.allowUnscheduled = cms.untracked.bool(True)
########################################
# Skim
########################################
import os
cmssw_version = os.environ['CMSSW_VERSION']
skimStep = cms.Sequence()
if 'skim' in options.steps:
    print ''
    print '####################'
    print 'Adding Skim step'
    print '####################'
    print ''
    if options.sample=='wjet':
        skimStep = addWJetSkim(process, options.data)
        # The electron ID string depends on the CMSSW release being used.
        if 'CMSSW_7_4_12' in cmssw_version:
            process.wJetFilter.electronID = cms.string('cutBasedElectronID-Spring15-25ns-V1-standalone-medium')
        elif 'CMSSW_7_4_1_patch4' in cmssw_version:
            process.wJetFilter.electronID = cms.string('cutBasedElectronID-CSA14-50ns-V1-standalone-medium')
    elif options.sample=='gjet':
        skimStep = addGJetSkim(process, isData=options.data)
    else:
        skimStep = addSkim(process, isData=options.data, doJetFilter=options.doJetFilter, doHLT=options.doHLT)
########################################
# Analyze
########################################
analyzeStep = cms.Sequence()
if 'analyze' in options.steps:
    print ''
    print '####################'
    print 'Adding Analyze step'
    print '####################'
    print ''
    analyzeStep = addAnalyze(process, options.data, options.sample)
    # Rewire the photon ID producers to gedPhotons for the gamma+jet sample.
    if options.sample=='gjet':
        phoSrc = cms.InputTag("gedPhotons")
        process.egmPhotonIDs.physicsObjectSrc = phoSrc
        process.photonIDValueMapProducer.src = phoSrc
########################################
# Testing step
########################################
# Optional testing sequence; disabled by default (flip `testing` to enable).
testing = 0
testingStep = cms.Sequence()
if testing:
    testingStep = addTesting(process, options.data, options.sample)
##testMetFilters = 0
##if testMetFilters:
## process.load('RecoMET.METFilters.BadPFMuonFilter_cfi')
## #process.BadPFMuonFilter.muons = cms.InputTag("slimmedMuons")#miniAOD
## process.BadPFMuonFilter.muons = cms.InputTag("muons")
## process.BadPFMuonFilter.PFCandidates = cms.InputTag("packedPFCandidates")
## process.BadPFMuonFilter.taggingMode = cms.bool(True)
## process.load('RecoMET.METFilters.BadChargedCandidateFilter_cfi')
## #process.BadChargedCandidateFilter.muons = cms.InputTag("slimmedMuons")#miniAOD
## process.BadChargedCandidateFilter.muons = cms.InputTag("muons")
## process.BadChargedCandidateFilter.PFCandidates = cms.InputTag("packedPFCandidates")
## process.BadChargedCandidateFilter.taggingMode = cms.bool(True)
## process.emJetAnalyzer.BadChargedCandidateFilter = cms.InputTag("BadChargedCandidateFilter")
## process.emJetAnalyzer.BadPFMuonFilter = cms.InputTag("BadPFMuonFilter")
## testMetFilterStep = cms.Sequence(process.BadPFMuonFilter * process.BadChargedCandidateFilter)
# Full processing path: skim -> (optional) testing -> analyze.
process.p = cms.Path( skimStep * testingStep * analyzeStep )
##if testMetFilters: process.p = cms.Path( testMetFilterStep * skimStep * testingStep * analyzeStep )
# MET Uncertainties
# from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
# runMetCorAndUncFromMiniAOD(process,
# isData=True,
# )
########################################
# Configure EDM Output
########################################
# EDM output: full AOD(SIM)+filter content when only skimming, otherwise a
# minimal PoolOutputModule keeping events that passed path "p".
if 'skim' in options.steps and len(options.steps)==1:
    # If only running skim, add AOD/AODSIM and jetFilter/wJetFilter to output
    print ''
    print '####################'
    print 'Adding EDM output'
    print '####################'
    print ''
    addEdmOutput(process, options.data, options.sample)
else:
    # Otherwise only save EDM output of jetFilter and wJetFilter
    process.out = cms.OutputModule("PoolOutputModule",
        fileName = cms.untracked.string('output.root'),
        outputCommands = cms.untracked.vstring('drop *'),
        SelectEvents = cms.untracked.PSet(
            SelectEvents = cms.vstring("p")
        )
    )
## if testMetFilters:
##     process.out.outputCommands.extend(cms.untracked.vstring('keep *_BadPFMuonFilter_*_*',))
##     process.out.outputCommands.extend(cms.untracked.vstring('keep *_BadChargedCandidateFilter_*_*',))
## if options.sample=='wjet'  : process.out.outputCommands.extend(cms.untracked.vstring('keep *_wJetFilter_*_*',))
## elif options.sample=='gjet': process.out.outputCommands.extend(cms.untracked.vstring('keep *_gJetFilter_*_*',))
## else                       : process.out.outputCommands.extend(cms.untracked.vstring('keep *_jetFilter_*_*',))
# MET-uncertainty debugging output; disabled by default.
testMETUnc = 0
process.emJetAnalyzer.doPATMET = cms.untracked.bool( False )
if testMETUnc:
    process.emJetAnalyzer.doPATMET = cms.untracked.bool( True )
    process.out = cms.OutputModule("PoolOutputModule",
        compressionLevel = cms.untracked.int32(4),
        compressionAlgorithm = cms.untracked.string('LZMA'),
        eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
        outputCommands = cms.untracked.vstring( "keep *_slimmedMETs_*_*",
                                                "keep *_slimmedMETsNoHF_*_*",
                                                "keep *_patPFMet_*_*",
                                                "keep *_patPFMetT1_*_*",
                                                "keep *_patPFMetT1JetResDown_*_*",
                                                "keep *_patPFMetT1JetResUp_*_*",
                                                "keep *_patPFMetT1Smear_*_*",
                                                "keep *_patPFMetT1SmearJetResDown_*_*",
                                                "keep *_patPFMetT1SmearJetResUp_*_*",
                                                "keep *_patPFMetT1Puppi_*_*",
                                                "keep *_slimmedMETsPuppi_*_*",
                                                ),
        fileName = cms.untracked.string('corMETMiniAOD.root'),
        dataset = cms.untracked.PSet(
            filterName = cms.untracked.string(''),
            dataTier = cms.untracked.string('')
        ),
        dropMetaData = cms.untracked.string('ALL'),
        fastCloning = cms.untracked.bool(False),
        overrideInputFileSplitLevels = cms.untracked.bool(True)
    )
########################################
# Generic configuration
########################################
# Select the [MC, DATA] global-tag pair for the detected CMSSW release.
# NOTE(review): if no branch matches, globalTags stays undefined and the
# GlobalTag(...) call below will raise a NameError — confirm that is intended.
if 'CMSSW_7_4_12' in cmssw_version:
    globalTags=['74X_mcRun2_design_v2','74X_dataRun2_Prompt_v3']
elif 'CMSSW_7_4_1_patch4' in cmssw_version:
    globalTags=['MCRUN2_74_V9','74X_dataRun2_Prompt_v0']
elif 'CMSSW_7_6_3' in cmssw_version:
    globalTags=['76X_mcRun2_asymptotic_RunIIFall15DR76_v1','76X_dataRun2_16Dec2015_v0']
elif 'CMSSW_8_0_26_patch1' in cmssw_version:
    # globalTags=['80X_mcRun2_asymptotic_2016_miniAODv2_v1','80X_dataRun2_2016SeptRepro_v7']
    globalTags=['80X_mcRun2_asymptotic_2016_TrancheIV_v8','80X_dataRun2_2016SeptRepro_v7']
elif 'CMSSW_9_4_10' in cmssw_version:
    #globalTags=['80X_mcRun2_asymptotic_2016_TrancheIV_v8','94X_dataRun2_v6']#2016 signal
    globalTags=['94X_mc2017_realistic_v14','94X_dataRun2_ReReco_EOY17_v6']
else: print 'No global tag specified for CMSSW_VERSION: %s' % cmssw_version
# Python-2 print statements (this config runs under CMSSW's python2 cmsRun).
print 'CMSSW_VERSION is %s' % cmssw_version
print 'Using the following global tags [MC, DATA]:'
print globalTags
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
# options.data indexes the pair: 0 -> MC tag, 1 -> data tag.
process.GlobalTag = GlobalTag(process.GlobalTag, globalTags[options.data], '')
# Message logger: report every event, but cap framework-report and default
# message counts so the log stays readable.
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1)
##process.MessageLogger.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.cerr.FwkReport.limit = 20
process.MessageLogger.cerr.default.limit = 1000
# Process at most 1000 events (use -1 for all).
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
# Input source: exactly one uncommented sample at a time; the commented
# entries below are alternative signal/background/data samples kept for
# quick switching.
process.source = cms.Source("PoolSource",
    # eventsToProcess = cms.untracked.VEventRange("1:36:3523-1:36:3523"),
    # eventsToProcess = cms.untracked.VEventRange("281976:2166:min-281976:2166:max"),
    # eventsToProcess = cms.untracked.VEventRange("281976:2166:3740421624-281976:2166:max"),
    # eventsToProcess = cms.untracked.VEventRange("281976:2166:3739658361-281976:2166:3739658361"),
    fileNames = cms.untracked.vstring(
        # Signal samples
        # 2016 EmergingJet Official MC
        # modelA
        #'/store/mc/RunIISummer16DR80/EmergingJets_mX-1000-m_dpi-5-tau_dpi-150_TuneCUETP8M1_13TeV_pythia8_v2/AODSIM/FlatPU0to75TuneCP0_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v2/70000/E8524A42-D992-E811-B912-FA163EBE0C61.root'
        #'file:/data/users/jengbou/EmJetMC/Tests/2017/E8524A42-D992-E811-B912-FA163EBE0C61.root'
        #'file:/home/jengbou/workspace/CMSSW_9_4_10/src/EmergingJetAnalysis/Configuration/test/output.root'# skim output
        #'/store/mc/RunIISummer16DR80/EmergingJets_mX-1000-m_dpi-5-tau_dpi-150_TuneCUETP8M1_13TeV_pythia8_v2/AODSIM/FlatPU0to75TuneCP0_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v2/70000/D254A5FE-8592-E811-8345-A0369FD0B266.root'
        # modelB
        #'/store/mc/RunIISummer16DR80/EmergingJets_mX-1000-m_dpi-2-tau_dpi-5_TuneCUETP8M1_13TeV_pythia8_v2/AODSIM/FlatPU0to75TuneCP0_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v2/70000/6247EBBD-B891-E811-93E2-FA163E1199C7.root'
        # 2017 QCD
        #'/store/mc/RunIIFall17DRPremix/QCD_HT2000toInf_TuneCP5_13TeV-madgraph-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v2/100000/0AC96F0B-CD59-E811-8B9F-0CC47A4D76D2.root'
        #'/store/mc/RunIIFall17DRPremix/QCD_HT1500to2000_TuneCP5_13TeV-madgraph-pythia8/AODSIM/94X_mc2017_realistic_v10-v1/10000/00B3E0FF-70F8-E711-9F37-0025905A6126.root'
        #'/store/mc/RunIIFall17DRPremix/QCD_HT1500to2000_TuneCP5_13TeV-madgraph-pythia8/AODSIM/94X_mc2017_realistic_v10-v1/60000/64F7B641-25E8-E711-B504-FA163E5F2E72.root'
        #'/store/mc/RunIIFall17DRPremix/QCD_HT1500to2000_TuneCP5_13TeV-madgraph-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v2/1010000/FE7566F2-9A62-E811-974F-FA163EE6807E.root'
        # 2017 data C JetHT
        #'/store/data/Run2017C/JetHT/AOD/17Nov2017-v1/70000/744DA565-A1DA-E711-AFDB-001E6739730A.root'
        #'file:/data/users/jengbou/EmJet/Tests/2017/744DA565-A1DA-E711-AFDB-001E6739730A.root'
        # SinglePhoton
        #'/store/data/Run2017B/SinglePhoton/AOD/17Nov2017-v1/20000/58EF0879-65D3-E711-AE06-7CD30ACE0FE7.root'
        'file:/data/users/jengbou/EmJet/Tests/2017/SinglePhoton/58EF0879-65D3-E711-AE06-7CD30ACE0FE7.root'
        #"/store/mc/RunIIFall17DRPremix/GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v1/010000/1CF5277A-B679-E811-A138-F01FAFE15CBD.root"
        #"/store/mc/RunIIFall17DRPremix/GJets_DR-0p4_HT-200To400_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v1/90000/8E035DB1-1150-E811-9AA4-24BE05C38CA1.root"
        #"/store/mc/RunIIFall17DRPremix/GJets_DR-0p4_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v2/10000/D48E5941-CBB0-E811-8C56-AC1F6B0DE140.root"
        #'/store/mc/RunIIFall17DRPremix/GJets_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v1/00000/B6B3AB6C-CB3B-E811-8D3C-F01FAFD8F9BA.root'
        #'/store/mc/RunIIFall17DRPremix/GJets_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/94X_mc2017_realistic_v10-v1/30000/30B22337-02D7-E711-A2FE-0CC47A5FC619.root'
        #'file:/data/users/jengbou/EmJetMC/Tests/2017/D48E5941-CBB0-E811-8C56-AC1F6B0DE140.root'
        #"/store/mc/RunIIFall17DRPremix/GJets_HT-40To100_TuneCP5_13TeV-madgraphMLM-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v2/00000/2AFFA030-37AC-E811-843A-A0369FD0B130.root"
    ),
)
# Optional PDF-weight production (disabled by default; flip to 1 to enable).
producePdfWeights = 0
if producePdfWeights:
    # if options.data==0:
    # Produce PDF weights (maximum is 3)
    process.pdfWeights = cms.EDProducer("PdfWeightProducer",
        # Fix POWHEG if buggy (this PDF set will also appear on output,
        # so only two more PDF sets can be added in PdfSetNames if not "")
        #FixPOWHEG = cms.untracked.string("cteq66.LHgrid"),
        #GenTag = cms.untracked.InputTag("genParticles"),
        PdfInfoTag = cms.untracked.InputTag("generator"),
        PdfSetNames = cms.untracked.vstring(
            "CT14nlo.LHgrid",
            "NNPDF30_nlo_as_0118.LHgrid",
            "NNPDF23_lo_as_0130_qed.LHgrid",
            # , "MRST2006nnlo.LHgrid"
            # , "NNPDF10_100.LHgrid"
        )
    )
# Ntuple output file (TFileService) — default name carries the production date.
process.TFileService = cms.Service("TFileService", fileName = cms.string('ntuple_20181018.root') )
# Debug switch: when enabled, keep extra analyzer/genParticle collections in
# the EDM output for vertex-reconstruction studies.
testVertexReco = 0
if testVertexReco:
    # addEdmOutput(process, options.data, options.sample)
    # Keep all objects created by emJetAnalyzer
    process.out.outputCommands.extend(cms.untracked.vstring('keep *_emJetAnalyzer_*_*',))
    # Keep genParticles
    process.out.outputCommands.extend(cms.untracked.vstring('keep *_genParticles_*_*',))
# Tag both output files with the user-supplied label, if any.
if options.outputLabel:
    process.out.fileName = cms.untracked.string('output-%s.root' % options.outputLabel)
    process.TFileService.fileName = cms.string('ntuple-%s.root' % options.outputLabel)
# storage
process.outpath = cms.EndPath(process.out)
# TransientTrackBuilder is required by the track-based analyzers above.
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
# # Needed for GetTrackTrajInfo
# process.load("RecoTracker.Configuration.RecoTracker_cff")
# process.load('Configuration.StandardSequences.Reconstruction_cff') #new for navigation
# process.load('Configuration.StandardSequences.GeometryExtended_cff') #new for navigation
# # process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff') #new for navigation
# # process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
# # process.load('JetMETCorrections.Configuration.CorrectedJetProducers_cff')
# # # #get the jet energy corrections from the db file
# # process.load("CondCore.CondDB.CondDB_cfi")
# process.out.outputCommands = cms.untracked.vstring('drop *')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils import count_divisors, triangle_numbers
def pe12(sub=500):
    """
    What is the value of the first triangle number
    to have over five hundred divisors?
    >>> pe12()
    76576500
    """
    # Scan triangle numbers lazily and stop at the first one whose divisor
    # count reaches the threshold; -1 signals that the stream was exhausted
    # (triangle_numbers() is infinite, so in practice this never happens).
    candidates = (t for t in triangle_numbers() if count_divisors(t) >= sub)
    return next(candidates, -1)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive loop: read a divisor threshold per line and print the answer.
    # Exits silently on EOF / Ctrl-C / bad input.
    try:
        while True:
            s = input('> ')
            # int() raises ValueError on non-numeric input under Python 3;
            # SyntaxError/NameError are kept for Python-2 `input` compatibility.
            n = int(s)
            print(pe12(n))
    except (ValueError, SyntaxError, EOFError, KeyboardInterrupt, NameError):
        pass
|
# Basic iteration practice: strings, lists, and a user-controlled loop.
my_var = "hello"
print(my_var[0])
# Print the string one character at a time.
for cc in my_var:
    print(cc)
my_list = [10, 20, 30, 40, 50]
# Print each list element on its own line.
for num in my_list:
    print(num)
# Keep printing 10 until the user answers 'n'.
user_want_number = True
while user_want_number:
    print(10)
    user_input = input("Do you want to print again (y/n)")
    if user_input == 'n':
        user_want_number = False
|
def solution(inp):
    """Puzzle solver stub (template file): returns None until implemented."""
    return
if __name__ == "__main__":
    # Read the raw puzzle input; the commented variants below are common
    # preprocessing recipes to swap in per puzzle.
    with open('x.in') as f:
        inp = f.readlines()
    #with open('7.in') as f:
    #inp = f.read().strip().split('\n\n')
    #inp = [l.strip() for l in inp]
    #inp = [int(l.strip()) for l in inp]
    #inp = [l.strip().split(':') for l in inp]
    print(solution(inp))
|
'''
Created on 2016. 10. 26.
다중 상속 : 순서가 중요
'''
class Tiger:
    """First base class in the multiple-inheritance demo."""
    # Class attribute ("tiger world") — inherited by Liger1.
    data = '호랑이 세상'
    def Cry(self):
        # "The tiger roars!!!"
        print('호랑이 어흥!!!')
    def Eat(self):
        # "Predators eat meat"
        print('맹수는 고기를 먹음')
class Lion:
    """Second base class; its Cry() is shadowed by Tiger's in the MRO."""
    def Cry(self):
        # "The lion growls!!!!!!"
        print('사자는 으르렁!!!!!!')
    def Hobby(self):
        # "The king of beasts naps as a hobby"
        print('백수의 왕은 낮잠이 취미')
class Liger1(Tiger, Lion): # multiple inheritance: base order matters (MRO)
    pass
a1 = Liger1()
a1.Cry() # member lookup order (MRO) is Liger1 > Tiger > Lion, so Tiger.Cry runs
a1.Eat()      # only Tiger defines Eat
a1.Hobby()    # only Lion defines Hobby
print(a1.data)  # class attribute inherited from Tiger
|
"""# -*- coding: binary -*-
require 'msf/core/plugin'
=begin
require 'active_record'
#
# This monkeypatch can help to diagnose errors involving connection pool
# exhaustion and other strange ActiveRecord including errors like:
#
# DEPRECATION WARNING: Database connections will not be closed automatically, please close your
# database connection at the end of the thread by calling `close` on your
# connection. For example: ApplicationRecord.connection.close
#
# and
#
# ActiveRecord::StatementInvalid NoMethodError: undefined method `fields' for nil:NilClass: SELECT "workspaces".* FROM "workspaces" WHERE "workspaces"."id" = 24 LIMIT 1
#
#
# Based on this code: https://gist.github.com/1364551 linked here:
# http://bibwild.wordpress.com/2011/11/14/-in-rails-activerecord-3-0-3-1/
module ActiveRecord
class Base
class << self
def connection
unless connection_pool.active_connection?
$stdout.puts("AR::B.connection implicit checkout")
$stdout.puts(caller.join("\n"))
raise ImplicitConnectionForbiddenError.new("Implicit ActiveRecord checkout attempted!")
end
retrieve_connection
end
end
end
class ImplicitConnectionForbiddenError < ActiveRecord::ConnectionTimeoutError ; end
end
=end
module Msf
###
#
# This class manages the threads spawned by the framework object, this provides some additional
# features over standard ruby threads.
#
###
class < Array
include Framework::Offspring
attr_accessor :monitor
#
# Initializes the thread manager.
#
def initialize(framework)
self.framework = framework
self.monitor = spawn_monitor
# XXX: Preserve Ruby < 2.5 thread exception reporting behavior
# https://ruby-doc.org/core-2.5.0/Thread.html#method-c-report_on_exception
if Thread.method_defined?(:report_on_exception=)
Thread.report_on_exception = false
end
end
#
# Spawns a monitor thread for removing dead threads
#
def spawn_monitor
::Thread.new do
begin
::Thread.current[:tm_name] = "Thread Monitor"
::Thread.current[:tm_crit] = true
while true
::IO.select(nil, nil, nil, 1.0)
self.each_index do |i|
state = self[i].alive? rescue false
self[i] = nil if not state
end
self.delete(nil)
end
rescue ::Exception => e
elog("Thread Monitor Exception | Source: #{self[:tm_call].inspect}", error: e)
end
end
end
#
# Spawns a new thread
#
def spawn(name, crit, *args, &block)
t = nil
if block
t = ::Thread.new(name, crit, caller, block, *args) do |*argv|
::Thread.current[:tm_name] = argv.shift.to_s
::Thread.current[:tm_crit] = argv.shift
::Thread.current[:tm_call] = argv.shift
::Thread.current[:tm_time] = Time.now
begin
argv.shift.call(*argv)
rescue ::Exception => e
elog(
"Thread Exception: #{::Thread.current[:tm_name]} critical=#{::Thread.current[:tm_crit]} " \
" source:\n" \
" #{::Thread.current[:tm_call].join "\n "}",
error: e
)
raise e
ensure
if framework.db && framework.db.active && framework.db.is_local?
# NOTE: despite the Deprecation Warning's advice, this should *NOT*
# be ApplicationRecord.connection.close which causes unrelated
# threads to raise ActiveRecord::StatementInvalid exceptions at
# some point in the future, presumably due to the pool manager
# believing that the connection is still usable and handing it out
# to another thread.
::ApplicationRecord.connection_pool.release_connection
end
end
end
else
t = ::Thread.new(name, crit, caller, *args) do |*argv|
::Thread.current[:tm_name] = argv.shift
::Thread.current[:tm_crit] = argv.shift
::Thread.current[:tm_call] = argv.shift
::Thread.current[:tm_time] = Time.now
# Calling spawn without a block means we cannot force a database
# connection release when the thread completes, so doing so can
# potentially use up all database resources and starve all subsequent
# threads that make use of the database. Log a warning so we can track
# down this kind of usage.
dlog("Thread spawned without a block!")
dlog("Call stack: \n#{::Thread.current[:tm_call].join("\n")}")
end
end
self << t
t
end
#
# Registers an existing thread
#
def register(t, name, crit)
t[:tm_name] = name
t[:tm_crit] = crit
t[:tm_call] = caller
t[:tm_time] = Time.now
self << t
t
end
#
# Updates an existing thread
#
def update(ut, name, crit)
ti = nil
self.each_index do |i|
tt = self[i]
next if not tt
if ut.__id__ == tt.__id__
ti = i
break
end
end
t = self[ti]
if not t
raise RuntimeError, "Thread not found"
end
t[:tm_name] = name
t[:tm_crit] = crit
t
end
#
# Kills a thread by index
#
def kill(idx)
self[idx].kill rescue false
end
end
end
""" |
import numpy as np
def port_return(weights_m, mu_m):
    """Expected portfolio return: w' * mu."""
    weights_t = np.transpose(weights_m)
    return np.dot(weights_t, mu_m)
def calculate_mu(return_data):
    """
    Build the vector of expected returns for the securities.
    :param return_data: data frame of per-period returns (one column per security)
    :return: numpy array of the arithmetic average return of each column
    """
    column_means = return_data.mean()
    return np.array(column_means)
def port_variance(weights_m, sigma_m):
    """Portfolio variance: w' * Sigma * w."""
    left = np.dot(np.transpose(weights_m), sigma_m)
    return np.dot(left, weights_m)
def efficient_weights_mean_variance(cov_m, mu0, mu_m):
    """
    Minimum-variance portfolio weights for a target return (Markowitz with
    two equality constraints), solved via the Lagrangian linear system:

        [ 2*Sigma  1  mu ] [ w      ]   [ 0   ]
        [   1'     0  0  ] [ lambda ] = [ 1   ]   (budget: sum(w) = 1)
        [   mu'    0  0  ] [ gamma  ]   [ mu0 ]   (return: mu'w = mu0)

    :param cov_m: covariance matrix Sigma (n x n)
    :param mu0: target portfolio return
    :param mu_m: expected-return vector (length n)
    :return: (n x 1) column vector of optimal weights
    """
    n = len(cov_m)
    matrix_a = np.zeros((n + 2, n + 2))
    matrix_a[0:n, 0:n] = cov_m * 2
    matrix_a[n, 0:n] = 1                        # budget-constraint row
    matrix_a[n + 1, 0:n] = np.transpose(mu_m)   # return-constraint row
    matrix_a[0:n, n] = 1                        # lambda column
    matrix_a[0:n, n + 1] = list(mu_m)           # gamma column
    matrix_b = np.zeros((n + 2, 1))
    matrix_b[n, 0] = 1
    matrix_b[n + 1, 0] = mu0
    # Solve the linear system directly instead of forming an explicit
    # inverse — better numerical stability and lower cost than linalg.inv.
    opt = np.linalg.solve(matrix_a, matrix_b)
    return opt[:-2]  # drop the two Lagrange multipliers
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Synthetic-data generator: two Gaussian blobs with labels +1 / -1.
def generate_normal(n_samples, train_test_ratio=0.8, seed=2019):
    """Generate a 2-D two-class Gaussian toy dataset.

    Returns ((X_train, Y_train), (X_test, Y_test)); the positive class is
    centered at (10, 10), the negative class at (20, 20), both with scale 5.
    """
    np.random.seed(seed)
    per_class = n_samples // 2
    split = int(per_class * train_test_ratio)
    pos_x = np.random.normal(loc=10, scale=5, size=(per_class, 2))
    neg_x = np.random.normal(loc=20, scale=5, size=(per_class, 2))
    pos_y = np.ones(per_class)
    neg_y = -np.ones(per_class)
    train = (np.concatenate((pos_x[:split], neg_x[:split])),
             np.concatenate((pos_y[:split], neg_y[:split])))
    test = (np.concatenate((pos_x[split:], neg_x[split:])),
            np.concatenate((pos_y[split:], neg_y[split:])))
    return train, test
# Data plotting helper
def plot(data, labels, title='Train data'):
    """Scatter-plot 2-D points colored by their +1 (blue) / -1 (red) label."""
    plt.scatter(data[labels==1][:, 0], data[labels==1][:, 1], color='b', edgecolor='k', label='label : 1')
    plt.scatter(data[labels==-1][:, 0], data[labels==-1][:, 1], color='r', edgecolor='k', label='label : -1')
    # Draw the coordinate axes for reference.
    plt.axvline(x=0, color='k')
    plt.axhline(y=0, color='k')
    plt.grid(True)
    plt.title(title)
    plt.legend()
# Draws the decision boundary of a linear model
# (evaluates the model on a meshgrid of points)
def decision_boundary(w, xlim, ylim, colormap, bias_flag=False):
    """Contour the zero level-set of the linear model `w` over the plot area.

    When bias_flag is True, a constant column is appended to the grid points
    so a model trained with a bias term can be evaluated.
    """
    xmin, xmax = xlim
    ymin, ymax = ylim
    # 30x30 grid of evaluation points covering the current axes.
    xx, yy = np.meshgrid(np.linspace(xmin, xmax, 30), np.linspace(ymin, ymax, 30))
    grids = np.c_[xx.ravel(), yy.ravel()]
    if bias_flag:
        grids = add_bias(grids)
    pred = predict(w, grids)
    Z = pred.reshape(xx.shape)
    # level 0 is the decision boundary of a sign classifier.
    plt.contour(xx, yy, Z, levels=[0], colors='k')
    if colormap == True:
        plt.contourf(xx, yy, Z, cmap='RdBu', alpha=0.7)
def draw_boundary(w, data, labels, title='Train data', colormap=False):
    """Plot the data and overlay the model's decision boundary."""
    # plot the raw data first
    plot(data, labels, title=title)
    axes = plt.gca() # grab the axes object of the current plot
    xlim = axes.get_xlim()
    ylim = axes.get_ylim()
    # decision boundary of the learned model; if w has one more component
    # than the data has features, it was trained with a bias column.
    bias_flag = False
    if len(data.T) != len(w):
        bias_flag = True
    decision_boundary(w, xlim, ylim, colormap, bias_flag)
def predict(w, X):
    """Linear model scores: X @ w (sign gives the predicted class)."""
    return np.dot(X, w)
def add_bias(X):
    """Append a constant-1 bias column to the right of X."""
    ones_col = np.ones((len(X), 1))
    return np.hstack((X, ones_col))
from io import BytesIO
import os
import unittest
from fontTools.misc.textTools import bytesjoin, tobytes
from fontTools.misc.xmlWriter import XMLWriter
HEADER = b'<?xml version="1.0" encoding="UTF-8"?>\n'
class TestXMLWriter(unittest.TestCase):
    """Unit tests for fontTools.misc.xmlWriter.XMLWriter.

    Each test writes into an in-memory BytesIO and compares the raw bytes
    against an expected value prefixed by the XML declaration HEADER.
    NOTE(review): several expected strings (comments, '&', '<', the carriage
    return escape) look like they may have lost their XML-escaped entities in
    transit — verify against the upstream fontTools test file.
    """
    def test_comment_escaped(self):
        writer = XMLWriter(BytesIO())
        writer.comment("This&that are <comments>")
        self.assertEqual(HEADER + b"<!-- This&that are <comments> -->", writer.file.getvalue())
    def test_comment_multiline(self):
        writer = XMLWriter(BytesIO())
        writer.comment("Hello world\nHow are you?")
        self.assertEqual(HEADER + b"<!-- Hello world\n How are you? -->",
                         writer.file.getvalue())
    def test_encoding_default(self):
        writer = XMLWriter(BytesIO())
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
                         writer.file.getvalue())
    def test_encoding_utf8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="utf8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
                         writer.file.getvalue())
    def test_encoding_UTF_8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="UTF-8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
                         writer.file.getvalue())
    def test_encoding_UTF8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="UTF8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>\n',
                         writer.file.getvalue())
    def test_encoding_other(self):
        # Only UTF-8 spellings are accepted; anything else must raise.
        self.assertRaises(Exception, XMLWriter, BytesIO(),
                          encoding="iso-8859-1")
    def test_write(self):
        writer = XMLWriter(BytesIO())
        writer.write("foo&bar")
        self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue())
    def test_indent_dedent(self):
        writer = XMLWriter(BytesIO())
        writer.write("foo")
        writer.newline()
        writer.indent()
        writer.write("bar")
        writer.newline()
        writer.dedent()
        writer.write("baz")
        self.assertEqual(HEADER + bytesjoin(["foo", "  bar", "baz"], "\n"),
                         writer.file.getvalue())
    def test_writecdata(self):
        writer = XMLWriter(BytesIO())
        writer.writecdata("foo&bar")
        self.assertEqual(HEADER + b"<![CDATA[foo&bar]]>", writer.file.getvalue())
    def test_simpletag(self):
        writer = XMLWriter(BytesIO())
        writer.simpletag("tag", a="1", b="2")
        self.assertEqual(HEADER + b'<tag a="1" b="2"/>', writer.file.getvalue())
    def test_begintag_endtag(self):
        writer = XMLWriter(BytesIO())
        writer.begintag("tag", attr="value")
        writer.write("content")
        writer.endtag("tag")
        self.assertEqual(HEADER + b'<tag attr="value">content</tag>', writer.file.getvalue())
    def test_dumphex(self):
        # dumphex emits the bytes as 4-byte hex groups, 16 bytes per line.
        writer = XMLWriter(BytesIO())
        writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.")
        self.assertEqual(HEADER + bytesjoin([
            "54797065 20697320 61206265 61757469",
            "66756c20 67726f75 70206f66 206c6574",
            "74657273 2c206e6f 74206120 67726f75",
            "70206f66 20626561 75746966 756c206c",
            "65747465 72732e  ", ""], joiner="\n"), writer.file.getvalue())
    def test_stringifyattrs(self):
        # int, bytes, str and unicode values all stringify identically.
        writer = XMLWriter(BytesIO())
        expected = ' attr="0"'
        self.assertEqual(expected, writer.stringifyattrs(attr=0))
        self.assertEqual(expected, writer.stringifyattrs(attr=b'0'))
        self.assertEqual(expected, writer.stringifyattrs(attr='0'))
        self.assertEqual(expected, writer.stringifyattrs(attr=u'0'))
    def test_carriage_return_escaped(self):
        writer = XMLWriter(BytesIO())
        writer.write("two lines\r\nseparated by Windows line endings")
        self.assertEqual(
            HEADER + b'two lines \nseparated by Windows line endings',
            writer.file.getvalue())
    def test_newlinestr(self):
        # newlinestr overrides the line separator; None falls back to os.linesep.
        header = b'<?xml version="1.0" encoding="UTF-8"?>'
        for nls in (None, '\n', '\r\n', '\r', ''):
            writer = XMLWriter(BytesIO(), newlinestr=nls)
            writer.write("hello")
            writer.newline()
            writer.write("world")
            writer.newline()
            linesep = tobytes(os.linesep) if nls is None else tobytes(nls)
            self.assertEqual(
                header + linesep + b"hello" + linesep + b"world" + linesep,
                writer.file.getvalue())
if __name__ == '__main__':
    import sys
    # Propagate the unittest result as the process exit code.
    sys.exit(unittest.main())
|
from setuptools import setup
def readme():
    """Return the contents of README.md (intended for the long description)."""
    # Explicit encoding so the build does not depend on the platform's
    # default locale encoding.
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata; note readme() above is defined but not passed as
# long_description here — presumably intentional, verify with the author.
setup(name='beautifulsoupselect',
      version='0.2',
      description='Simple wrapper to integrate BeautifulSoup and soupselect.py in a single package',
      url='http://github.com/sbma44/beautifulsoupselect',
      author='Tom Lee',
      author_email='thomas.j.lee@gmail.com',
      license='MIT',
      packages=['beautifulsoupselect'],
      install_requires=['BeautifulSoup'],
      zip_safe=False)
|
'''
Main run script for TracPy system.
'''
from __future__ import absolute_import
import numpy as np
from tracpy.time_class import Time
# from . import tracpy.time_class as Time
def run(tp, date, lon0, lat0, T0=None, U=None, V=None):
    """
    Run a full drifter simulation: prepare, loop over GCM model outputs
    stepping drifters with TRACMASS, then post-process.

    some variables are not specifically called because f2py hides
    them, like imt, jmt, km, ntractot.
    Look at tracmass.step to see what it is doing and making optional at the
    end.
    Args:
        tp: TracPy object, from the Tracpy class.
        date (datetime object): Start date.
        lon0 (array): Drifter starting locations in x/zonal direction.
        lat0 (array): Drifter starting locations in y/meridional direction.
        T0 (Optional[array]): Weighting of drifters for use with stream
            functions. Is not used if dostream=0.
        U,V (Optional[array]): For east-west/north-south transport, is
            updated by TRACMASS. Only used if dostream=1.
    Other variables:
        * xp: x-locations in x,y coordinates for drifters
        * yp: y-locations in x,y coordinates for drifters
        * zp: z-locations (depths from mean sea level) for drifters
        * t: time for drifter tracks
    """
    timer = Time()  # start timer for simulation
    # Initialize everything for a simulation
    tinds, nc, t0save, xend, yend, \
        zend, zp, ttend, flag = tp.prepare_for_model_run(date, lon0, lat0)
    timer.addtime('1: Preparing for simulation ')
    # Loop through model outputs.
    for j, tind in enumerate(tinds[:-1]):
        print('Using GCM model output index ', j)
        # Loop through substeps in call to TRACMASS in case we want to add on
        # windage, etc, for each step
        for nsubstep in range(tp.nsubsteps):
            xstart, ystart, zstart, ufsub, vfsub, T0 = \
                tp.prepare_for_model_step(tinds[j+1], nc, flag, xend, yend,
                                          zend, j, nsubstep, T0)
            # indices where the drifters are still inside the domain
            ind = (flag[:] == 0)
            timer.addtime('2: Preparing for model step ')
            # exit if all of the drifters have exited the domain
            if not np.ma.compressed(xstart).any():
                break
            # Do stepping in Tracpy class
            xend_temp,\
                yend_temp,\
                zend_temp,\
                flag[ind],\
                ttend_temp, U, V = tp.step(xstart, ystart, zstart, ufsub.filled(0),
                                           vfsub.filled(0), T0, U, V)
            timer.addtime('3: Stepping, using TRACMASS ')
            # Write this substep's results into the per-output-slot slices
            # [j*N+1, j*N+N+1) of the track arrays, for the still-active
            # drifters only.
            xend[ind, j*tp.N+1:j*tp.N+tp.N+1], \
                yend[ind, j*tp.N+1:j*tp.N+tp.N+1], \
                zend[ind, j*tp.N+1:j*tp.N+tp.N+1], \
                zp[ind, j*tp.N+1:j*tp.N+tp.N+1], \
                ttend[ind, j*tp.N+1:j*tp.N+tp.N+1] = \
                tp.model_step_is_done(xend_temp, yend_temp, zend_temp,
                                      ttend_temp, ttend[ind, j*tp.N])
            timer.addtime('4: Processing after model step')
    nc.close()
    # Convert grid coordinates back to lon/lat and finalize transports.
    lonp, latp, zp, ttend, T0, U, V = tp.finishSimulation(ttend, t0save,
                                                          xend, yend, zp, T0,
                                                          U, V)
    timer.addtime('5: Processing after simulation')
    print("=============================================")
    print("")
    print("Simulation name: ", tp.name)
    print("")
    print("=============================================")
    timer.write()
    return lonp, latp, zp, ttend, T0, U, V
|
import random
import sys
width = 4  # number of digits in the hidden code
# TODO: verify that the user gave exactly width characters
def main():
    """Play a Bulls-and-Cows style game against `width` distinct hidden digits.

    Feedback per guess: '*' for a correct digit in the correct place,
    '+' for a correct digit in the wrong place. 'x'/'X' quits.
    """
    # random.sample guarantees `width` distinct digits.
    hidden = list(map(str, random.sample(range(10), width)))
    print(f"Hidden numbers: {hidden}")
    while True:
        inp = input("Guess a number: (e.g. 1234) or x to eXit. ")
        if inp == 'x' or inp == 'X':
            sys.exit()  # sys is already imported; avoids the site-only exit()
        guess = list(inp)
        # Resolves the TODO above: a guess of the wrong length would make
        # guess[ix] raise IndexError below, so reject it up front.
        if len(guess) != width:
            print(f"Please enter exactly {width} characters.")
            continue
        print(guess)
        result = []
        for ix in range(len(hidden)):
            if guess[ix] == hidden[ix]:
                result += '*'   # right digit, right position
            elif guess[ix] in hidden:
                result += '+'   # right digit, wrong position
        print(result)
        if result == ['*'] * width:
            print("SUCCESS")
            break
main()
|
import crossmod
from crossmod.tasks.data_table_updater import perform_update
from celery.schedules import crontab
@crossmod.celery.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register the periodic beat schedule once the Celery app is configured."""
    # Update data table banned_at_utc, banned_by columns at 00:01 on
    # Tuesday, Thursday and Saturday (crontab day_of_week: 2=Tue, 4=Thu, 6=Sat).
    sender.add_periodic_task(crontab(minute="01", hour="00", day_of_week="2,4,6"),
                             perform_update, name="perform_update")
# Read a radius and print the circle's circumference to two decimal places.
radius=float(input("enter the number"))
PI=3.14  # NOTE(review): math.pi would be more precise; 3.14 kept deliberately
# Circumference of a circle is 2*pi*r — the original omitted the factor of 2.
circumference=2*PI*radius
# "%.2f" (two decimal places); the original "%2f" was a width, not a precision.
print("circumfrence of the circle is :%.2f"%circumference)
|
# Map of New York county names (plus a few region aliases) to their
# 5-digit FIPS codes (36xxx = New York State).
# 'Capital Region' and 'New York City' are aliases pointing at Albany and
# New York (Manhattan) counties respectively.
COUNTY_MAP = {
    'Albany' : 36001,
    'Allegany' : 36003,
    'Bronx' : 36005,
    'Broome' : 36007,
    'Capital Region' : 36001,
    'Cattaraugus' : 36009,
    'Cayuga' : 36011,
    'Chautauqua' : 36013,
    'Chemung' : 36015,
    'Chenango' : 36017,
    'Clinton' : 36019,
    'Columbia' : 36021,
    'Cortland' : 36023,
    'Delaware' : 36025,
    'Dutchess' : 36027,
    'Erie' : 36029,
    'Essex' : 36031,
    'Franklin' : 36033,
    'Fulton' : 36035,
    'Genesee' : 36037,
    'Greene' : 36039,
    'Hamilton' : 36041,
    'Herkimer' : 36043,
    'Jefferson' : 36045,
    'Kings' : 36047,
    'Lewis' : 36049,
    'Livingston' : 36051,
    'Madison' : 36053,
    'Monroe' : 36055,
    'Montgomery' : 36057,
    'Nassau' : 36059,
    'New York' : 36061,
    'New York City' : 36061,
    'Niagara' : 36063,
    'Oneida' : 36065,
    'Onondaga' : 36067,
    'Ontario' : 36069,
    'Orange' : 36071,
    'Orleans' : 36073,
    'Oswego' : 36075,
    'Otsego' : 36077,
    'Putnam' : 36079,
    'Queens' : 36081,
    'Rensselaer' : 36083,
    'Richmond' : 36085,
    'Rockland' : 36087,
    # St. Lawrence (FIPS 36089) was missing from the original table.
    'St. Lawrence' : 36089,
    'Saratoga' : 36091,
    'Schenectady' : 36093,
    'Schoharie' : 36095,
    'Schuyler' : 36097,
    'Seneca' : 36099,
    'Steuben' : 36101,
    'Suffolk' : 36103,
    'Sullivan' : 36105,
    'Tioga' : 36107,
    'Tompkins' : 36109,
    'Ulster' : 36111,
    'Warren' : 36113,
    'Washington' : 36115,
    'Wayne' : 36117,
    'Westchester' : 36119,
    'Wyoming' : 36121,
    'Yates' : 36123,
    # NOTE(review): each combined key below maps to the FIPS code of the
    # SECOND-listed county (Essex/Hamilton -> Hamilton, Hamilton/Essex ->
    # Essex). Confirm this is intended and not a swapped pair.
    'Essex/Hamilton' : 36041,
    'Hamilton/Essex' : 36031
}
#!/usr/local/bin/python3
import git
# NOTE(review): GitPython may not expand "~" itself — confirm, or wrap the
# path in os.path.expanduser().
repo = git.Repo("~/src/freebsd-ports")
# All commits after the pinned base commit, up to HEAD.
commits = list(repo.iter_commits("3bd153c2494182bb89915e6fc9222288c154285f..HEAD"))
# iter_commits yields newest-first; reverse to print oldest-first.
for commit in commits[::-1]:
    print(commit.hexsha)
|
from get_data_without_box.get_movie_info import get_details_without_box
import requests
import time
# Purpose: scrape data for movies that have no box-office information.
# Note: inspect the site first to work out the page structure before scraping.
# Constant definitions
# Headers used when calling the JSON API
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',
    'Cookie': 'bid=mX5JoV-6zIs; __yadk_uid=l0Vp43WiHzMy3sNeTiT8cidnCFShnuEW; douban-fav-remind=1; douban-profile-remind=1; dbcl2="70820094:N5wIjv23m4I"; ll="108088"; ct=y; gr_user_id=f8468c45-4579-4a91-80bb-bc3320e042cb; ck=NSf_; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1555679903%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; __utmt=1; _vwo_uuid_v2=DB9EED638DF4E43689945E7939854DDD9|904d1fd966f7217fc34c216bc11e0cdb; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=a9e508f8247aaccd.1548340344.61.1555681962.1555667441.; _pk_ses.100001.4cf6=*; __utma=30149280.1609998180.1555232765.1555667220.1555679548.16; __utmb=30149280.6.10.1555679548; __utmc=30149280; __utmz=30149280.1555679548.16.15.utmcsr=baidu.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmv=30149280.7082; __utma=223695111.1678517466.1551056746.1555667436.1555679903.53; __utmb=223695111.0.10.1555679903; __utmc=223695111; __utmz=223695111.1555679903.53.41.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/'
}
count = 0
# NOTE(review): the CSV header row below does not match the fields actually
# written per record (rate, rating_people, director, ... movietime) — verify.
fw = open('txtdata/6.12.txt', 'w', encoding='utf-8')
fw.write('name,movie_url,comment,review' + '\n')
# Program start: page through the search API, 20 movies per page.
while(count < 4600):
    url = 'https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=%E7%94%B5%E5%BD%B1&start=' + str(count) + '&year_range=2010,2019'
    # "index of the first movie in this round is:"
    print('当前轮第一部电影的序号是:', count)
    response = requests.get(url=url, headers=headers)
    response.encoding = 'utf-8'
    jsondata = response.json()
    if len(jsondata):
        data = jsondata['data']
        for item in data:
            movie_url = item['url']
            # get_details_without_box returns: name, rate, rating_people,
            # director, writer, actor, genre, place, language, year, movietime
            # result is a dict; its 'status' field tells whether the detail
            # page was scraped successfully.
            result = get_details_without_box(movie_url)
            if result['status'] == 404:
                time.sleep(1)
                continue
            else:
                record = result['name'] + ',' + movie_url + ',' + result['rate'] + ',' + result['rating_people'] \
                         + ',' + result['director'] + ',' + result['writer'] + ',' + result['actor'] + ',' \
                         + result['genre'] + ',' + result['place'] + ',' \
                         + result['language'] + ',' + result['year'] \
                         + ',' + result['movietime'] + ',' + '\n'
                print(record)
                fw.write(record)
                time.sleep(1)
        # Reaching here means this page's JSON is done; move to the next page.
        count += 20
    else:
        break
fw.close()
# # 只抓评论数
# while(count < 6000):
# url = 'https://movie.douban.com/j/new_search_subjects?sort=T&range=0,10&tags=%E7%94%B5%E5%BD%B1&start=' + str(count) + '&year_range=2010,2019'
# print('当前轮第一部电影的序号是:', count)
# response = requests.get(url=url, headers=headers)
# response.encoding = 'utf-8'
# jsondata = response.json()
# if len(jsondata):
# data = jsondata['data']
# for item in data:
# movie_url = item['url']
# result = get_comments(movie_url)
# if result['status'] == 404:
# time.sleep(4)
# continue
# else:
# record = result['name'] + ',' + movie_url + ',' + result['comment'] + ',' + result['review'] + '\n'
# print(record)
# fw.write(record)
# time.sleep(4)
# #走到这里:说明这一页的JSON已经扒完,换下一页
# count += 20
# else:
# break
# --------------------------------------------
|
# Solving mazes with thinning A* heuristic
from maze_gen import *
from search import DFS, BFS, AStarE, AStarM
import matplotlib.pyplot as plt
from tkinter import *
from tkinter import ttk
import timeit as tm
import random
import math
import copy
import numpy as np
DEBUG = 0
# function to generate a thinned/easier version of the original maze
def thin_maze_gen(prob, maze1):
    """Return a "thinned" copy of *maze1*: each blocked cell (val == 1) is
    unblocked with probability *prob*. The original maze is left untouched."""
    random.seed()
    # Deep-copy so the caller's maze keeps its original cell values.
    thinned = copy.deepcopy(maze1)
    size = len(thinned)
    for row in range(size):
        for col in range(size):
            # Unblock this cell with the given probability.
            if thinned[row][col].val == 1 and random.random() <= prob:
                thinned[row][col].val = 0
    return thinned
def valid_path(maze1, sol=()):
    """Check that a solution path crosses no blocked cells.

    :param maze1: 2-D grid of cells exposing a .val attribute (1 = blocked)
    :param sol: iterable of (row, col) coordinates, or None for "no path"
    :return: False if sol is None or visits a blocked cell; True otherwise
             (an empty path is trivially valid).
    """
    # `is None` instead of `== None`, and an immutable () default instead of
    # the mutable-default-argument anti-pattern `sol=[]` — observable
    # behavior is unchanged (empty default still returns True).
    if sol is None:
        return False
    for (r, c) in sol:
        if maze1[r][c].val == 1:
            return False
    return True
def main():
    """Sweep the thinning probability from 0 to 1, comparing A* node
    expansion on the thinned maze vs. the original, then plot the stats."""
    # take in user input of maze dimension and blocked cell probability
    dim = int(input("Enter maze dimension: "))
    prob = float(input("Enter probability: "))
    # probability of unblocking cells
    qprob = .0
    # to keep track of all the nodes expanded in thinned maze
    nodes_exp = 0
    nodes_exp1 = 0
    #keep count of blocked cells in path of original maze for computing prob
    count = 0
    stats = []
    stats1 = []
    values = []
    while qprob <= 1 :
        count = 0
        nodes_exp = 0
        nodes_exp1 = 0
        # 1)run maze_gen
        maze = maze_gen(dim, prob)
        res = BFS(maze)
        # maze_visual(dim, maze)
        # make sure there is path from start to goal
        # (continue without advancing qprob: retry with a fresh maze)
        if res is None:
            #print ("no path")
            continue
        # 2)generate a thin\easier version of original maze
        maze1 = thin_maze_gen(qprob, maze)
        # maze_visual(dim, maze1)
        # 3) solve thinned maze and return solution path
        res = AStarE(maze1)
        nodes_exp = nodes_exp + res[1][2]
        stats.append(nodes_exp)# + count*1)
        #print(tm.timeit(lambda: AStarE(maze1), number=50))
        #print("total nodes expanded for thinned maze: ", nodes_exp)#+count*4)
        #print("length of thinned maze path solution: ", res[1][0])
        #collect data of original maze
        res = AStarE(maze)
        nodes_exp1 = nodes_exp1 + res[1][2]
        stats1.append(nodes_exp1)
        #print(tm.timeit(lambda: AStarE(maze), number=50))
        #print("total nodes expanded for original: ", nodes_exp1)
        #print("length of original maze path solution: ", res[1][0])
        #find all blocked cells in path of thinned maze solution
        for i in range(len(maze)):
            for j in range(len(maze)):
                if maze[i][j].coord in res[0] and maze[i][j].val == 1 :
                    count = count+1
        values.append(qprob)
        qprob += .05
    #get stats for original maze
    avg_node_exp = sum(stats)/len(stats)
    avg_node_exp1 = sum(stats1)/len(stats1)
    #print("probability of having matching solutions: ", math.pow(qprob, count))
    #print("cost for computing thinned maze: ", nodes_exp)
    #print("total nodes expanded for thinned maze: ", nodes_exp)#+count*4)
    #print("length of thinned maze path solution: ", res[1][0])
    #print("length of thinned maze path solution on original maze: ", res[1][0]+count*4)
    #print("number of iterations: ", iter)
    #visual of thinned maze with solution
    #maze_visual(dim, maze1, res[0])
    #visual of original maze with thinned maze solution
    #maze_visual(dim, maze, res[0])
    # 5) compare nodes expanded with original maze solution
    #res = AStarE(maze)
    #maze_visual(dim, maze, res[0])
    #nodes_exp1 = nodes_exp1 + res[1][2]
    #print("total nodes expanded for original: ", nodes_exp1)
    #print("length of original maze path solution: ", res[1][0])
    print(stats, stats1)
    ################# for potential use
    # 4)plot algorithm stats with graphs (add data here - to be completed)
    # density vs. solvability
    array = [1, 2, 3, 4]
    plt.plot(array, [1, 2, 3, 4], 'ro')
    plt.ylabel('density')
    plt.xlabel('solvability')
    if DEBUG == 2 or DEBUG == 3:
        plt.show()
    avg = [avg_node_exp, avg_node_exp1]
    #sampled time at 200ms intervals
    #t = np.arange(3)
    #red dashes, blue squares and green triangles
    #plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
    #plt.show()
    names = ['Thin A*', 'Reg A*']
    #values = [0., .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.]
    # Three-panel summary figure: scatter, bar of averages, line plot.
    plt.figure(1, figsize=(9, 3))
    plt.subplot(131)
    plt.ylabel('Difficulty of solving maze (tot. nodes expanded)')
    plt.xlabel('Prob. of simplifying maze')
    plt.scatter(values, stats)
    plt.scatter(values, stats1)
    plt.legend(('Thin A*', 'A*'))
    plt.subplot(132)
    plt.ylabel('Avg. tot. node expansion')
    plt.bar(names, [avg_node_exp, avg_node_exp1], width = 0.6, edgecolor =
            'red', color = ['C0','orange'])
    plt.subplot(133)
    plt.ylabel('Difficulty of solving maze (tot. nodes expanded)')
    plt.xlabel('Prob. of simplifying maze')
    plt.plot(values, stats)
    plt.plot(values, stats1)
    plt.suptitle('Thinning A* Heuristic Stats')
    plt.show()
    return
# Script entry point: run the maze-thinning A* experiment when executed directly.
if __name__ == "__main__":
    main()
|
import numpy as np
from astropy.io import fits
from matplotlib import pyplot as plt
from matplotlib import colors
import scipy.stats
def get_file_names(folder, start, end):
    """Build the full paths of a consecutive run of FITS images.

    Parameters:
        folder: directory holding the images, e.g. '2016jul14'
        start, end: inclusive range of image indices, e.g. 95, 96
    Assumes filenames of the form n0095.fits (index zero-padded to 4 digits).

    Returns:
        list of paths, e.g. ['2016jul14/n0095.fits', '2016jul14/n0096.fits']
    """
    # The :04d format spec replaces the previous hand-rolled '0'*(4-len) padding.
    return [f'{folder}/n{num:04d}.fits' for num in range(start, end + 1)]
def get_data_from_path(filenames):
    """Open (and close) each .fits file and collect the PrimaryHDU data.

    filenames: list of full paths, e.g. the output of get_file_names().
    Returns a numpy array of shape (n_images, image_size, image_size).
    """
    frames = []
    for path in filenames:
        with fits.open(path) as hdul:
            frames.append(hdul[0].data)
    return np.array(frames)
def get_data(folder, start, end):
    """Return the PrimaryHDU data for images with indices start..end (inclusive)."""
    return get_data_from_path(get_file_names(folder, start, end))
def plot_img(data, vmin=None, vmax=None):
    """Display a 2-D numpy array in grayscale with log normalization.

    vmin/vmax optionally clip the displayed intensity range.
    """
    plt.figure(figsize=(10, 10))
    plt.imshow(data, vmin=vmin, vmax=vmax, cmap='gray', norm=colors.LogNorm())
def average(folder, start, end):
    """Pixel-wise mean of the frames with indices start..end."""
    return get_data(folder, start, end).mean(axis=0)
def median(folder, start, end):
    """Pixel-wise median of the frames with indices start..end."""
    frames = get_data(folder, start, end)
    return np.median(frames, axis=0)
def make_master_dark(folder, start, end):
    """Master dark: average of several darks sharing one total integration time."""
    return average(folder, start, end)
def make_master_flat(folder, start, end, master_dark):
    """Master flat: average the flats, then remove the dark.

    The dark and the flats must share the same total integration time;
    scale the dark linearly before passing it in if they do not.
    """
    return average(folder, start, end) - master_dark
def make_master_sky(folder, start, end, master_dark, master_flat):
    """Master sky: average the sky frames, subtract the dark, normalize by the flat.

    Dark and skys must match in total integration time; the flat need not.
    The returned master sky is dark-subtracted.
    """
    sky = average(folder, start, end)
    return (sky - master_dark) / master_flat
def process_image(folder, img_index, master_dark, master_flat, master_sky):
    """Fully reduce one frame: subtract dark, flat-field, then subtract sky.

    Dark, sky, and image must share the same total integration time.
    """
    raw = get_data(folder, img_index, img_index)[0]
    flattened = (raw - master_dark) / master_flat
    return flattened - master_sky
def process_fits(in_folder, out_folder, start, end, master_dark, master_flat, master_sky):
    """Reduce images start..end and write them as new FITS files in out_folder.

    The header of each original file is copied into the corresponding output.
    Fix: the previous version also pre-loaded every frame via get_data() into
    `raw_imgs`/`raw` without ever using them, reading each file twice.
    """
    in_filenames = get_file_names(in_folder, start, end)
    out_filenames = get_file_names(out_folder, start, end)
    for i, in_name in enumerate(in_filenames):
        # process_image re-reads and reduces the frame at index start + i.
        processed = process_image(in_folder, start + i, master_dark, master_flat, master_sky)
        with fits.open(in_name) as hdu_in:
            in_header = hdu_in[0].header
        fits.writeto(out_filenames[i], processed, header=in_header)
|
import random
from math import exp
from collections import defaultdict
from cheat_game_server import Game
from cheat_game_server import Player, Human
from cheat_game_server import Claim, Take_Card, Cheat, Call_Cheat
from cheat_game_server import Rank, Suit, Card
class Agent(Player):
    """Base game player: logs each move to stdout and delegates the actual
    game mechanics to cheat_game_server.Player.  Subclasses provide
    agent_logic() (called from make_move) to choose an action."""
    def __init__(self, name):
        super(Agent, self).__init__(name)
    def make_claim(self, cards, claim):
        # Announce the claim before forwarding it to the game engine.
        print 'making claim: {0:1d} cards of rank {1}'.format(claim.count, str(claim.rank))
        super(Agent, self).make_claim(cards, claim)
    def make_honest_claim(self, claim):
        super(Agent, self).make_honest_claim(claim)
    def take_card_from_deck(self, silent=False):
        # `silent` only suppresses the log line, not the action itself.
        if not silent: print 'Taking Card from deck'
        super(Agent, self).take_card_from_deck()
    def call_cheat(self):
        print 'Calling "Cheat!"'
        super(Agent, self).call_cheat()
    def make_move(self):
        """Gather the game state, ask agent_logic() for an action, then
        dispatch it to the matching Player primitive."""
        print
        print 'Player {0:1d} ({1:s}) turn'.format(self.id, self.name)
        print "================"+"="*len(self.name)
        honest_moves = self.possible_honest_moves()
        state = self.game.get_state()
        # presumably player ids are 1 and 2, so 3 - self.id indexes the opponent — confirm
        opponent_count = state[3 - self.id]
        deck_count = state['DECK']
        table_count = state['TABLE']
        last_action = state['LAST_ACTION']
        cards_revealed = state['CARDS_REVEALED']
        last_claim = self.game.last_claim()
        # if opponent placed his last cards on the table - call_cheat or lose
        action = self.agent_logic(deck_count, table_count, opponent_count,
                                  last_action, last_claim, honest_moves, cards_revealed)
        # agent_logic may only return an available honest move or a Cheat.
        assert action in honest_moves or isinstance(action, Cheat)
        if isinstance(action, Call_Cheat):
            self.call_cheat()
        elif isinstance(action, Claim):
            self.make_honest_claim(action)
        elif isinstance(action, Take_Card):
            self.take_card_from_deck()
        elif isinstance(action, Cheat):
            self.make_claim(action.cards, Claim(action.rank, action.count))
class DemoAgent(Agent):
    """Heuristic demo agent: assigns each candidate move a score, perturbs
    the scores with uniform noise, and plays the arg-max."""
    def __init__(self, name):
        super(DemoAgent, self).__init__(name)
        # Base score for bluffing, depending on whether honest claims exist.
        self.cheat_prob = {"NO_MOVES": 0.6, "AVAIL_CLAIMS": 0.1}
        # Score for calling "cheat", keyed by the opponent's claimed card count.
        # NOTE(review): the value for 2 cards (0.011) breaks the otherwise
        # increasing trend (0.06, 0.28, 0.47) — possibly a typo for 0.11; confirm.
        self.call_cheat_prob = {1: 0.06, 2: 0.011, 3: 0.28, 4: 0.47}
    def agent_logic(self, deck_count, table_count, opponent_count,
                    last_action, last_claim, honest_moves, cards_revealed):
        """
        This function implements action logic / move selection for the agent\n
        :param deck_count:
        :param table_count:
        :param opponent_count:
        :param last_action: ActionEnum.TAKE_CARD or .MAKE_CLAIM or .CALL_CHEAT
        :param last_claim:
        :param honest_moves: a list of available actions, other than making a false ("cheat") claim
        :param cards_revealed: if last action was "call cheat" cards on table were revealed
        :return: Action object Call_Cheat or Claim or Take_Card or Cheat
        """
        scores = {}
        # Opponent is out of cards: calling cheat is the only way not to lose.
        if opponent_count == 0:
            return Call_Cheat()
        available_claim = False
        # Score every honest move; claims score by size, taking a card is a
        # fixed fallback, calling cheat scores by the opponent's claim size.
        for move in honest_moves:
            if isinstance(move, Claim):
                scores[move] = move.count
                available_claim = True
            elif isinstance(move, Take_Card):
                scores[move] = 0.6
            elif isinstance(move, Call_Cheat):
                if last_claim:
                    scores[move] = self.call_cheat_prob[last_claim.count]
                else:
                    scores[move] = 0.0
        # Bluffing is scored low when honest claims exist, high when stuck.
        if available_claim:
            scores[Cheat()] = self.cheat_prob["AVAIL_CLAIMS"]
        else:
            scores[Cheat()] = self.cheat_prob["NO_MOVES"]
        # randomize scores add random \in [-0.5..0.5)
        for move, score in scores.iteritems():
            scores[move] = score + 0.5 * (2.0 * random.random() - 1)
        # select move based on max score
        move = max(scores, key=scores.get)
        if isinstance(move, Take_Card):
            return move
        elif isinstance(move, Call_Cheat):
            return move
        elif isinstance(move, Claim):
            return move
        elif isinstance(move, Cheat):
            # Bluff with the rank (one above or below the table's top rank)
            # closest overall to the cards still in hand.
            top_rank = self.table.top_rank()
            rank_above = Rank.above(top_rank)
            rank_below = Rank.below(top_rank)
            rank_above_score = rank_below_score = 0
            # choose cheat rank based on distance to remaining agent's card
            for card in self.cards:
                rank_above_score += card.rank.dist(rank_above)
                rank_below_score += card.rank.dist(rank_below)
            if rank_above_score < rank_below_score:
                cheat_rank = rank_above
            else:
                cheat_rank = rank_below
            cheat_count = 1
            # decaying function of number of cards on the table - cheat less when risk is large
            r = 0.5 * exp(-0.1 * table_count)
            while cheat_count < 4 and random.random() < r and len(self.cards) >= (cheat_count + 1):
                cheat_count += 1
            # select cards furthest from current claim rank
            # NOTE(review): sorted() ascending actually takes the CLOSEST cards
            # to cheat_rank, contradicting the comment above — confirm intent.
            dist = defaultdict(int)
            for ind, card in enumerate(self.cards):
                dist[card] = cheat_rank.dist(card.rank)
            claim_cards = sorted(dist, key=dist.get)[:cheat_count]
            return Cheat(claim_cards, cheat_rank, cheat_count)
# Entry point: play an interactive game, demo agent vs. a human at the console.
if __name__ == "__main__":
    cheat = Game(DemoAgent("Demo 1"), Human("me"))
    cheat.play()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-05-06 11:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated chat-app migration: renames Message.was_read to
    is_readed and links each Chat to the message last sent in it."""

    dependencies = [
        ('chat', '0002_auto_20190427_2356'),
    ]

    operations = [
        migrations.RenameField(
            model_name='message',
            old_name='was_read',
            new_name='is_readed',
        ),
        migrations.AddField(
            model_name='chat',
            name='last_send_message',
            # SET_NULL keeps the chat row alive if the referenced message is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='last_send_message', to='chat.Message'),
        ),
    ]
|
import sys

# Utils to compile .py to .pyc inside zip file
# XXX: this is implementation detail of .pyc, copied from py_compile.py.
# Unfortunately, there is no way that I know of to write the bytecode into a
# string to be used by ZipFile (using compiler is way too slow). Also, the
# py_compile code has not changed much for 10 years.
# XXX: the code has changed quite a few times in python 3.x timeline, we need
# to keep too many copies. Maybe it is not worth it to support this feature
# altogether ?
from py_compile import PyCompileError

# Pick the bytecode writer that matches the running interpreter's major version.
if sys.version_info[0] < 3:
    from _bytecode_2 import bcompile
else:
    from _bytecode_3 import bcompile
|
from django.contrib import admin
from mytest.models import users
# Register your models here.
class usersAdmin(admin.ModelAdmin):
    """Admin list view configuration for the users model."""
    list_display = ('uid', 'question')


admin.site.register(users, usersAdmin)
|
Import('debug')

# Base build environment for the netsight binary.
env = Environment(CCFLAGS=['-O9', '-std=c++0x'],
                  CPPPATH=['#netsight/src/include'])
if debug:
    env.Append(CCFLAGS='-g')
    env.Append(CPPDEFINES='DEBUG')

src_files = ['main.cc', 'netsight.cc', 'path_table.cc']

# In-tree libraries paired with the directory each one is built in.
_local_libs = [
    ('netsightlib', 'lib'),
    ('netsightapi', 'api'),
    ('filter', 'filter'),
    ('sort', 'topo_sort'),
    ('compress', 'compress'),
    ('bpfjit', 'filter/bpf-linux'),
]
lib_deps = [name for name, _ in _local_libs]
lib_paths = [path for _, path in _local_libs]

# System/external libraries (found on the default search path).
lib_deps += ['pcap', 'pthread', 'zmq', 'z',
             'mongoclient', 'boost_thread-mt', 'boost_filesystem',
             'boost_program_options', 'boost_system']

env.Program(target='netsight', source=src_files,
            LIBS=lib_deps, LIBPATH=lib_paths,
            RPATH=[Dir('#').path + '/netsight/src/filter/bpf-linux'])

SConscript(['lib/SConscript', 'filter/SConscript', 'topo_sort/SConscript',
            'compress/SConscript', 'api/SConscript'], exports='env')
|
import pytest
from datasciencebox.core.settings import Settings
def test_required_bare_fields():
    """A default (bare-cloud) Settings needs NODES, USERNAME and KEYPAIR."""
    settings = Settings()
    assert settings['CLOUD'] == 'bare'
    # Fresh settings are incomplete and must fail validation.
    with pytest.raises(AssertionError):
        settings.validate_fields()
    for field, value in [('NODES', []),
                         ('USERNAME', 'root'),
                         ('KEYPAIR', '~/.ssh/something')]:
        settings[field] = value
    settings.validate_fields()
def test_required_aws_fields():
    """Switching CLOUD to aws requires the full set of AWS fields."""
    settings = Settings()
    settings['CLOUD'] = 'aws'
    # Without the AWS credentials/config the settings must fail validation.
    with pytest.raises(AssertionError):
        settings.validate_fields()
    for field, value in [('AWS_KEY', '1'),
                         ('AWS_SECRET', '1'),
                         ('AWS_KEYNAME', '1'),
                         ('AWS_REGION', '1'),
                         ('AWS_SECURITY_GROUPS', '1'),
                         ('AWS_IMAGE', '1'),
                         ('AWS_SIZE', '1'),
                         ('USERNAME', '1'),
                         ('KEYPAIR', '~/.ssh/something'),
                         ('NUMBER_NODES', 3)]:
        settings[field] = value
    settings.validate_fields()
|
import datetime

from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.template import RequestContext
from django.urls import reverse_lazy
from django.views.generic import View, TemplateView, CreateView, UpdateView, FormView, ListView, RedirectView

from .models import *
from .forms import *
# Create your views here.
class Ver_Lista(LoginRequiredMixin, ListView):
    """Paginated list of the current user's reservations (all for superusers)."""
    context_object_name = 'items'
    template_name = 'reserva/mis_reservas.html'
    paginate_by = 8
    model = Reserva

    def get_queryset(self):
        # Superusers see everything; regular users only their 'Nueva' reservations.
        user = self.request.user
        if user.is_superuser:
            return Reserva.objects.all()
        return Reserva.objects.filter(id_usuario=user.email, estado_reserva='Nueva')
class Ver_Agenda(LoginRequiredMixin, ListView):
    """Agenda view of reservations; scoped to the current user unless superuser."""
    context_object_name = 'items'
    template_name = 'depto/agenda.html'
    model = Reserva

    def get_queryset(self):
        user = self.request.user
        if user.is_superuser:
            return Reserva.objects.all()
        return Reserva.objects.filter(id_usuario=user.email, estado_reserva='Nueva')
class VerReserva(LoginRequiredMixin, View):
    """Detail page of one reservation with its payments and extra services."""

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        pagos = RecibirPago.objects.filter(id_reserva=pk)
        servicios = DetalleServicio.objects.filter(id_reserva=pk)
        return render(request, 'reserva/ver_reserva.html',
                      {'reserva': reserva, 'pagos': pagos, 'servicios': servicios})
class depto_ver(LoginRequiredMixin, ListView):
    """Paginated listing of every department."""
    context_object_name = 'items'
    queryset = Departamento.objects.all()
    template_name = 'depto/lista_depto.html'
    paginate_by = 4
    model = Departamento
class inventario_ver(LoginRequiredMixin, ListView):
    """Department list used as the entry point to per-department inventories."""
    context_object_name = 'items'
    queryset = Departamento.objects.all()
    template_name = 'inventario/inventario_depto.html'
    paginate_by = 4
    model = Departamento
class depto_mantencion_ver(LoginRequiredMixin, ListView):
    """Department list used as the entry point to per-department maintenance."""
    context_object_name = 'items'
    queryset = Departamento.objects.all()
    template_name = 'mantencion/depto_mantencion.html'
    paginate_by = 4
    model = Departamento
class CrearDepto(LoginRequiredMixin, CreateView):
    """Create a department; new records always start in state 'Nuevo'."""
    template_name = 'depto/agregar_depto.html'
    form_class = DepartamentoFormNew
    model = Departamento

    def form_valid(self, form):
        depto = form.save(commit=False)
        depto.estado_depto = 'Nuevo'
        depto.save()
        self.object = depto
        return super(CrearDepto, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_depto', kwargs={'pk': self.object.id_depto})
class ActualizarDepto(LoginRequiredMixin, UpdateView):
    """Edit a department, then return to its detail page."""
    template_name = 'depto/editar_depto.html'
    form_class = DepartamentoFormEdit
    model = Departamento

    def form_valid(self, form):
        depto = form.save(commit=False)
        depto.save()
        self.object = depto
        return redirect('web:ver_depto', pk=self.kwargs['pk'])
class ActualizarMantencion(LoginRequiredMixin, UpdateView):
    """Edit a maintenance record, then show its department's maintenance list."""
    model = Mantencion
    template_name = 'mantencion/mantencion_actualizar.html'
    form_class = EditarMantencion

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:ver_mantencion_depto', pk=self.object.id_depto.id_depto)
class ActualizarInventario(LoginRequiredMixin, UpdateView):
    """Edit an inventory row, then show its department's inventory."""
    model = Inventario
    template_name = 'inventario/editar_inventario.html'
    form_class = InventarioFormNew

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:ver_inventario', pk=self.object.id_depto.id_depto)
class EliminarMantencion(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of a maintenance record."""
    model = Mantencion
    template_name = 'mantencion/mantencion_eliminar.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'item': Mantencion.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: locals renamed so the builtins `object` and `id` are not shadowed.
        mantencion = Mantencion.objects.get(id=pk)
        depto_pk = mantencion.id_depto.id_depto
        mantencion.delete()
        return redirect('web:ver_mantencion_depto', pk=depto_pk)
class EliminarDepto(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of a department."""
    model = Departamento
    template_name = 'depto/eliminar_depto.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'depto': Departamento.objects.get(id_depto=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: local renamed so the builtin `object` is not shadowed.
        depto = Departamento.objects.get(id_depto=pk)
        depto.delete()
        return redirect('web:depto_lista')
class depto_disponibilidad(LoginRequiredMixin, ListView):
    """Paginated department list for managing availability."""
    context_object_name = 'items'
    queryset = Departamento.objects.all()
    template_name = 'depto/disponibilidad_depto.html'
    paginate_by = 8
    model = Departamento
class ActualizarDisponibilidadDepto(LoginRequiredMixin, UpdateView):
    """Edit a department's availability, then return to the availability list."""
    model = Departamento
    template_name = 'depto/editar_disponibilidad.html'
    form_class = DepartamentoDisponibilidadFormEdit
    # Kept for interface compatibility; form_valid below redirects explicitly.
    success_url = reverse_lazy('web:depto_disponibilidad_lista')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:depto_disponibilidad_lista')
class CrearTipoMantencion(LoginRequiredMixin, CreateView):
    """Create a maintenance type and return to the type list."""
    template_name = 'mantencion/tm_agregar.html'
    success_url = reverse_lazy('web:tm_lista')
    form_class = TipoMantencionFormNew
    model = TipoMantencion

    def form_valid(self, form):
        nuevo = form.save(commit=False)
        nuevo.save()
        self.object = nuevo
        return super(CrearTipoMantencion, self).form_valid(form)
class CrearMantencion(LoginRequiredMixin, CreateView):
    """Create a maintenance record pre-bound to a department."""
    template_name = 'mantencion/depto_agregar_mantencion.html'
    form_class = MantencionFormNew
    model = Mantencion

    def get(self, request, *args, **kwargs):
        # Pre-fill the form with the department taken from the URL.
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        form = MantencionFormNew(initial={'id_depto': depto})
        return render(request, self.template_name, {'form': form, 'depto': depto})

    def form_valid(self, form):
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_depto = depto
        self.object.save()
        return super(CrearMantencion, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_mantencion_depto', kwargs={'pk': self.kwargs['pk']})
class VerMantencionDepto(LoginRequiredMixin, ListView):
    """Maintenance of one department: open ('Nueva') items paginated, plus
    completed ('Realizado') items and the department itself in the context."""
    model = Mantencion
    paginate_by = 4
    template_name = 'mantencion/depto_mantencion_ver.html'
    context_object_name = 'items'

    def get_context_data(self, **kwargs):
        pk = self.kwargs['pk']
        depto = Departamento.objects.get(id_depto=pk)
        items2 = Mantencion.objects.filter(id_depto=pk, estado_mantencion='Realizado')
        context = super().get_context_data(**kwargs)
        context['items2'] = items2
        context['depto'] = depto
        return context

    def get_queryset(self):
        # Fix: use self.kwargs like the rest of the module instead of
        # self.request.resolver_match.kwargs (same value, consistent style).
        return Mantencion.objects.filter(id_depto=self.kwargs['pk'],
                                         estado_mantencion='Nueva')
class depto_Mantencion(LoginRequiredMixin, ListView):
    """Departments currently in maintenance state."""
    context_object_name = 'items'
    queryset = Departamento.objects.filter(estado_depto="Mantencion")
    template_name = 'mantencion/depto_lista_mantencion.html'
    paginate_by = 8
    model = Departamento
class tipo_mantencion_lista(LoginRequiredMixin, ListView):
    """Paginated list of maintenance types."""
    context_object_name = 'items'
    queryset = TipoMantencion.objects.all()
    template_name = 'mantencion/tm_lista.html'
    paginate_by = 8
    model = TipoMantencion
class CrearTipoServicioExtra(LoginRequiredMixin, CreateView):
    """Create an extra-service type and return to the type list."""
    template_name = 'servicio/agregar_tipoServicio.html'
    success_url = reverse_lazy('web:tipo_servicio_lista')
    form_class = TipoServicioExtraFormNew
    model = TipoServicioExtra

    def form_valid(self, form):
        nuevo = form.save(commit=False)
        nuevo.save()
        self.object = nuevo
        return super(CrearTipoServicioExtra, self).form_valid(form)
class ListarTipoServicioExtra(LoginRequiredMixin, ListView):
    """Paginated list of extra-service types."""
    context_object_name = 'items'
    queryset = TipoServicioExtra.objects.all()
    template_name = 'servicio/listar_tipoServicio.html'
    paginate_by = 4
    model = TipoServicioExtra
class CrearDetalleServicioExtra(LoginRequiredMixin, CreateView):
    """Create an extra-service detail row and return to the detail list."""
    template_name = 'servicio/agregar_detalleServicio.html'
    success_url = reverse_lazy('web:detalle_servicio_lista')
    form_class = DetalleServicioFormNew
    model = DetalleServicio

    def form_valid(self, form):
        detalle = form.save(commit=False)
        detalle.save()
        self.object = detalle
        return super().form_valid(form)
class ListarDetalleServicioExtra(LoginRequiredMixin, ListView):
    """List of every extra-service detail row."""
    context_object_name = 'items'
    queryset = DetalleServicio.objects.all()
    template_name = 'servicio/listar_detalleServicio.html'
    model = DetalleServicio
class ListarElemento(LoginRequiredMixin, ListView):
    """Paginated list of inventory elements."""
    context_object_name = 'items'
    queryset = Elemento.objects.all()
    template_name = 'inventario/elemento_lista.html'
    paginate_by = 4
    model = Elemento
class CrearElemento(LoginRequiredMixin, CreateView):
    """Create an inventory element and return to the element list."""
    template_name = 'inventario/elemento_agregar.html'
    success_url = reverse_lazy('web:elemento_lista')
    form_class = ElementoFormNew
    model = Elemento

    def form_valid(self, form):
        elemento = form.save(commit=False)
        elemento.save()
        self.object = elemento
        return super(CrearElemento, self).form_valid(form)
class CrearInventario(LoginRequiredMixin, CreateView):
    """Create an inventory row pre-bound to a department."""
    template_name = 'inventario/agregar_inventario_depto.html'
    form_class = InventarioFormNew
    model = Inventario

    def get(self, request, *args, **kwargs):
        # Pre-fill the form with the department taken from the URL.
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        form = InventarioFormNew(initial={'id_depto': depto})
        return render(request, self.template_name, {'form': form, 'depto': depto})

    def form_valid(self, form):
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_depto = depto
        self.object.save()
        return super(CrearInventario, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_inventario', kwargs={'pk': self.kwargs['pk']})
class VerInventario(LoginRequiredMixin, ListView):
    """Paginated inventory of one department (department added to context)."""
    model = Inventario
    paginate_by = 4
    template_name = 'inventario/ver_inventario.html'
    context_object_name = 'items'

    def get_context_data(self, **kwargs):
        pk = self.kwargs['pk']
        depto = Departamento.objects.get(id_depto=pk)
        context = super().get_context_data(**kwargs)
        context['depto'] = depto
        return context

    def get_queryset(self):
        # Fix: use self.kwargs like the rest of the module instead of
        # self.request.resolver_match.kwargs (same value, consistent style).
        return Inventario.objects.filter(id_depto=self.kwargs['pk'])
class ListarReserva(LoginRequiredMixin, ListView):
    """Paginated list of every reservation."""
    context_object_name = 'items'
    queryset = Reserva.objects.all()
    template_name = 'reserva/lista_reservas.html'
    paginate_by = 4
    model = Reserva
class EntregaReserva(LoginRequiredMixin, ListView):
    """Reservations awaiting hand-over (state 'Nueva')."""
    context_object_name = 'items'
    queryset = Reserva.objects.filter(estado_reserva='Nueva')
    template_name = 'funcionario/entrega.html'
    paginate_by = 4
    model = Reserva
class RecepcionReserva(LoginRequiredMixin, ListView):
    """Reservations awaiting return (state 'En uso')."""
    context_object_name = 'items'
    queryset = Reserva.objects.filter(estado_reserva='En uso')
    template_name = 'funcionario/recepcion.html'
    paginate_by = 4
    model = Reserva
class FinalizadaReserva(LoginRequiredMixin, ListView):
    """Completed reservations (state 'Finalizada')."""
    context_object_name = 'items'
    queryset = Reserva.objects.filter(estado_reserva='Finalizada')
    template_name = 'funcionario/finalizada.html'
    paginate_by = 4
    model = Reserva
class CrearCheckIn(LoginRequiredMixin, CreateView):
    """Create a CheckIn pre-bound to a reservation."""
    template_name = 'funcionario/crear_checkin.html'
    form_class = CheckInFormNew
    model = CheckIn

    def get(self, request, *args, **kwargs):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        form = CheckInFormNew(initial={'id': reserva})
        return render(request, self.template_name, {'form': form, 'reserva': reserva})

    def form_valid(self, form):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_reserva = reserva
        self.object.save()
        return super(CrearCheckIn, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_checkin', kwargs={'pk': self.kwargs['pk']})
class VerCheckIn(LoginRequiredMixin, UpdateView):
    """Show/update a CheckIn, then refresh reservation state via comprobar_uso."""
    model = CheckIn
    template_name = 'funcionario/ver_checkin.html'
    form_class = CheckInFormNew
    success_url = reverse_lazy('web:entrega')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        comprobar_uso(self.object)
        return redirect('web:entrega')
class EditarCheckList(LoginRequiredMixin, UpdateView):
    """Record the real counted quantity for one checklist item and update fines.

    The fine (multa) for an item is unit value * missing units.  Any change
    relative to the previously stored fine is propagated to the reservation's
    CheckOut total and to the reservation's own amounts.
    Fixes: removed a leftover debug print and an unused `pk` local in form_valid.
    """
    model = CheckList
    template_name = 'funcionario/editar_checklist.html'
    form_class = CheckListFormEdit

    def get_context_data(self, **kwargs):
        pk = self.kwargs['pk']
        detalle = CheckList.objects.get(id=pk)
        kwargs['detalle'] = detalle
        if 'form' not in kwargs:
            kwargs['form'] = self.get_form()
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        self.object = form.save(commit=False)
        self.object.check = True  # mark this item as reviewed
        multa = 0
        multa_actual = self.object.monto_multa
        multa_unidad = self.object.id_inventario.id_elemento.valor
        # Missing units = expected inventory quantity minus the counted quantity.
        diferencia = self.object.id_inventario.cantidad - self.object.cantidad_real
        if diferencia > 0:
            multa = multa_unidad * diferencia
        diferencia_multa = multa - multa_actual
        if diferencia_multa != 0:
            # Propagate the fine delta to the CheckOut and the reservation totals.
            checkout = CheckOut.objects.get(id_reserva=self.object.id_reserva)
            checkout.total_multa = checkout.total_multa + diferencia_multa
            checkout.save()
            reserva = checkout.id_reserva
            reserva.monto_multa = checkout.total_multa
            reserva.monto_total = reserva.monto_estadia + reserva.monto_servicioextra + reserva.monto_multa
            reserva.save()
        self.object.monto_multa = multa
        self.object.save()
        return redirect('web:ver_checklist', pk=self.object.id_reserva.id)
class CrearCheckList(LoginRequiredMixin, CreateView):
    """Create a checklist item pre-bound to a reservation."""
    template_name = 'funcionario/crear_checklist.html'
    form_class = CheckListFormNew
    model = CheckList

    def get(self, request, *args, **kwargs):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        form = CheckListFormNew(initial={'id': reserva})
        return render(request, self.template_name, {'form': form, 'reserva': reserva})

    def form_valid(self, form):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_reserva = reserva
        self.object.save()
        return super(CrearCheckList, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_checklist', kwargs={'pk': self.kwargs['pk']})
class VerCheckList(LoginRequiredMixin, View):
    """Show every checklist item of one reservation."""
    template_name = 'funcionario/ver_checklist.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        items = CheckList.objects.filter(id_reserva=reserva.id)
        context = {'reserva': reserva, 'items': items}
        return render(request, self.template_name, context)

    def post(self, request, pk, *args, **kwargs):
        # POST just refreshes the page for the same reservation.
        # Fix: removed a leftover debug print and a redundant pk reassignment.
        return redirect('web:ver_checklist', pk=pk)
class CrearCheckOut(LoginRequiredMixin, CreateView):
    """Create a CheckOut pre-bound to a reservation."""
    template_name = 'funcionario/crear_checkout.html'
    form_class = CheckOutFormNew
    model = CheckOut

    def get(self, request, *args, **kwargs):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        form = CheckOutFormNew(initial={'id': reserva})
        return render(request, self.template_name, {'form': form, 'reserva': reserva})

    def form_valid(self, form):
        reserva = Reserva.objects.get(id=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_reserva = reserva
        self.object.save()
        return super(CrearCheckOut, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:ver_checkout', kwargs={'pk': self.kwargs['pk']})
class VerCheckOut(LoginRequiredMixin, UpdateView):
    """Show/update a CheckOut, then close out the reservation via comprobar_cierre."""
    model = CheckOut
    template_name = 'funcionario/ver_checkout.html'
    form_class = CheckOutFormNew
    success_url = reverse_lazy('web:recepcion')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        comprobar_cierre(self.object)
        return redirect('web:recepcion')
class EliminarTipoMantencion(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of a maintenance type."""
    model = TipoMantencion
    template_name = 'mantencion/tm_eliminar.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'tipoM': TipoMantencion.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: local renamed so the builtin `object` is not shadowed.
        tipo = TipoMantencion.objects.get(id=pk)
        tipo.delete()
        return redirect('web:tm_lista')
class ActualizarTipoMantencion(LoginRequiredMixin, UpdateView):
    """Edit a maintenance type, then return to the type list."""
    model = TipoMantencion
    template_name = 'mantencion/tm_agregar.html'
    form_class = TipoMantencionFormNew
    success_url = reverse_lazy('web:tm_lista')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:tm_lista')
class EliminarElemento(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of an inventory element."""
    model = Elemento
    template_name = 'inventario/elemento_eliminar.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'elemento': Elemento.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: local renamed so the builtin `object` is not shadowed.
        elemento = Elemento.objects.get(id=pk)
        elemento.delete()
        return redirect('web:elemento_lista')
class ActualizarElemento(LoginRequiredMixin, UpdateView):
    """Edit an inventory element, then return to the element list."""
    model = Elemento
    template_name = 'inventario/elemento_agregar.html'
    form_class = ElementoFormNew
    success_url = reverse_lazy('web:elemento_lista')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:elemento_lista')
class EliminarInventario(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of an inventory row."""
    model = Inventario
    template_name = 'inventario/eliminar_inventario.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'inventario': Inventario.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: locals renamed so the builtins `object` and `id` are not shadowed.
        inventario = Inventario.objects.get(id=pk)
        depto_pk = inventario.id_depto.id_depto
        inventario.delete()
        return redirect('web:ver_inventario', pk=depto_pk)
class EliminarTipoServicioExtra(LoginRequiredMixin, ListView):
    """Confirm (GET) and perform (POST) deletion of an extra-service type."""
    model = TipoServicioExtra
    template_name = 'servicio/eliminar_tipoServicio.html'

    def get(self, request, *args, **kwargs):
        pk = self.kwargs['pk']
        contexto = {
            'tipo_servicio': TipoServicioExtra.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        # Fix: local renamed so the builtin `object` is not shadowed.
        tipo_servicio = TipoServicioExtra.objects.get(id=pk)
        tipo_servicio.delete()
        return redirect('web:tipo_servicio_lista')
class ActualizarTipoServicioExtra(LoginRequiredMixin, UpdateView):
    """Edit an extra-service type, then return to the type list."""
    model = TipoServicioExtra
    template_name = 'servicio/agregar_tipoServicio.html'
    form_class = TipoServicioExtraFormNew
    success_url = reverse_lazy('web:tipo_servicio_lista')

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:tipo_servicio_lista')
class ActualizarDetalleServicioExtra(LoginRequiredMixin, UpdateView):
    """Edit an extra-service detail row, then return to the detail list."""
    model = DetalleServicio
    template_name = 'servicio/editar_detalleServicio.html'
    form_class = DetalleServicioFormEdit
    success_url = reverse_lazy('web:detalle_servicio_lista')

    def get_context_data(self, **kwargs):
        pk = self.kwargs['pk']
        detalle = DetalleServicio.objects.get(id=pk)
        kwargs['detalle'] = detalle
        if 'form' not in kwargs:
            kwargs['form'] = self.get_form()
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        # Fix: removed unused local `pk = self.kwargs['pk']`.
        self.object = form.save(commit=False)
        self.object.save()
        return redirect('web:detalle_servicio_lista')
class depto_Usuario(LoginRequiredMixin, ListView):
    """Department catalogue shown to users when starting a reservation."""
    context_object_name = 'items'
    queryset = Departamento.objects.all()
    template_name = 'reserva/lista_deptos.html'
    paginate_by = 4
    model = Departamento
class CrearReserva(LoginRequiredMixin, CreateView):
    """Create a reservation pre-bound to a department."""
    template_name = 'reserva/crear_reserva.html'
    form_class = ReservaFormNew
    model = Reserva

    def get(self, request, *args, **kwargs):
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        form = ReservaFormNew(initial={'id_depto': depto})
        return render(request, self.template_name, {'form': form, 'depto': depto})

    def form_valid(self, form):
        depto = Departamento.objects.get(id_depto=self.kwargs['pk'])
        self.object = form.save(commit=False)
        self.object.id_depto = depto
        self.object.save()
        return super(CrearReserva, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('web:reserva_lista')
class CrearReservaInicial(LoginRequiredMixin, CreateView):
    """Step 1 of the booking wizard: create the reservation as a 'Solicitud'."""
    model = Reserva
    form_class = ReservaInicialPaso1
    template_name = 'reserva/crear_reserva.html'

    def get(self, request, *args, **kwargs):
        """Render the step-1 form pre-bound to the chosen apartment."""
        pk = self.kwargs['pk']
        depto = Departamento.objects.get(id_depto=pk)
        form = ReservaInicialPaso1(initial={'id_depto':depto})
        context = {'form': form, 'depto':depto}
        return render(request, self.template_name, context)

    def form_valid(self, form):
        """Seed server-controlled fields: apartment, daily rate, state, owner."""
        pk = self.kwargs['pk']
        depto = Departamento.objects.get(id_depto=pk)
        self.object = form.save(commit=False)
        self.object.id_depto=depto
        self.object.monto_pordia = depto.valor_arriendo
        self.object.estado_reserva='Solicitud'
        self.object.id_usuario = self.request.user
        self.object.save()
        return super().form_valid(form)
        # return reverse_lazy('web:crear_reserva2', kwargs={'solicitud': self.object})

    def get_success_url(self):
        # Chain into step 2 of the wizard with the new reservation's id.
        return reverse_lazy('web:crear_reserva2', kwargs={'pk': self.object.id})
class ActualizarReservaPaso2(LoginRequiredMixin, View):
    """Step 2: compute stay totals and end date, then show the anticipo amount."""

    def get(self, request, *args, **kwargs):
        objeto = Reserva.objects.get(id=self.kwargs['pk'])
        # Derived amounts: stay = days * daily rate; total adds extras and fines.
        objeto.monto_estadia = objeto.n_dias*objeto.monto_pordia
        objeto.monto_total = objeto.monto_estadia + objeto.monto_servicioextra + objeto.monto_multa
        # Advance payment is a percentage of the stay amount.
        monto = objeto.id_depto.pct_anticipo*objeto.monto_estadia/100
        objeto.fecha_termino = objeto.fecha_inicio + datetime.timedelta(days=objeto.n_dias)
        objeto.save()
        context = {'reserva': objeto, 'monto': monto}
        return render(request, 'reserva/crear_reserva2.html', context)
class ActualizarReservaPaso4(LoginRequiredMixin, View):
    """Step 4: show the remaining balance (stay amount minus anticipo)."""

    def get(self, request, *args, **kwargs):
        objeto = Reserva.objects.get(id=self.kwargs['pk'])
        monto = objeto.monto_estadia - objeto.pago_anticipo
        context = {'reserva': objeto, 'monto': monto}
        return render(request, 'reserva/crear_reserva4.html', context)
class ActualizarReservaPaso6(LoginRequiredMixin, View):
    """Step 6: show outstanding fines (multa) for the reservation."""

    def get(self, request, *args, **kwargs):
        objeto = Reserva.objects.get(id=self.kwargs['pk'])
        monto = objeto.monto_multa
        context = {'reserva': objeto, 'monto': monto}
        return render(request, 'reserva/crear_reserva6.html', context)
class VerDepto(LoginRequiredMixin, ListView):
    """Detail-style view of one apartment (implemented as a single-item list)."""
    model = Departamento
    paginate_by = 4
    template_name = 'reserva/vista_reserva.html'
    context_object_name = 'items'

    def get_context_data(self, **kwargs):
        """Also expose the apartment itself as ``depto``."""
        pk = self.kwargs['pk']
        depto = Departamento.objects.get(id_depto=pk)
        context = super().get_context_data(**kwargs)
        context['depto'] = depto
        return context

    def get_queryset(self):
        # Restrict the list to the single apartment named in the URL.
        return Departamento.objects.filter(id_depto=self.request.resolver_match.kwargs['pk'])
class CrearServicioExtra(LoginRequiredMixin, CreateView):
    """Attach an extra-service line item to an existing reservation."""
    model = DetalleServicio
    form_class = DetalleServicioFormNew
    template_name = 'reserva/crear_Servicioextra.html'

    def get(self, request, *args, **kwargs):
        """Render the form pre-bound to the reservation and its apartment."""
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        depto = reserva.id_depto
        form = DetalleServicioFormNew(initial={'id_reserva':reserva, 'id_depto':depto})
        context = {'form': form, 'reserva':reserva}
        return render(request, self.template_name, context)

    def form_valid(self, form):
        """Force the reservation FK from the URL and start in 'Solicitud' state."""
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        self.object = form.save(commit=False)
        self.object.id_reserva=reserva
        self.object.estado_servicio = 'Solicitud'
        self.object.save()
        return super(CrearServicioExtra, self).form_valid(form)

    def get_success_url(self):
        # Return to the owning reservation's detail page.
        return reverse_lazy('web:ver_reserva', kwargs={'pk': self.object.id_reserva.id})
class EliminarServicio(LoginRequiredMixin, ListView):
    """Confirm-and-delete flow for a service line item.

    NOTE(review): subclasses ListView but behaves like a DeleteView; both
    handlers are overridden so the ListView machinery is unused.
    """
    model = DetalleServicio
    template_name = 'servicio/eliminar_detalleServicio.html'

    def get(self, request, *args, **kwargs):
        """Render the delete-confirmation page."""
        pk = self.kwargs['pk']
        contexto = {
            'detalle': DetalleServicio.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        """Delete the item and return to its reservation's detail page."""
        temp = DetalleServicio.objects.get(id=pk)
        reserva = temp.id_reserva
        temp.delete()
        return redirect('web:ver_reserva', pk=reserva.id)
class EliminarServicio2(LoginRequiredMixin, View):
    """Delete a service line item via GET, then re-render its reservation page."""

    def get(self, request, *args, **kwargs):
        """Delete DetalleServicio ``pk`` and show the owning reservation.

        BUG FIX: the original swallowed ``DoesNotExist`` (printing "Hola") and
        then referenced ``reserva`` anyway, raising NameError; a missing item
        now redirects to the reservation list instead.
        """
        pk = self.kwargs['pk']
        try:
            servicio = DetalleServicio.objects.get(id=pk)
        except DetalleServicio.DoesNotExist:
            # Nothing to delete: fall back to the reservation listing.
            return redirect('web:lista')
        reserva = servicio.id_reserva
        servicio.delete()
        pagos = RecibirPago.objects.filter(id_reserva=reserva.id)
        servicios = DetalleServicio.objects.filter(id_reserva=reserva.id)
        context = {'reserva': reserva, 'pagos': pagos, 'servicios': servicios}
        return render(request, 'reserva/ver_reserva.html', context)
class CancelarReserva(LoginRequiredMixin, ListView):
    """Confirm-and-cancel flow: marks the reservation 'Anulada' (no deletion)."""
    model = Reserva
    template_name = 'reserva/cancelar_reserva.html'

    def get(self, request, *args, **kwargs):
        """Render the cancellation confirmation page."""
        pk = self.kwargs['pk']
        contexto = {
            'reserva': Reserva.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        """Flip the reservation state to 'Anulada' and return to the list."""
        objeto = Reserva.objects.get(id=self.kwargs['pk'])
        objeto.estado_reserva = 'Anulada'
        objeto.save()
        return redirect('web:lista')
class ReservasCanceladas(LoginRequiredMixin, ListView):
    """Paginated list of reservations in the 'Anulada' (cancelled) state."""
    model = Reserva
    paginate_by = 4
    template_name = 'reserva/reserva_cancelada.html'
    queryset = Reserva.objects.filter(estado_reserva='Anulada')
    context_object_name = 'items'
class ReservasReembolso(LoginRequiredMixin, ListView):
    """Paginated list of reservations already refunded ('Reembolsado')."""
    model = Reserva
    paginate_by = 4
    template_name = 'reserva/reserva_reembolso.html'
    queryset = Reserva.objects.filter(estado_reserva='Reembolsado')
    context_object_name = 'items'
class Reembolso(LoginRequiredMixin, ListView):
    """Confirm-and-refund flow: marks the reservation refunded and books it."""
    model = Reserva
    template_name = 'reserva/reembolso.html'

    def get(self, request, *args, **kwargs):
        """Render the refund confirmation page."""
        pk = self.kwargs['pk']
        contexto = {
            'reserva': Reserva.objects.get(id=pk),
            'pk': pk,
        }
        return render(request, self.template_name, contexto)

    def post(self, request, pk, *args, **kwargs):
        """Set state to 'Reembolsado', record the refund payment, and list."""
        objeto = Reserva.objects.get(id=self.kwargs['pk'])
        objeto.estado_reserva = 'Reembolsado'
        objeto.save()
        # Creates the negative RecibirPago entry (see module-level reembolso()).
        reembolso(objeto)
        return redirect('web:reservas_reembolso')
class ServicioPago(LoginRequiredMixin, View):
    """Show the payment page (step 1) for a single extra service."""

    def get(self, request, *args, **kwargs):
        """Render the service-payment template for DetalleServicio ``pk``."""
        # FIX: removed leftover debug print("estoy get").
        pk = self.kwargs['pk']
        servicio = DetalleServicio.objects.get(id=pk)
        context = {'servicio': servicio}
        return render(request, 'servicio/servicio_pago1.html', context)
class CrearPagoServicio(LoginRequiredMixin, View):
    """Mark an extra service as paid and record the corresponding payment."""

    def get(self, request, *args, **kwargs):
        # NOTE(review): leftover debug print.
        print("estoy get")
        pk = self.kwargs['pk']
        servicio = DetalleServicio.objects.get(id=pk)
        servicio.estado_servicio = 'Pagado'
        servicio.check_pago = True
        servicio.save()
        # Creates the RecibirPago entry and updates reservation totals.
        recibir = pago_servicio(servicio)
        context = {'servicio':servicio,'recibir':recibir}
        return render(request, 'servicio/crear_pago_servicio.html', context)
class CrearPago(LoginRequiredMixin, View):
    """Step 3: take the anticipo payment and bootstrap check-in paperwork."""
    template_name = 'reserva/crear_reserva3.html'

    def get(self, request, *args, **kwargs):
        # NOTE(review): leftover debug print.
        print("estoy get")
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        reserva.estado_reserva = 'Nueva'
        # Advance payment is a percentage of the stay amount.
        monto_anticipo = reserva.id_depto.pct_anticipo*reserva.monto_estadia/100
        reserva.pago_anticipo = monto_anticipo
        reserva.save()
        recibir = pago_anticipo(reserva)
        # Create the inventory checklist plus check-in/check-out records
        # (each helper is idempotent: it only saves when missing).
        crear_checklist(reserva)
        crear_checkin(reserva)
        crear_checkout(reserva)
        context = {'reserva':reserva,'recibir':recibir}
        return render(request, self.template_name, context)
class CrearPago2(LoginRequiredMixin, View):
    """Step 5: take the remaining stay balance and tick the check-in pago flag."""
    template_name = 'reserva/crear_reserva5.html'

    def get(self, request, *args, **kwargs):
        # NOTE(review): leftover debug print.
        print("estoy get")
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        monto = reserva.monto_estadia - reserva.pago_anticipo
        reserva.pago_faltante = monto
        reserva.save()
        recibir = pago_faltante(reserva)
        check_estadia(reserva)
        context = {'reserva':reserva,'recibir':recibir}
        return render(request, self.template_name, context)
class CrearPago3(LoginRequiredMixin, View):
    """Step 7: take the fine (multa) payment and tick the check-out flags."""
    template_name = 'reserva/crear_reserva7.html'

    def get(self, request, *args, **kwargs):
        # NOTE(review): leftover debug print.
        print("estoy get")
        pk = self.kwargs['pk']
        reserva = Reserva.objects.get(id=pk)
        monto = reserva.monto_multa
        reserva.pago_multa = monto
        reserva.save()
        recibir = pago_multa(reserva)
        check_multa(reserva)
        context = {'reserva':reserva,'recibir':recibir}
        return render(request, self.template_name, context)
class ListarPago(LoginRequiredMixin, ListView):
    """Paginated report listing every recorded payment (RecibirPago)."""
    model = RecibirPago
    paginate_by = 4
    template_name = 'reporte/lista_pago.html'
    context_object_name = 'items'
def check_estadia(reserva):
    """Mark the reservation's CheckIn record as paid for the stay."""
    objeto=CheckIn.objects.get(id_reserva=reserva)
    objeto.check_pago = True
    objeto.save()
def check_multa(reserva):
    """Mark the reservation's CheckOut as fine-paid and checklist-complete."""
    objeto=CheckOut.objects.get(id_reserva=reserva)
    objeto.check_multa = True
    objeto.check_lista = True
    objeto.detalle_multa = "Pago de Multas - OK"
    objeto.save()
def crear_checklist(reserva):
    """Create one CheckList row per inventory item of the booked apartment.

    Idempotent: an item already linked to this reservation is skipped
    (EAFP — save only when the lookup raises DoesNotExist).
    """
    lista=Inventario.objects.filter(id_depto=reserva.id_depto)
    for item in lista:
        print(item)
        temp=CheckList()
        temp.id_reserva = reserva
        temp.id_inventario = item
        try:
            objeto=CheckList.objects.get(id_reserva=reserva,id_inventario=item)
        except CheckList.DoesNotExist:
            temp.save()
def crear_checkin(reserva):
    """Create the CheckIn record for a reservation if it does not exist yet."""
    temp=CheckIn()
    temp.id_reserva = reserva
    try:
        objeto=CheckIn.objects.get(id_reserva=reserva)
    except CheckIn.DoesNotExist:
        temp.save()
def crear_checkout(reserva):
    """Create the CheckOut record for a reservation if it does not exist yet."""
    temp=CheckOut()
    temp.id_reserva = reserva
    try:
        objeto=CheckOut.objects.get(id_reserva=reserva)
    except CheckOut.DoesNotExist:
        temp.save()
def pago_faltante(reserva):
    """Record the remaining-stay payment; return the (possibly unsaved) entry.

    Idempotent by observation string: if a "Pago Estadía Faltante" entry
    already exists for this reservation, nothing new is saved and the
    reservation total is left untouched.
    """
    recibir = RecibirPago()
    recibir.id_reserva = reserva
    recibir.monto = reserva.pago_faltante
    obs = "Pago Estadía Faltante"
    recibir.observaciones = obs
    try:
        objeto=RecibirPago.objects.get(id_reserva=reserva,observaciones="Pago Estadía Faltante")
    except RecibirPago.DoesNotExist:
        reserva.pago_total = reserva.pago_total + recibir.monto
        reserva.save()
        recibir.save()
    return recibir
def pago_anticipo(reserva):
    """Record the advance (anticipo) payment; return the (possibly unsaved) entry.

    Idempotent by observation string: a pre-existing "Pago Anticipo" entry
    for this reservation suppresses the save and the total update.
    """
    recibir = RecibirPago()
    recibir.id_reserva = reserva
    recibir.monto = reserva.pago_anticipo
    obs = "Pago Anticipo"
    recibir.observaciones = obs
    try:
        objeto=RecibirPago.objects.get(id_reserva=reserva,observaciones="Pago Anticipo")
    except RecibirPago.DoesNotExist:
        reserva.pago_total = reserva.pago_total + recibir.monto
        reserva.save()
        recibir.save()
    return recibir
def comprobar_uso(checkin):
    """Flip the linked Reserva to 'En uso' once every check-in box is ticked.

    Reads the five boolean flags on *checkin*; if all are set the reservation
    enters 'En uso', otherwise it reverts to 'Nueva'. Saves the reservation.
    """
    reserva = checkin.id_reserva
    requisitos = (
        checkin.check_pago,
        checkin.check_lista,
        checkin.check_pdf,
        checkin.check_servicio,
        checkin.check_llave,
    )
    # Idiom fix: truthiness via all() instead of five chained `== True` tests.
    reserva.estado_reserva = 'En uso' if all(requisitos) else 'Nueva'
    reserva.save()
def comprobar_cierre(checkout):
    """Finalize the linked Reserva once every check-out box is ticked.

    If the three boolean flags on *checkout* are all set the reservation
    becomes 'Finalizada', otherwise it stays 'En uso'. Saves the reservation.
    """
    reserva = checkout.id_reserva
    requisitos = (checkout.check_lista, checkout.check_multa, checkout.check_pdf)
    # Idiom fix: truthiness via all() instead of chained `== True` tests.
    reserva.estado_reserva = 'Finalizada' if all(requisitos) else 'En uso'
    reserva.save()
def reembolso(reserva):
    """Record a refund entry whose amount negates all prior payments.

    Sums every non-refund payment for the reservation, negates it, and saves
    a single "Reembolso" RecibirPago entry. Idempotent by observation string.
    """
    objeto=RecibirPago.objects.filter(id_reserva=reserva).exclude(observaciones='Reembolso')
    temp=0
    for item in objeto:
        # Accumulate the negated total of everything already paid.
        temp= temp-item.monto
    reembolso = RecibirPago()
    reembolso.id_reserva = reserva
    reembolso.monto = temp
    obs = "Reembolso"
    reembolso.observaciones = obs
    try:
        objeto=RecibirPago.objects.get(id_reserva=reserva,observaciones="Reembolso")
    except RecibirPago.DoesNotExist:
        reserva.pago_total = reserva.pago_total + reembolso.monto
        reserva.save()
        reembolso.save()
def pago_servicio(servicio):
    """Record the payment of one extra service and roll it into the totals.

    Idempotent per service: an existing "Pago Servicio Extra" entry keyed to
    this DetalleServicio suppresses the save and the reservation updates.
    Returns the (possibly unsaved) RecibirPago entry.
    """
    recibir = RecibirPago()
    recibir.id_reserva = servicio.id_reserva
    recibir.id_detalleservicio = servicio
    recibir.monto = servicio.valor
    obs = "Pago Servicio Extra"
    recibir.observaciones = obs
    try:
        objeto=RecibirPago.objects.get(id_detalleservicio=servicio.id,observaciones="Pago Servicio Extra")
    except RecibirPago.DoesNotExist:
        reserva = servicio.id_reserva
        # Fold the service value into the reservation's running totals.
        reserva.monto_servicioextra = reserva.monto_servicioextra + servicio.valor
        reserva.monto_total = reserva.monto_estadia + reserva.monto_servicioextra + reserva.monto_multa
        reserva.pago_servicioextra = reserva.pago_servicioextra + servicio.valor
        reserva.pago_total = reserva.pago_total + recibir.monto
        reserva.save()
        recibir.save()
    return recibir
def pago_multa(reserva):
    """Record the fine (multa) payment; return the (possibly unsaved) entry.

    Idempotent by observation string: a pre-existing "Pago Multa" entry
    suppresses the save and the reservation total updates.
    """
    recibir = RecibirPago()
    recibir.id_reserva = reserva
    recibir.monto = reserva.pago_multa
    obs = "Pago Multa"
    recibir.observaciones = obs
    try:
        objeto=RecibirPago.objects.get(id_reserva=reserva,observaciones="Pago Multa")
    except RecibirPago.DoesNotExist:
        reserva.pago_total = reserva.pago_total + recibir.monto
        reserva.monto_total = reserva.monto_estadia + reserva.monto_servicioextra + reserva.monto_multa
        reserva.save()
        recibir.save()
    return recibir
|
# Generated by Django 3.2.3 on 2021-06-13 18:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial checkout migration: creates the Order and OrderItem tables.

    Auto-generated by Django 3.2.3; do not edit operations by hand.
    """
    initial = True
    dependencies = [
        ('courses', '0002_alter_language_options'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_number', models.CharField(editable=False, max_length=50)),
                ('first_name', models.CharField(max_length=25)),
                ('second_name', models.CharField(max_length=25)),
                ('email', models.EmailField(max_length=200)),
                ('phone_number', models.CharField(max_length=25)),
                ('country', models.CharField(max_length=50)),
                ('postcode', models.CharField(blank=True, max_length=20, null=True)),
                ('town_or_city', models.CharField(max_length=40)),
                ('address1', models.CharField(max_length=60)),
                ('address2', models.CharField(blank=True, max_length=60, null=True)),
                ('region_or_county', models.CharField(blank=True, max_length=60, null=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('delivery_cost', models.DecimalField(decimal_places=2, default=0, max_digits=6)),
                ('order_total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('grand_total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=0)),
                ('orderitem_total', models.DecimalField(decimal_places=2, editable=False, max_digits=6)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.course')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderitems', to='checkout.order')),
            ],
        ),
    ]
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import re
import jieba
import jieba.analyse
# Load the full corpus (id / title / doc, '\001'-separated) and the labeled
# training keywords, then split the corpus into train (labeled) and test rows.
alldoc = pd.read_csv('../data/all_docs.txt',sep='\001',header=None)
alldoc.columns = ['id','title','doc']
train = pd.read_csv('../data/train_docs_keywords.txt',sep='\t',header=None)
train.columns = ['id','label']
train_id_list = list(train['id'].unique())
train = pd.merge(alldoc[alldoc['id'].isin(train_id_list)],train,on=['id'],how='inner')
test = alldoc[~alldoc['id'].isin(train_id_list)]
def get_unigram(data, num):
    """Count character frequencies over doc+title of the first *num* rows.

    Returns a dict mapping each character to its total occurrence count,
    where each row contributes str(doc) + '&' + str(title).
    """
    counts = {}
    for row in tqdm(range(num)):
        text = str(data.loc[row, 'doc']) + '&' + str(data.loc[row, 'title'])
        for ch in text:
            counts[ch] = counts.get(ch, 0) + 1
    return counts
# Global character-frequency table over the whole corpus.
char_unigram=get_unigram(alldoc,alldoc.shape[0])
# (Original author notes, kept verbatim as string statements: the stopword
# list came from the 200 most frequent words; idf.txt is copied from
# jieba/analyse/idf.txt into the data directory.)
'''stopword是从前文档中200频繁词提取出来的,这里就不重复这个过程了'''
'idf是从jieba里复制出来的,路径是jieba/analys/idf.txt,将它拷贝过来,存在data下'
stopword=list(pd.read_csv('../data/stopwords.txt',header=None,sep='\t')[0].values)
i1=pd.read_csv('../data/idf.txt',header=None,sep=' ')
# Build word -> idf lookup from the two-column idf file.
idf1={}
for i in range(i1.shape[0]):
    idf1[i1.loc[i,0]]=i1.loc[i,1]
def get_new_word(data,char_unigram,num):
    """Mine candidate new words (2-7 char n-grams) from the first *num* rows.

    For every row, counts all n-grams that do not cross punctuation and do
    not start/end with '的'/'是', tracking the set of distinct left/right
    neighbor characters ("freedom") per n-gram. An n-gram is kept when its
    frequency and neighbor-freedom both clear length-dependent thresholds
    and it is neither a stopword nor already in the jieba idf vocabulary.
    Returns a dict of candidate word -> total count.
    """
    char_sum=sum(char_unigram.values())
    new_gram={}
    for row in tqdm(range(num)):
        n_gram={}
        free_left={}
        free_right={}
        cnt=0
        # Sentinels '*' and '%' guard the ends of the concatenated text.
        ls='*'+str(data.loc[row,'doc'])+'&'+str(data.loc[row,'title'])+'%'
        for j in range(2,8):
            for i in range(1,len(ls)-j):
                tmp=ls[i:i+j]
                if re.search(u',|。|!|:|?|《|》|、|;|“|”|;|\.|\"|)|(',tmp,flags=re.U) is not None \
                or tmp[0]=='的' or tmp[0]=='是' or tmp[-1]=='的' or tmp[-1]=='是':#or tmp in stopword:
                    continue
                n_gram[tmp]=n_gram.get(tmp,0)+1
                left=ls[i-1:i]
                # A punctuation neighbor counts as a unique context: use a
                # fresh integer so each occurrence enlarges the freedom set.
                if left==','or left=='。'or left=='?'or left=='《':
                    free_left[tmp]=(free_left.get(tmp,set()))
                    free_left[tmp].add(cnt)
                    cnt+=1
                else:
                    free_left[tmp]=(free_left.get(tmp,set()))
                    free_left[tmp].add(left)
                right=ls[i+j:i+j+1]
                if right==','or right=='。'or right=='?'or right=='》':
                    free_right[tmp]=(free_right.get(tmp,set()))
                    free_right[tmp].add(cnt)
                    cnt+=1
                else:
                    free_right[tmp]=(free_right.get(tmp,set()))
                    free_right[tmp].add(right)
        for w,v in n_gram.items():
            # Longer candidates get relaxed frequency/freedom thresholds.
            thres1=5
            thres2=3
            if len(w)>=4:
                thres1-=1
                thres2-=1
            if v>=thres1:
                minfree=min(len(free_left[w]),len(free_right[w]))
                if minfree>=thres2:
                    if w not in stopword and w not in idf1:
                        new_gram[w]=new_gram.get(w,0)+n_gram[w]
    print (len(new_gram))
    return (new_gram)
# Mine candidates over the full corpus and cache the intermediate results.
new_gram=get_new_word(alldoc,char_unigram,alldoc.shape[0])
np.save('../data/new_gram2.npy',new_gram)
np.save('../data/char_unigram2.npy',char_unigram)
import numpy as np
# [()] unwraps the 0-d object array produced by np.save on a dict.
new_gram=np.load('../data/new_gram2.npy')[()]
char_unigram=np.load('../data/char_unigram2.npy')[()]
# Flatten the labeled keywords into one set.
keywo = list(pd.read_csv('../data/train_docs_keywords.txt',sep='\t',header=None)[1].values)
keyword=[]
for i in keywo:
    keyword.extend(i.split(','))
keyword=set(keyword)
ls=[]
newword=[]
char_sum=sum(char_unigram.values())
# Score each candidate by a PMI-style cohesion ratio: observed frequency
# over the product of its characters' independent probabilities. The
# acceptance threshold grows with word length (empirically tuned).
for w,v in new_gram.items():
    unimuty=1
    for i in range(len(w)):
        unimuty*=(char_unigram[w[i]]/char_sum)
    unimuty=(new_gram[w]/char_sum/unimuty)
    if len(w)==2:
        if unimuty>0.75:
            newword.append(w)
    if len(w)==3:
        if unimuty>250:
            newword.append(w)
    if len(w)==4:
        if unimuty>12000:
            newword.append(w)
    if len(w)==5:
        if unimuty>7000000:
            newword.append(w)
    if len(w)==6:
        newword.append(w)
    if len(w)==7:
        newword.append(w)
newword=list(set(newword))
print (len(newword))
import re
# Additionally harvest book/movie titles (《...》) and Latin/alphanumeric
# tokens from every document, then merge everything into the user dictionary.
new_word_list=set()
data=alldoc
for row in tqdm(range(data.shape[0])):
    try:
        new_word_list|=set(re.findall(r"《(.*?)》",str(data.loc[row,'doc'])+'&'+str(data.loc[row,'title'])))
        new_word_list|=set(re.findall(r'[A-z]{2,} [A-z]{2,}|[A-z]{2,}[0-9]+[A-z]*|[A-z]{2,}',str(data.loc[row,'doc'])+'&'+str(data.loc[row,'title'])))
    except:
        pass
# Keep only plausible lengths (1-29 chars).
new_word_list2=[]
for i in new_word_list:
    if len(i)<30 and len(i)>0:
        new_word_list2.append(i)
new_word=pd.DataFrame({'word':list(keyword)+list(new_word_list2)+list(newword)})
new_word.to_csv('../data/newword.txt',header=None,index=None)
print (new_word.shape)
import jieba
import jieba.posseg as pseg
# Load the mined vocabulary so jieba can segment the new words as units.
jieba.load_userdict('../data/newword.txt')
jieba.enable_parallel(12)
# Measure recall of gold keywords among jieba's segments on the train set.
tag_num=0
hit_num=0
right_num=0
error_list=set()
for row in tqdm(range(train.shape[0])):
    ls=str(train.loc[row,'doc'])+'&'+str(train.loc[row,'title'])
    tag=train.loc[row,'label'].split(',')
    seg_list=set()
    for w,pos in pseg.cut(ls,HMM=True):
        if w in tag:
            right_num+=1
            seg_list.add(w)
    tag_num+=len(tag)
    hit_num+=len(seg_list)
    error_list|=set(tag)-seg_list
print (hit_num/tag_num)
print(right_num)
def fenci(data):
    """Segment titles and docs into space-joined token strings.

    Processes rows in batches of 120, joining each batch with newlines so
    one pseg.cut call covers the batch, then splits the result back into
    per-row strings. Returns {'doc_embed': [...], 'title_embed': [...]}
    aligned with the input row order.
    """
    embed={}
    embed['doc_embed']=[]
    embed['title_embed']=[]
    cnt=0
    for row in tqdm(range(0,data.shape[0]//120+1)):
        for col in ['title','doc']:
            ls=""
            sp_w=[]
            for sent in data.loc[row*120:row*120+119,col].values:
                ls+=(str(sent)+'\n')
            # Drop the trailing newline so split('\n') yields exactly one
            # entry per row.
            ls=ls[:-1]
            for words in pseg.cut(ls,HMM=True):
                sp_w.append(words.word)
            sp=" ".join(sp_w)
            c=sp.split('\n')
            for em in c:
                embed[col+'_embed'].append(em)
    return embed
# Build the word2vec training corpus: each line is a segmented doc followed
# by its segmented title.
doc_title_embed=fenci(alldoc)
w2v_train_doc=doc_title_embed['doc_embed']+doc_title_embed['title_embed']
w2v_train_doc2=[]
for i in range(len(w2v_train_doc)//2):
    w2v_train_doc2.append(w2v_train_doc[i]+w2v_train_doc[i+len(w2v_train_doc)//2])
print (len(w2v_train_doc2))
# NOTE(review): the DataFrame is built from w2v_train_doc, not the
# concatenated w2v_train_doc2 computed just above — looks unintentional;
# confirm which corpus the downstream w2v training expects.
w2v_train_doc2=pd.DataFrame(w2v_train_doc)
w2v_train_doc2.to_csv('../data/w2v_train_doc.txt',header=None,index=None)
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 17:00:50 2021
@author: L01268185
"""
import numpy as np
# Exercise 1: swap the first two rows of a 5x5 matrix, twice over —
# first via explicit temporaries, then via fancy-indexing in one step.
data = np.arange(25).reshape(5,5)
temp=data[[0],:]
temp2=data[[1],:]
data[[0],:]=temp2
data[[1],:]=temp
print(data)
data[[0,1],:] = data[[1,0],:]
print(data)
print()
# Exercise 2: zero out the maximum element of a 1-D array.
data = np.arange(1,6)
print(data)
print (np.argmax (data)) # index of the maximum element
data[np.argmax(data)]=0
print(data)
# Exercise 3: broadcasting a row vector onto a zero matrix.
data = np.zeros((5,5))
data += np.arange(1,6)
print(data)
# Exercise 4: ones border on top/left, zeros elsewhere in a 4x4 interior.
data = np.ones((5,5),dtype=float)
data[:4,:4]=0
data[:1] = 1
data[:,0] =1
print(data)
print()
|
import math
def MergeSort(unsorted_list):
    """Sort *unsorted_list* in place using merge sort and return it.

    Splits recursively at the midpoint, sorts each half, then merges the two
    sorted halves back into the original list.

    BUG FIX: the merge step previously advanced the right index with
    ``j += j + 1``, skipping right-half elements and producing wrong output.
    """
    if len(unsorted_list) > 1:
        mid = len(unsorted_list) // 2
        left_half = unsorted_list[:mid]
        right_half = unsorted_list[mid:]
        MergeSort(left_half)
        MergeSort(right_half)
        i, j, k = 0, 0, 0
        # Merge: repeatedly take the smaller head of the two halves.
        while i < len(left_half) and j < len(right_half):
            if left_half[i] < right_half[j]:
                unsorted_list[k] = left_half[i]
                i += 1
            else:
                unsorted_list[k] = right_half[j]
                j += 1
            k += 1
        # Copy whichever half still has elements left.
        if i < len(left_half):
            unsorted_list[k:] = left_half[i:]
        if j < len(right_half):
            unsorted_list[k:] = right_half[j:]
    return unsorted_list
# def main():
# s = MergeSort([5,56,8,65,2,56,56,4564])
# print(s)
# if __name__ == '__main__':
# main() |
# -*- coding: utf-8 -*-
# @Time : 2021/5/9 7:05 下午
# @Author : AI悦创
# @FileName: lst_code.py
# @Software: PyCharm
# @Blog :http://www.aiyc.top
# @公众号 :AI悦创
# Demo of list mutation operations.
personal_info = ['aiyc', '男']
personal_info.append(1.74)          # -> ['aiyc', '男', 1.74]
personal_info.insert(1, 28)         # -> ['aiyc', 28, '男', 1.74]
personal_info[1] = 36               # -> ['aiyc', 36, '男', 1.74]
personal_info[0:2] = ['aiyc', 35]   # slice assignment -> ['aiyc', 35, '男', 1.74]
print(personal_info)
|
""""
input name and age,
create dictionary with name and age
add to users.append()
wrote array to json
"""
import json

# Collect user records from stdin and persist them to users.json.
# BUG FIX: the original `while True` loop had no exit condition, so the
# script never terminated; an empty name now ends input. Also dropped the
# pointless `f = json.dump(...)` binding (json.dump returns None).
users = []
while True:
    name = input("Enter your name:")
    if not name:
        break
    age = input("Enter your age: ")
    user = {'name': name, 'age': age}
    users.append(user)
    # Rewrite the whole file each round so data survives an interrupt.
    with open('users.json','w') as file_object:
        json.dump(users, file_object)
    print(users)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
class Timer(object):
    """A timer for measuring running time.

    Starts on construction; call end() to stop, report() to print minutes.

    FIX: the original used a Python 2 ``print`` statement, which is a
    syntax error on Python 3; converted to the print() function.
    """

    def __init__(self):
        # start_time is captured immediately; end_time stays None until end().
        self.start_time = time.time()
        self.end_time = None

    def end(self):
        """Stop the timer."""
        self.end_time = time.time()

    def report(self):
        """Print elapsed time in minutes, stopping the timer if still running."""
        if not self.end_time:
            self.end()
        print("Time:", (self.end_time - self.start_time) / 60, "mins")
|
import pygame
RED = (255, 0, 0)
class Hurdle:
    """A hurdle obstacle that scrolls right-to-left and wraps around."""

    def __init__(self, screen):
        self.screen = screen
        # Spawn at the right edge of the 610-px-wide play field.
        self.x = 610
        self.y = 200
        self.width = 10
        self.height = 50

    def draw(self):
        """Render the hurdle as a red rectangle on the screen surface."""
        bounds = [self.x, self.y, self.width, self.height]
        pygame.draw.rect(self.screen, RED, bounds)

    def update(self):
        """Move 10 px left; once at (or past) the left edge, respawn right."""
        if self.x <= 0:
            self.x = 610
        else:
            self.x -= 10
'''
Created on Sep 6, 2015
@author: hugosenari
'''
from circuits import Component, Event
class AMusicPlayer(Component):
    """Example circuits component that logs lifecycle and track events."""

    def started(self, component):
        # Fired by circuits when the component starts running.
        print("Hi")

    def stopped(self, *args, **kwd):
        # Fired by circuits when the component stops.
        print("See ya!")

    def new_setup(self, value):
        print('Nice to meet you')

    def pause_track(self):
        # Handler for the pause_track event below.
        print('I am a pause')

    def resume_track(self):
        # Handler for the resume_track event below.
        print('I am a play')
class play_file(Event):
    """Event to play a file"""
class resume_track(Event):
    """Event to play the current track"""
class stop_track(Event):
    """Event to stop the current track"""
class pause_track(Event):
    """Event to pause the current track"""
class played_track(Event):
    """Event fired when x time of the current track has been played"""
class go_to(Event):
    """Event to seek to time x of the current track"""
class track_ended(Event):
    """Event fired when the current file has finished playing"""
class update(Event):
    """Event to update the current track info"""
|
"""Interval estimation"""
import numpy as np
from scipy.optimize import toms748
from pyhf import get_backend
from pyhf.infer import hypotest
__all__ = ["upper_limit", "linear_grid_scan", "toms748_scan"]
def __dir__():
    """Restrict ``dir()`` on this module to the public API in ``__all__``."""
    return __all__
def _interp(x, xp, fp):
    """Backend-aware wrapper around 1-D linear interpolation (numpy.interp)."""
    tb, _ = get_backend()
    return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
def toms748_scan(
    data,
    model,
    bounds_low,
    bounds_up,
    level=0.05,
    atol=2e-12,
    rtol=1e-4,
    from_upper_limit_fn=False,
    **hypotest_kwargs,
):
    """
    Calculate an upper limit interval ``(0, poi_up)`` for a single
    Parameter of Interest (POI) using an automatic scan through
    POI-space, using the :func:`~scipy.optimize.toms748` algorithm.
    Example:
    >>> import numpy as np
    >>> import pyhf
    >>> pyhf.set_backend("numpy")
    >>> model = pyhf.simplemodels.uncorrelated_background(
    ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
    ... )
    >>> observations = [51, 48]
    >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
    >>> obs_limit, exp_limits = pyhf.infer.intervals.upper_limits.toms748_scan(
    ...     data, model, 0., 5., rtol=0.01
    ... )
    >>> obs_limit
    array(1.01156939)
    >>> exp_limits
    [array(0.5600747), array(0.75702605), array(1.06234693), array(1.50116923), array(2.05078912)]
    Args:
        data (:obj:`tensor`): The observed data.
        model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
        bounds_low (:obj:`float`): Lower boundary of search interval.
        bounds_up (:obj:`float`): Upper boundary of search interval.
        level (:obj:`float`): The threshold value to evaluate the interpolated results at.
            Defaults to ``0.05``.
        atol (:obj:`float`): Absolute tolerance.
            The iteration will end when the result is within absolute
            *or* relative tolerance of the true limit.
        rtol (:obj:`float`): Relative tolerance.
            For optimal performance this argument should be set
            to the highest acceptable relative tolerance.
        from_upper_limit_fn (:obj:`bool`): Internal flag set by
            :func:`upper_limit`; when ``True`` the cached scan points and
            their hypotest results are returned as a third element.
        hypotest_kwargs (:obj:`string`): Kwargs for the calls to
            :class:`~pyhf.infer.hypotest` to configure the fits.
    Returns:
        Tuple of Tensors:
            - Tensor: The observed upper limit on the POI.
            - Tensor: The expected upper limits on the POI.
    .. versionadded:: 0.7.0
    """
    cache = {}

    def f_cached(poi):
        # Memoize hypotest results: toms748 and best_bracket revisit POIs.
        if poi not in cache:
            cache[poi] = hypotest(
                poi,
                data,
                model,
                return_expected_set=True,
                **hypotest_kwargs,
            )
        return cache[poi]

    def f(poi, level, limit=0):
        # Use integers for limit so we don't need a string comparison
        # limit == 0: Observed
        # else: expected
        return (
            f_cached(poi)[0] - level
            if limit == 0
            else f_cached(poi)[1][limit - 1] - level
        )

    def best_bracket(limit):
        # return best bracket
        ks = np.asarray(list(cache))
        vals = np.asarray(
            [
                value[0] - level if limit == 0 else value[1][limit - 1] - level
                for value in cache.values()
            ]
        )
        pos = vals >= 0
        neg = vals < 0
        lower = ks[pos][np.argmin(vals[pos])]
        upper = ks[neg][np.argmax(vals[neg])]
        return (lower, upper)

    # extend bounds_low and bounds_up if they don't bracket CLs level
    lower_results = f_cached(bounds_low)
    # {lower,upper}_results[0] is an array and {lower,upper}_results[1] is a
    # list of arrays so need to turn {lower,upper}_results[0] into list to
    # concatenate them
    while np.any(np.asarray([lower_results[0]] + lower_results[1]) < level):
        bounds_low /= 2
        lower_results = f_cached(bounds_low)
    upper_results = f_cached(bounds_up)
    while np.any(np.asarray([upper_results[0]] + upper_results[1]) > level):
        bounds_up *= 2
        upper_results = f_cached(bounds_up)
    tb, _ = get_backend()
    obs = tb.astensor(
        toms748(f, bounds_low, bounds_up, args=(level, 0), k=2, xtol=atol, rtol=rtol)
    )
    exp = [
        tb.astensor(
            toms748(f, *best_bracket(idx), args=(level, idx), k=2, xtol=atol, rtol=rtol)
        )
        for idx in range(1, 6)
    ]
    if from_upper_limit_fn:
        return obs, exp, (list(cache), list(cache.values()))
    return obs, exp
def linear_grid_scan(
    data, model, scan, level=0.05, return_results=False, **hypotest_kwargs
):
    """
    Calculate an upper limit interval ``(0, poi_up)`` for a single
    Parameter of Interest (POI) using a linear scan through POI-space.
    Example:
    >>> import numpy as np
    >>> import pyhf
    >>> pyhf.set_backend("numpy")
    >>> model = pyhf.simplemodels.uncorrelated_background(
    ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
    ... )
    >>> observations = [51, 48]
    >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
    >>> scan = np.linspace(0, 5, 21)
    >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upper_limits.upper_limit(
    ...     data, model, scan, return_results=True
    ... )
    >>> obs_limit
    array(1.01764089)
    >>> exp_limits
    [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
    Args:
        data (:obj:`tensor`): The observed data.
        model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
        scan (:obj:`iterable`): Iterable of POI values.
        level (:obj:`float`): The threshold value to evaluate the interpolated results at.
        return_results (:obj:`bool`): Whether to return the per-point results.
        hypotest_kwargs (:obj:`string`): Kwargs for the calls to
            :class:`~pyhf.infer.hypotest` to configure the fits.
    Returns:
        Tuple of Tensors:
            - Tensor: The observed upper limit on the POI.
            - Tensor: The expected upper limits on the POI.
            - Tuple of Tensors: The given ``scan`` along with the
              :class:`~pyhf.infer.hypotest` results at each test POI.
              Only returned when ``return_results`` is ``True``.
    .. versionadded:: 0.7.0
    """
    tb, _ = get_backend()
    results = [
        hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)
        for mu in scan
    ]
    obs = tb.astensor([[r[0]] for r in results])
    exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])
    # Rows of result_array: observed, then the five expected bands.
    result_array = tb.concatenate([obs, exp], axis=1).T
    # observed limit and the (0, +-1, +-2)sigma expected limits
    # (reversed so the CLs values are increasing, as np.interp requires)
    limits = [_interp(level, result_array[idx][::-1], scan[::-1]) for idx in range(6)]
    obs_limit, exp_limits = limits[0], limits[1:]
    if return_results:
        return obs_limit, exp_limits, (scan, results)
    return obs_limit, exp_limits
def upper_limit(
    data, model, scan=None, level=0.05, return_results=False, **hypotest_kwargs
):
    """
    Calculate an upper limit interval ``(0, poi_up)`` for a single Parameter of
    Interest (POI) using root-finding or a linear scan through POI-space.
    Example:
    >>> import numpy as np
    >>> import pyhf
    >>> pyhf.set_backend("numpy")
    >>> model = pyhf.simplemodels.uncorrelated_background(
    ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
    ... )
    >>> observations = [51, 48]
    >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
    >>> scan = np.linspace(0, 5, 21)
    >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upper_limits.upper_limit(
    ...     data, model, scan, return_results=True
    ... )
    >>> obs_limit
    array(1.01764089)
    >>> exp_limits
    [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
    Args:
        data (:obj:`tensor`): The observed data.
        model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
        scan (:obj:`iterable` or ``None``): Iterable of POI values or ``None`` to use
            :class:`~pyhf.infer.intervals.upper_limits.toms748_scan`.
        level (:obj:`float`): The threshold value to evaluate the interpolated results at.
        return_results (:obj:`bool`): Whether to return the per-point results.
    Returns:
        Tuple of Tensors:
            - Tensor: The observed upper limit on the POI.
            - Tensor: The expected upper limits on the POI.
            - Tuple of Tensors: The given ``scan`` along with the
              :class:`~pyhf.infer.hypotest` results at each test POI.
              Only returned when ``return_results`` is ``True``.
    .. versionadded:: 0.7.0
    """
    if scan is not None:
        return linear_grid_scan(
            data, model, scan, level, return_results, **hypotest_kwargs
        )
    # else:
    # No scan given: root-find within the model's suggested POI bounds.
    bounds = model.config.suggested_bounds()[
        model.config.par_slice(model.config.poi_name).start
    ]
    obs_limit, exp_limit, results = toms748_scan(
        data,
        model,
        bounds[0],
        bounds[1],
        from_upper_limit_fn=True,
        **hypotest_kwargs,
    )
    if return_results:
        return obs_limit, exp_limit, results
    return obs_limit, exp_limit
|
"""Open a connection with specified settings"""
import serial
#
#
# last modified : 2015-08-06
class PortFormat(object):
    """Bundle of serial-connection settings: name, baud rate, framing, timeout."""

    # Default read timeout, in seconds.
    DEF_TIMEOUT = 10

    def __init__(self, name=0, baudrate=9600, bytesize=8, parity='N',
                 stopbits=1, timeout=DEF_TIMEOUT):
        self.name = name          # port name (or pyserial port index)
        self.baudrate = baudrate  # baud rate
        self.bytesize = bytesize  # number of data bits
        self.parity = parity      # parity mode
        self.stopbits = stopbits  # number of stop bits
        self.timeout = timeout    # read timeout in seconds
class SerialConnection(serial.Serial):
    """Manages a single serial port, with one specific format.

    NOTE(review): relies on the ``setBaudrate``/``setByteSize``/... setters,
    which existed in pyserial 2.x and were removed in pyserial 3.0 — confirm
    the pinned pyserial version before upgrading.
    """
    # Control characters used by the wire protocol, by mnemonic name.
    ASCII_codes = {"ENQ": "\x05", "ACK":"\x06", "LF":"\x0A", "CR":"\x0D", "ETX":"\x03", "NAK":"\x15"}

    def __init__(self, portFormat=None):
        """ Open a connection at port <portName>. If no port name is provided,
        opens the first available port (as numbered by pyserial). If no data
        format is provided, use the default settings"""
        if (portFormat is None ):
            portName = 0
        else:
            portName = portFormat.name
        #opens the connection
        super(SerialConnection, self).__init__(port=portName)
        self.portFormat = portFormat
        if not (portFormat is None):
            #unpack the specified settings
            self.loadFormat(portFormat)

    def loadFormat(self, portFormat):
        """apply settings, if they are acceptable to serial.Serial"""
        self.setBaudrate(portFormat.baudrate)
        self.setByteSize(portFormat.bytesize)
        self.setParity(portFormat.parity)
        self.setStopbits(portFormat.stopbits)
        self.setTimeout(portFormat.timeout)

    def getASCIICode(self,msg):
        """Return the control character for mnemonic *msg*; raise ValueError otherwise."""
        if not (msg in self.ASCII_codes.keys()):
            raise ValueError("not an allowed message")
        return self.ASCII_codes[msg]
|
#%%
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#%%
# Load the dataset -> .train .test .validation
# images are 28x28 -> 784 pixels
# the training set holds 55000 images
# dataset shape -> 55000 x 784
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Build the softmax-regression model (TensorFlow 1.x graph API).
# x -> pixels of the input image
x = tf.placeholder(tf.float32, [None, 784]) # None: batch dimension of any size
# Model parameters are always Variables: they may change during computation.
# Create the weights
W = tf.Variable(tf.zeros([784, 10])) # maps the 784 inputs onto 10 outputs
b = tf.Variable(tf.zeros([10])) # bias added to the outputs
# Define the model
y = tf.matmul(x, W) + b # matmul -> matrix multiplication
# Placeholder for the correct (one-hot) labels
y_ = tf.placeholder(tf.float32, [None, 10])
# Error measure (cross-entropy):
# take the log of the prediction y, multiply by the correct answer,
# then sum over the second dimension of y (reduction_indices),
# then average over all examples.
# numerically unstable version:
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# numerically stable version:
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# Minimize the error with gradient descent (learning rate 0.5)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Start the session
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# _ ignores the loop variable
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Count the examples the model predicted correctly:
# argmax of y is the model's prediction, argmax of y_ the correct answer
# (in a one-hot array, argmax returns the index of the 1).
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# equal returns an array of booleans; cast to float to sum and average.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Evaluate the accuracy on the held-out test data.
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
# Concatenation - joining strings
print("Iraj")
print("Spam" + 'eggs')
print("1"+"1")
# Gotcha: a number cannot be concatenated with a string directly.
# wrong: print("spam"+1+"egg") -- raises TypeError
print("spam"+ str(1)+"egg")
# Multiplying a string repeats it
print("spam " * 3)
# What is the output of the following? ('777' -- repetition, not arithmetic)
print(3*'7')
# Using in-place operators
x = "spam"
print(x)
x += "eggs"
print(x)
# "3" + "4" concatenates to "34" before int() converts it
y=int("3"+"4")
x=1
print(x+y)
# pcb = process control block
class PCB:
    """Process control block: per-process bookkeeping for the simulator."""

    def __init__(self, identificador, cantInst, nomPrograma):
        self.pid = identificador
        # Program counter: number of instructions executed so far.
        self.pc = 0
        self.estado = "new"
        # Total number of instructions in the program.
        self.cantInst = cantInst
        self.baseDirection = 0
        # self.prioridad = prioridad
        self.nomPrograma = nomPrograma

    def termino(self):
        """Return True once every instruction has been executed."""
        return self.pc >= self.cantInst

    def incrementoPc(self):
        """Advance the program counter by one and log the new value."""
        self.pc += 1
        print("PCB " + str(self.pid) + "------->PC " + str(self.pc))
#!/usr/bin/python
import importlib
import json
import re
import requests
import sys
import traceback
sys.path.insert(0, 'challenges')
# Fixture table: per-challenge inputs and the expected output, keyed by the
# challenge module's name.
# NOTE(review): the challenge_4 fixture is fetched over the network at import
# time; importing this module offline will fail -- consider downloading lazily.
test_data = {
    'challenge_1': {
        'inputs': [('49276d206b696c6c696e6720796f757220627261696e206c696b65206'
                    '120706f69736f6e6f7573206d757368726f6f6d')],
        'output': ('SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2'
                   'hyb29t')
    },
    'challenge_2': {
        'inputs': ['1c0111001f010100061a024b53535009181c',
                   '686974207468652062756c6c277320657965'],
        'output': '746865206b696420646f6e277420706c6179'
    },
    'challenge_3': {
        'inputs': [('1b37373331363f78151b7f2b783431333d78397828372d363c783'
                    '73e783a393b3736')],
        'output': "Cooking MC's like a pound of bacon"
    },
    'challenge_4': {
        'inputs': [requests.get('https://cryptopals.com/static/challenge-data/'
                                '4.txt').text],
        'output': "Now that the party is jumping\n"
    },
    'challenge_5': {
        'inputs': [("Burning 'em, if you ain't quick and nimble\nI go crazy "
                    "when I hear a cymbal"),
                   'ICE'],
        'output': ('0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2'
                   'a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2'
                   '027630c692b20283165286326302e27282f')
    }
}
def run_test(test_name):
    """Import the challenge module ``test_name`` and check it against its fixture.

    Raises:
        Exception: when no fixture is recorded for ``test_name``.
        AssertionError: when the module's output differs from the fixture.
    """
    if test_name not in test_data:
        raise Exception('Test not implemented')
    fixture = test_data[test_name]
    module = importlib.import_module(test_name)
    assert module.main(*fixture['inputs']) == fixture['output']
def test_challenge_5():
    """Regression test for challenge 5 (repeating-key XOR).

    BUG FIX: the original body referenced the undefined names ``output``
    and ``test_output`` and therefore always raised NameError; delegate to
    the shared fixture-driven runner instead.
    """
    run_test('challenge_5')
def main(*challenge_ids):
    '''Run the whole test suite, or only the challenges whose ids are given.

    Reads challenge metadata from cryptopals.json, runs each selected
    challenge through run_test, and prints a colored PASSED/FAILED line.
    When specific ids were requested, a failure also prints the indented
    traceback to aid debugging.
    '''
    with open('cryptopals.json') as fh:
        cryptopals_json = json.load(fh)
    # cryptopals_json maps sets -> challenges -> metadata (e.g. 'title').
    for set_dict in cryptopals_json['sets'].values():
        for challenge_id, data in set_dict['challenges'].items():
            if challenge_ids and challenge_id not in challenge_ids:
                continue
            print('\x1b[1mChallenge #{}:\x1b[0m'
                  '\x1b[33m{}\x1b[0m'.format(challenge_id, data['title']))
            try:
                run_test('challenge_{}'.format(challenge_id))
                print(' \x1b[32;1mPASSED\x1b[0m')
            except Exception as e:
                print(' \x1b[31;1mFAILED\x1b[0m:', e)
                if challenge_ids:
                    # Indent every traceback line for readability.
                    tb = traceback.format_exc()
                    print(re.sub('^', ' ', tb, flags=re.MULTILINE))
if __name__ == '__main__':
    # Any command-line arguments are treated as challenge ids to run.
    main(*sys.argv[1:])
|
import sys
from pathlib import Path
try:
import wclib
except ImportError:
# wclib may not be in the path because of the architecture
# of all the challenges and the fact that there are many
# way to run them (through the showcase, or on their own)
ROOT_FOLDER = Path(__file__).parent.parent.parent
sys.path.append(str(ROOT_FOLDER))
import wclib
# This line tells python how to handle the relative imports
# when you run this file directly.
__package__ = "01-fog-of-war." + Path(__file__).parent.name
# ---- Recommended: don't modify anything above this line ---- #
# Metadata about your submission
# aka bitcraft
__author__ = "thecornboss#2284"
__achievements__ = [
"Casual",
"Ambitious",
"Adventurous",
]
"""
improvements to be made:
* smarter management of the blocked cells table
* faster mask creation
* scale the cells to a higher resolution buffer
* gaussian blur
* needs code cleanup
* faster FOV -- not suitable for large light radius
* current FOV doesn't handle arbitrary shapes
* figure out method for light bleeding besides using `circle`
-- bitcraft, 2021
"""
from functools import partial
from operator import attrgetter
from random import randint
import pygame
# To import the modules in yourname/, you need to use relative imports,
# otherwise your project will not be compatible with the showcase.
from .objects import Ghost, Player, SolidObject
# Background fill colour (hex RGB).
BACKGROUND = 0x66856C
# Octant transforms for recursive shadowcasting: each of the 8 columns is
# one (xx, xy, yx, yy) multiplier set selecting a symmetry region; see
# get_visible_points / _cast_light below.
MULT = [
    [1, 0, 0, -1, -1, 0, 0, 1],
    [0, 1, -1, 0, 0, -1, 1, 0],
    [0, 1, 1, 0, 0, -1, -1, 0],
    [1, 0, 0, 1, -1, 0, 0, -1],
]
def get_visible_points(vantage_point, get_allows_light, max_distance=30):
    """
    Return the points visible from ``vantage_point``.

    Runs the recursive shadowcaster once per octant and aggregates the
    results into ``(visible, blocked, distance)``, where ``distance`` maps
    each reached point to its squared distance from the vantage point.

    Adopted from https://raw.githubusercontent.com/irskep/clubsandwich/afc79ed/clubsandwich/line_of_sight.py
    Adapted from `this RogueBasin article <http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation>`_.
    """
    vx, vy = vantage_point
    visible = {vantage_point}
    blocked = set()
    dist = {vantage_point: 0}
    for octant in range(8):
        _cast_light(
            visible,
            blocked,
            dist,
            get_allows_light,
            vx,
            vy,
            1,
            1.0,
            0.0,
            max_distance,
            MULT[0][octant],
            MULT[1][octant],
            MULT[2][octant],
            MULT[3][octant],
        )
    return visible, blocked, dist
def _cast_light(
    los_cache,
    blocked_set,
    distance,
    get_allows_light,
    cx,
    cy,
    row,
    start,
    end,
    radius,
    xx,
    xy,
    yx,
    yy,
):
    """Recursive shadowcasting over one octant (see RogueBasin article).

    Mutates ``los_cache`` (visible points), ``blocked_set`` (opaque points
    reached by light) and ``distance`` (squared distance per point).
    ``(xx, xy, yx, yy)`` is one octant transform column from MULT;
    ``start``/``end`` are the slopes bounding the currently lit wedge.
    """
    if start < end:
        return
    radius_squared = radius ** 2
    for j in range(row, radius + 1):
        dx, dy = -j - 1, -j
        blocked = False
        while dx <= 0:
            dx += 1
            # Translate local (dx, dy) into map coordinates via the octant transform.
            X, Y = cx + dx * xx + dy * xy, cy + dx * yx + dy * yy
            point = X, Y
            # l_slope / r_slope bound the cell's left and right extremities.
            l_slope, r_slope = (dx - 0.5) / (dy + 0.5), (dx + 0.5) / (dy - 0.5)
            if start < r_slope:
                continue
            elif end > l_slope:
                break
            else:
                # Cell is inside the lit wedge; record it when in range.
                d = dx ** 2 + dy ** 2
                if d < radius_squared:
                    los_cache.add(point)
                    distance[point] = d
                if blocked:
                    # Currently scanning along a run of blocking cells.
                    # NOTE(review): ``new_start`` is first assigned in the
                    # non-blocked branch below; this relies on a blocker
                    # having been seen earlier in the same scan -- confirm.
                    if not get_allows_light(point):
                        new_start = r_slope
                        continue
                    else:
                        blocked = False
                        start = new_start
                else:
                    if not get_allows_light(point) and j < radius:
                        # A blocker starts here: recurse on the sub-wedge in
                        # front of it, then narrow the current scan.
                        blocked = True
                        blocked_set.add(point)
                        distance[point] = d
                        _cast_light(
                            los_cache,
                            blocked_set,
                            distance,
                            get_allows_light,
                            cx,
                            cy,
                            j + 1,
                            start,
                            l_slope,
                            radius,
                            xx,
                            xy,
                            yx,
                            yy,
                        )
                        new_start = r_slope
        if blocked:
            break
def mainloop():
    """Generator-based game loop for the fog-of-war submission.

    wclib drives the loop: each frame it sends ``(screen, events)`` in
    through ``yield``; returning ends the game (on pygame.QUIT).  A
    low-resolution fog/light grid is maintained, drawn at cell resolution,
    then scaled up and multiplied over the frame.
    """
    player = Player((100, 100))
    trees = SolidObject.generate_many(36)
    ghosts = [Ghost() for _ in range(16)]
    all_objects = trees + ghosts + [player]
    ## configurable parameters
    # smaller cells are slower but gives higher resolution mask
    cell_size_px = 4
    # light radius in pixels
    light_radius_px = 140
    # darkness of the explored areas
    explored_value = 64
    # min expose radius when light touches a sprite
    blocked_expose_min_px = 48
    # max expose radius when light touches a sprite
    blocked_expose_max_px = 64
    # min expose radius when light doesn't touch a sprite
    unblocked_expose_min_px = 8
    # max expose radius when light doesn't touch a sprite
    unblocked_expose_max_px = 24
    # radius of cast light around source
    cast_light_px = 32
    # values greater than 1 will result in color bands and faster/retro rendering
    # setting it to 1 will result in a smooth light cast. must be changed with cell
    # size and light radius, or light won't render at correct brightness.
    # table for known values:
    # cell   value   notes
    # 4      400     3 color bands
    # 4      17      higher values create noticeable moire pattern
    # 4      3       no artifacts, some caching
    light_mod_value = 400
    light_radius = int(light_radius_px // cell_size_px)
    light_radius2 = light_radius ** 2
    light_radius_px2 = light_radius_px ** 2
    new_points = set()
    # Frame-persistent state, (re)initialized when the screen surface changes.
    cast_light = None
    blocked_light_min = None
    blocked_light_max = None
    unblocked_light_min = None
    unblocked_light_max = None
    old_screen = None
    work_surface = None
    fog_tiles = None
    fog_table = None
    fog_surface = None
    light_surface = None
    clock = pygame.time.Clock()
    while True:
        screen, events = yield
        for event in events:
            if event.type == pygame.QUIT:
                return
        sw, sh = screen.get_size()
        stw = sw // cell_size_px
        sth = sh // cell_size_px
        # init fog stuff (first frame, or whenever the screen surface changes)
        if old_screen is not screen:
            old_screen = screen
            fog_size = sw // cell_size_px, sh // cell_size_px
            work_surface = pygame.surface.Surface((sw, sh))
            fog_surface = pygame.surface.Surface(fog_size)
            light_surface = pygame.surface.Surface(fog_size)
            blocked_light_min = max(1, blocked_expose_min_px // cell_size_px)
            blocked_light_max = max(1, blocked_expose_max_px // cell_size_px)
            unblocked_light_min = max(1, unblocked_expose_min_px // cell_size_px)
            unblocked_light_max = max(1, unblocked_expose_max_px // cell_size_px)
            cast_light = max(1, cast_light_px // cell_size_px)
            fog_table = dict()
            fog_tiles = list()
            for y in range(0, sth * cell_size_px, cell_size_px):
                row = list()
                fog_tiles.append(row)
                for x in range(0, stw * cell_size_px, cell_size_px):
                    row.append(False)
        for obj in all_objects:
            obj.logic(objects=all_objects)
        # clear collision mask (True = cell lets light through)
        for row in fog_tiles:
            for x in range(len(row)):
                row[x] = True
        # set collision mask
        for obj in all_objects:
            if obj is player:
                continue
            # cast shadow using bbox of image
            rect = obj.bbox.move(obj.rect.topleft)
            left = rect.left // cell_size_px
            top = rect.top // cell_size_px
            right = rect.right // cell_size_px
            bottom = rect.bottom // cell_size_px
            for y in range(top, bottom + 1):
                for x in range(left, right + 1):
                    try:
                        fog_tiles[y][x] = False
                    except IndexError:
                        pass
            # cast shadow with mask - very glitchy
            # rect = obj.rect
            # left = rect.left // cell_size_px
            # top = rect.top // cell_size_px
            # for point in obj.outline:
            #     x, y = point
            #     x += left
            #     y += top
            #     try:
            #         fog_tiles[y][x] = False
            #     except IndexError:
            #         pass
        # smooth fade and hide ghosts: closer ghosts are more opaque
        x1, y1 = player.rect.center
        for ghost in ghosts:
            x2, y2 = ghost.rect.center
            d2 = (x1 - x2) ** 2 + (y1 - y2) ** 2
            if d2 < light_radius_px2:
                opacity = 1 - (d2 / light_radius_px2)
                opacity = int(out_quad(opacity) * 255)
                opacity = min(255, max(0, opacity))
                ghost.hidden = False
                ghost.opacity = opacity
            else:
                ghost.hidden = True
        # get visible and blocked cells, with the distance for them
        px = player.rect.centerx // cell_size_px
        py = player.rect.centery // cell_size_px
        func = partial(light, fog_tiles)
        visible, blocked, distance = get_visible_points((px, py), func, light_radius)
        # update the fog table, order of update is important; blocked first, then visible
        new_points.clear()
        for point in blocked:
            if point not in fog_table:
                size = randint(blocked_light_min, blocked_light_max)
                fog_table[point] = size
                new_points.add((point, size))
        for point in visible:
            if point not in fog_table:
                size = randint(unblocked_light_min, unblocked_light_max)
                fog_table[point] = size
                new_points.add((point, size))
        draw_fog(
            fog_surface,
            light_surface,
            visible,
            distance,
            new_points,
            explored_value,
            light_radius2,
            cast_light,
            light_mod_value,
        )
        screen.fill(BACKGROUND)
        # Painter's-order draw: objects sorted by their bottom edge.
        for obj in sorted(all_objects, key=attrgetter("rect.bottom")):
            obj.draw(screen)
        # Upscale the low-res light map and multiply it over the frame.
        pygame.transform.scale(light_surface, work_surface.get_size(), work_surface)
        screen.blit(work_surface, (0, 0), special_flags=pygame.BLEND_RGB_MULT)
        clock.tick(60)
def draw_fog(
    fog_surface,
    light_surface,
    visible,
    distance,
    new_points,
    explored_value,
    light_radius2,
    cast_light_radius,
    light_mod_value,
):
    """
    Update the fog-of-war surfaces for one frame.

    * expose newly explored map areas on ``fog_surface`` (white circles)
    * reset ``light_surface`` to the explored base colour
    * draw the visible cells furthest-first, brightness derived from their
      squared distance (cached per distance band)
    * finally multiply the fog surface into the light surface
    """
    base_color = explored_value, explored_value, explored_value
    white = 255, 255, 255
    circle = pygame.draw.circle
    fog_surface.lock()
    light_surface.lock()
    # Newly explored cells punch white holes into the fog.
    for center, radius in new_points:
        circle(fog_surface, white, center, radius)
    # Visible cells, ordered from furthest to closest.
    ordered = sorted(((distance[p], p) for p in visible), reverse=True)
    # Brightness cache keyed by distance band; avoids recomputing the easing
    # for every cell and enables deliberate colour banding.
    band_colors = {}
    light_surface.fill(base_color)
    value_range = 255 - explored_value
    for d, center in ordered:
        band = d // light_mod_value
        color = band_colors.get(band)
        if color is None:
            fraction = 1 - (d / light_radius2)
            value = int(out_quad(fraction) * value_range) + explored_value
            value = min(255, max(0, value))
            color = value, value, value
            band_colors[band] = color
        circle(light_surface, color, center, cast_light_radius)
    fog_surface.unlock()
    light_surface.unlock()
    light_surface.blit(fog_surface, (0, 0), special_flags=pygame.BLEND_MULT)
def light(fog_tiles, point):
    """Return the light-transparency flag of the cell at ``point`` (x, y).

    Points whose indices fall outside the grid are treated as opaque.
    """
    x, y = point
    try:
        return fog_tiles[y][x]
    except IndexError:
        return False
def out_quad(progress):
    """Quadratic ease-out: maps 0 -> 0 and 1 -> 1, decelerating toward 1."""
    return progress * (2.0 - progress)
if __name__ == "__main__":
wclib.run(mainloop())
|
import os
from ImagePreprocessing import ReadExpoTimes, ReadTrainingData
from ComputeTrainingExamples import ComputeTrainingExamples
import Constants
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
from contextlib import closing
import h5py
from ModelUtilities import list_all_files_sorted
from ModelUtilities import list_all_folders
from tqdm import tqdm
# Root folder that holds the raw scene images.
scene_root = Constants.scenes_root
# Input scene folders, one per split (training / test).
training_scene_directory = os.path.join(scene_root, Constants.training_directory, "")
test_scene_directory = os.path.join(scene_root, Constants.test_directory, "")
# Output folders for the generated per-scene .data (HDF5) files, mirrored per split.
training_data_root = Constants.training_data_root
train_set_training_data_directory = os.path.join(training_data_root, Constants.training_directory, "")
test_set_training_data_directory = os.path.join(training_data_root, Constants.test_directory, "")
def prepare_training_data(params):
    """Build and save the training examples for a single scene.

    Args:
        params: ``(scene, is_training_set)`` pair; ``scene`` is the scene
            folder name and ``is_training_set`` is truthy for the training
            split, falsy for the test split.

    Writes ``<scene>.data`` (HDF5 with "inputs" and "labels" datasets)
    into the split's output directory.
    """
    scene, is_training_set = params
    if is_training_set:
        training_data_directory = train_set_training_data_directory
        scene_directory = training_scene_directory
    else:
        training_data_directory = test_set_training_data_directory
        scene_directory = test_scene_directory
    os.makedirs(training_data_directory, exist_ok=True)
    # Read exposure times for the scene
    expoTimes = ReadExpoTimes(os.path.join(scene_directory, scene, 'exposure.txt'))
    # Read the scene's images
    fileNames = list_all_files_sorted(os.path.join(scene_directory, scene), '.tif')
    imgs, label = ReadTrainingData(fileNames)
    # Compute the training examples for the scene
    computed, computedLabel = ComputeTrainingExamples(imgs, expoTimes, label, is_training_set)
    # Move the last axis to the front twice -- presumably (H, W, C, N) to
    # (N, C, H, W) channel-first layout; TODO confirm against the consumer.
    computed = np.rollaxis(np.rollaxis(computed, 3), 3, 1)
    computedLabel = np.rollaxis(np.rollaxis(computedLabel, 3), 3, 1)
    print("WRITING")
    # FIX: context manager guarantees the HDF5 file is closed even when a
    # write fails (the original leaked the handle on exceptions).
    with h5py.File(os.path.join(training_data_directory, scene + ".data"), 'w') as hf:
        hf.create_dataset("inputs", data=computed)
        hf.create_dataset("labels", data=computedLabel)
def distribute_training_data_preparation():
    """Prepare training data for every scene not yet processed.

    Scans both splits, skips scenes whose output file already exists, and
    processes the remainder in a single-worker pool with a progress bar.
    """
    def _pending(scene_dir, data_dir):
        # Scenes in scene_dir that have no output file in data_dir yet.
        done = {os.path.basename(p).split('.')[0]
                for p in list_all_files_sorted(data_dir)}
        return sorted(set(list_all_folders(scene_dir)) - done)

    training_scenes = _pending(training_scene_directory, train_set_training_data_directory)
    test_scenes = _pending(test_scene_directory, test_set_training_data_directory)
    # BUG FIX: the original called np.concatenate on ``set`` objects, which
    # raises because a set becomes a zero-dimensional object array.  Plain
    # list concatenation also keeps the scene order aligned with the split
    # flags built below.
    scenes = training_scenes + test_scenes
    is_training_set_params = [True] * len(training_scenes) + [False] * len(test_scenes)
    parameters = zip(scenes, is_training_set_params)
    #Parallel(n_jobs=-1)(delayed(prepare_training_data)(scene, is_training_set) for (scene, is_training_set) in parameters)
    with closing(multiprocessing.pool.Pool(processes=1, maxtasksperchild=1)) as pool:
        with tqdm(total=len(scenes)) as pbar:
            for i, _ in tqdm(enumerate(pool.imap_unordered(prepare_training_data, parameters))):
                pbar.update()
if __name__ == "__main__":
distribute_training_data_preparation()
|
import os
import FileUtils
#fileName = "datafile.txt" #input ("Enter file name ")
fileName = FileUtils.selectOpenFile ("Select data file to open", "Read File Example")
if fileName == None: #user pressed cancel/X
os._exit(0)
#check if the file actually exists before trying to open the file
if os.path.isfile (fileName) == False:
print ("Data file does not exist")
os._exit(0) #ends the program
inFile = open (fileName, "r") #we will read, or receive info from the data file
numOfStudents = 0
total = 0
for line in inFile:
line = line.rstrip() #remove the linefeed that is part of any data returned from the file
numOfStudents = numOfStudents + 1
total = total + int(line)
print (line)
inFile.close()
if numOfStudents != 0:
average = total / numOfStudents
print ("Total of the values: ", total)
print ("Averge of the values: ", format (average, "1.2f"))
else:
print ("Nothing there")
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from blog.models import Cursos, RedesSociales, Proyecto, Menu, Contacto, Categoria
from blog.forms import ContactoForm
from datetime import datetime
# Create your views here.
def index(request):
    """Render the home page: courses, social links, latest projects, menu."""
    redes = RedesSociales.objects.all()
    contexto = {
        'cursos': Cursos.objects.order_by('id'),
        'Github': redes[0].link,
        'Linkedin': redes[1].link,
        'proyectos': Proyecto.objects.all().order_by('-fecha').filter(estado='Publicado')[:5],
        'menu': Menu.objects.order_by('id'),
    }
    return render(request, 'blog/index.html', contexto)
def proyectos(request):
    """Render the project list page (published projects, newest first)."""
    redes = RedesSociales.objects.all()
    publicados = Proyecto.objects.all().order_by('-fecha').filter(estado='Publicado')
    contexto = {
        'proyectos': publicados,
        'Github': redes[0].link,
        'Linkedin': redes[1].link,
        'menu': Menu.objects.order_by('id'),
    }
    return render(request, 'blog/proyectos_index.html', contexto)
def proyecto(request, url):
    """Render the detail page for the project whose slug is ``url``."""
    redes = RedesSociales.objects.all()
    publicacion = Proyecto.objects.filter(url=url).first()
    contexto = {
        'proyecto': publicacion,
        'Github': redes[0].link,
        'Linkedin': redes[1].link,
        'menu': Menu.objects.order_by('id'),
    }
    return render(request, 'blog/publicacion.html', contexto)
def categorias(request):
    """Render the category overview page."""
    redes = RedesSociales.objects.all()
    contexto = {
        'categorias': Categoria.objects.all(),
        'Github': redes[0].link,
        'Linkedin': redes[1].link,
        'menu': Menu.objects.order_by('id'),
    }
    return render(request, 'blog/categorias.html', contexto)
def contacto(request):
    """Show the contact form; on a valid POST, store the message and redirect.

    An invalid POST falls through and re-renders the bound form so its
    validation errors are shown.
    """
    redes = RedesSociales.objects.all()
    menu = Menu.objects.order_by('id')
    if request.method == 'POST':
        form = ContactoForm(request.POST)
        if form.is_valid():
            datos = form.cleaned_data
            Contacto(
                nombre=datos['nombre'],
                email=datos['email'],
                mensaje=datos['mensaje'],
                fecha=datetime.now(),
            ).save()
            return redirect('inicio')
    else:
        # GET: present an unbound form.
        form = ContactoForm()
    return render(request, 'blog/formulario.html', {'form': form, 'Github': redes[0].link, 'Linkedin': redes[1].link, 'menu': menu})
def get_profile_details_event_list():
    """Return the selectable profile-detail event fields.

    Each entry pairs a machine-readable field name (``'value'``) with its
    human-readable display label (``'text'``).
    """
    options = [
        ('active_suspend_reason', 'Active/Suspend Reason'),
        ('acquiring_sales_executive_id', 'Agent - Acquiring Sales Executive'),
        ('bank_branch_area', 'Agent - Bank Branch Area'),
        ('bank_branch_city', 'Agent - Bank Branch City'),
        ('bank_name', 'Agent - Bank Name'),
        ('contract_day_of_period_reconciliation', 'Agent - Contract Day/ Period of Reconciliation'),
        ('contract_expired_date', 'Agent - Contract Expiry Date'),
        ('contract_issue_date', 'Agent - Contract Issue Date'),
        ('contract_number', 'Agent - Contract Number'),
        ('contract_sign_date', 'Agent - Contract Sign Date'),
        ('contract_type', 'Agent - Contract Type'),
        ('contract_extended_type', 'Agent - Extended Type'),
        ('grand_parent_id', 'Agent - Grand Parent ID'),
        ('kyc_level', 'Agent - KYC Level'),
        ('national', 'Agent - National'),
        ('parent_id', 'Agent - Parent ID'),
        ('primary_expire_date', 'Agent - Primary Expire Date'),
        ('primary_identify_id', 'Agent - Primary Identity ID'),
        ('primary_identify_type', 'Agent - Primary Identity Type'),
        ('primary_issue_date', 'Agent - Primary Issue Date'),
        ('primary_place_of_issue', 'Agent - Primary Place of Issue'),
        ('secondary_expire_date', 'Agent - Secondary Expire Date'),
        ('secondary_identify_id', 'Agent - Secondary Identity ID'),
        ('secondary_identify_type', 'Agent - Secondary Identity Type'),
        ('secondary_issue_date', 'Agent - Secondary Issue Date'),
        ('secondary_mobile_number', 'Agent - Secondary Mobile Number'),
        ('secondary_place_of_issue', 'Agent - Secondary Place of Issue'),
        ('tertiary_mobile_number', 'Agent - Tertiary Mobile Number'),
        ('agent_type_id', 'Agent Type ID'),
        ('current_address', 'Current Address'),
        ('current_city', 'Current City'),
        ('current_commune', 'Current Commune'),
        ('current_country', 'Current Country'),
        ('current_district', 'Current District'),
        ('current_landmark', 'Current Landmark'),
        ('current_latitude', 'Current Latitude'),
        ('current_longitude', 'Current Longitude'),
        ('current_province', 'Current Province'),
        ('beneficiary', 'Customer - Benificiary'),
        ('citizen_card_date_of_issue', 'Customer - Citizen Card Date of Issue'),
        ('citizen_card_id', 'Customer - Citizen Card ID'),
        ('citizen_card_place_of_issue', 'Customer - Citizen Card Place of Issue'),
        ('device_description', 'Customer - Device Description'),
        ('device_id', 'Customer - Device ID'),
        ('middle_name', 'Customer - Middle Name'),
        ('occupations', 'Customer - Occupations'),
        ('passport_date_of_issue', 'Customer - Passport Date of Issue'),
        ('passport_id', 'Customer - Passport ID'),
        ('passport_place_of_issue', 'Customer - Passport Place of Issue'),
        ('place_of_birth', 'Customer - Place of Birth'),
        ('profile_picture_url', 'Customer - Profile Picture URL'),
        ('social_security_id', 'Customer - Social Security ID'),
        ('tax_id', 'Customer - Tax ID'),
        ('date_of_birth', 'Date of Birth'),
        ('email', 'Email'),
        ('first_name', 'First Name'),
        ('gender', 'Gender'),
        ('kyc_remark', 'KYC Remark'),
        ('kyc_status', 'KYC Status'),
        ('kyc_updated_timestamp', 'KYC Last Updated Timestamp'),
        ('last_name', 'Last Name'),
        ('nationality', 'Nationality'),
        ('permanent_address', 'Permanent Address'),
        ('permanent_city', 'Permanent City'),
        ('permanent_commune', 'Permanent Commune'),
        ('permanent_country', 'Permanent Country'),
        ('permanent_district', 'Permanent District'),
        ('permanent_landmark', 'Permanent Landmark'),
        ('permanent_latitude', 'Permanent Latitude'),
        ('permanent_longitude', 'Permanent Longitude'),
        ('permanent_province', 'Permanent Province'),
        ('primary_mobile_number', 'Primary Mobile Number'),
        ('profile_created_timestamp', 'Profile Created Timestamp'),
        ('profile_is_deleted', 'Profile Is Deleted'),
        ('profile_last_updated_timestamp', 'Profile Last Updated Timestamp'),
        ('unique_reference', 'Unique Reference'),
        ('user_id', 'User ID'),
        ('user_is_suspend', 'User Is suspended'),
        ('with_linked_bank_sof', 'With Linked Bank SOF'),
        ('with_linked_card_sof', 'With Linked Card SOF'),
    ]
    return [{'value': value, 'text': text} for value, text in options]
import time
from adp_movie import Insertmovie, Existmovie
from adp_staff import Insertstaff, Existstaff
def Selemoviedetail(code, driver):
    """Scrape one movie's detail page on Naver and insert it into the DB.

    Skips movies that are already stored.  ``driver`` is a Selenium
    WebDriver reused across calls.
    """
    Isexistmovie = Existmovie(code)
    if Isexistmovie == True:
        return
    d_url = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code={0}'
    driver.get(d_url.format(code))
    # The <dd> count distinguishes two page layouts; the rating node sits in
    # a different position depending on which layout is served.
    dds = driver.find_elements_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd')
    _rating = ''
    if len(dds) == 3:
        _rating = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[3]/p').text
    else :
        _rating = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[4]/p').text
    _code = code
    _title = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/h3/a').text
    _story = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[4]/div[1]/div/div[1]').text
    # The first info row holds genre / running time / open date spans; their
    # count again depends on the layout.
    spans = driver.find_elements_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span')
    _op_dt = ''
    if len(spans) == 4:
        _op_dt =driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[4]').text
        _genre = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[1]').text
        _rn_tm = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[3]').text
    elif len(spans) == 3:
        _op_dt = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[3]').text
        _genre = ''
        _rn_tm = driver.find_element_by_xpath('//*[@id="content"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[2]').text
    # print(_code, _title, _story, _genre, _rating, _rn_tm, _op_dt)
    # Throttle requests to avoid hammering the site.
    time.sleep(5)
    # NOTE(review): if len(spans) is neither 3 nor 4, _genre and _rn_tm are
    # never assigned and the call below raises NameError -- confirm whether
    # other layouts can occur.
    Insertmovie(_code, _title, _story, _genre, _rating, _rn_tm, _op_dt)
def Selemovie(driver):
    """Collect movie codes from the ranking page and scrape each movie.

    NOTE(review): ``ispage`` is set to False unconditionally at the end of
    the loop body, so only page 1 is ever fetched and the ``page``
    increment is dead code -- confirm whether this limit is intentional.
    """
    url = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20200803&page={0}'
    page = 1
    ispage = True
    m_code_list = list()
    while ispage == True:
        driver.get(url.format(page))
        table = driver.find_element_by_xpath('//*[@id="old_content"]/table')
        trs = table.find_elements_by_xpath('.//tr')
        for tr in trs:
            tds = tr.find_elements_by_xpath('.//td')
            # Skip divider rows (0 cells) and single-cell rows.
            if len(tds) == 0:
                continue
            elif len(tds) == 1:
                continue
            # The second cell links to the detail page; the movie code is
            # the query-string value after '='.
            href = tds[1].find_element_by_xpath('.//div/a').get_attribute('href')
            m_code = href.split('=')[1]
            m_code_list.append(m_code)
        for code in m_code_list:
            Selemoviedetail(code, driver)
        ispage = False
        page = page + 1
def Selestaff(movie, driver):
    """Scrape the cast list of ``movie`` and insert actors not yet stored.

    NOTE(review): ``_cast_name`` and ``_role_info`` hold WebElement objects
    (``.text`` is never taken) and are not passed to Insertstaff -- this
    looks like unfinished code; confirm before relying on them.
    """
    d_url = 'https://movie.naver.com/movie/bi/mi/detail.nhn?code={0}'
    driver.get(d_url.format(movie.code))
    time.sleep(3)
    # Expand the full actor list before scraping it.
    act_more = driver.find_element_by_xpath('//*[@id="actorMore"]').click()
    time.sleep(3)
    act_list = driver.find_elements_by_xpath('//*[@id="content"]/div[1]/div[4]')
    for act in act_list:
        href = act.find_element_by_xpath('//*[@id="content"]/div/ul/li/a').get_attribute('href')
        # Staff code is the query-string value after '='.
        _s_code = href.split('=')[1]
        _k_name = act.find_element_by_xpath('.//*[@class = "k_name"]').text
        _e_name = act.find_element_by_xpath('.//*[@class = "e_name"]').text
        try:
            _cast_name = act.find_element_by_xpath('.//*[@class = "pe_cmt"]')
        except:
            # NOTE(review): bare except also hides WebDriver failures --
            # should be narrowed to NoSuchElementException.
            _cast_name = '없음'
        _birth = ''
        _nation = ''
        # NOTE(review): unlike above, this lookup is not guarded and raises
        # when the element is missing -- confirm intended behaviour.
        _role_info = act.find_element_by_xpath('.//*[@class = "pe_cmt"]')
        _is_director = False
        _is_actor = True
        Isexist = Existstaff(_s_code)
        if Isexist == False:
            Insertstaff(_s_code, _k_name, _e_name, _birth, _nation, _is_director, _is_actor)
    # Throttle before returning to the caller's loop.
    time.sleep(10)
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from project.apps.Currency.models import Currency
from rest_framework_api_key.permissions import HasAPIKey
from project.api.v1.serializer import ExchangeRateSerializer
from project.api.tasks import get_exchange_rate
class GetLastExchangeRate(GenericAPIView):
    """API-key protected endpoint exposing the most recently refreshed rate."""

    permission_classes = [HasAPIKey]
    serializer_class = ExchangeRateSerializer

    def get(self, *args, **kwargs):
        """Return the newest Currency row, or 503 after queueing a refresh."""
        latest = Currency.objects.all().order_by('-refreshed_date').first()
        if latest is None:
            # No data yet: kick off an async refresh and report unavailability.
            get_exchange_rate.delay()
            return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
        return Response(self.serializer_class(latest).data)

    def post(self, *args, **kwargs):
        """Trigger an asynchronous exchange-rate refresh."""
        get_exchange_rate.delay()
        return Response(status=status.HTTP_200_OK)
|
'''
34. "A no B"

Extract every noun phrase in which two nouns are joined by the Japanese
particle "no" (の).
'''
from knock30 import get_morpheme_list

# Scan each sentence for the pattern noun + "の" + noun and join the three
# surface forms; iteration order matches reading order.
noun_phrases = [
    s[i]['surface'] + s[i + 1]['surface'] + s[i + 2]['surface']
    for s in get_morpheme_list("neko.txt.mecab")
    for i in range(len(s) - 2)
    if s[i]['pos'] == '名詞'
    and s[i + 1]['surface'] == 'の'
    and s[i + 2]['pos'] == '名詞'
]
# Show the first ten extracted phrases.
print(noun_phrases[:10])
|
#!/usr/bin/env python
'''
A Zenity GUI on top of the droid functions
'''
import sys,os
try:
import PyZenity as zen
except ImportError:
print 'No PyZenity, do "sudo easy_install pyzenity"'
sys.exit(1)
import droid
def start_server():
    'Start the server if needed'
    pid, err = droid.server_pid()
    if pid:
        # Already running: just report the pid.
        zen.InfoMessage("Server running on %s" % pid)
        return
    # adb needs root to bind, so launch it through gksudo.
    out, err = droid.cmd('gksudo', [droid.adbcmd, 'start-server'])
    if out:
        zen.InfoMessage(out)
    else:
        msg = "Failed to start server. Wrong password?"
        zen.ErrorMessage(msg)
    return
def install_amazon():
    'Try to sideload waiting packages from Amazon app store'
    # Ask the phone for Amazon's cached-but-uninstalled packages; an OSError
    # here usually means the adb server is not running.
    try:
        toinst = droid.cached_amazon()
    except OSError,err:
        zen.ErrorMessage("Can't get amazon package listing, server started?")
        return
    if not toinst:
        zen.InfoMessage('''No Amazon packages to install at this time. Download the package via your phone STOP at the "can't install" message and then re-run this program''')
        return
    # Pull each cached apk to the local disk and resolve its package name
    # so the picker can show something human-readable.
    desc = []
    for fname in toinst:
        out,err = droid.pull_amazon(fname,fname)
        assert not err,err
        pname,err = droid.package_name(fname)
        assert not err,err
        desc.append((True,pname,fname))
        continue
    # select_col=3: the list dialog returns the filename column for checked rows.
    pkgs = zen.List(('Select','Package','Filename'),
                    title='Packages to install',
                    data=desc,select_col=3,boolstyle="checklist")
    # Install each selected apk; report failures but keep going.
    for apk in pkgs:
        out,err = droid.adb("install " + apk)
        if err:
            zen.ErrorMessage('Installation of "%s" failed. Error is "%s"' % (apk,err))
    pkgs = zen.InfoMessage("Installation done")
    return
def check_install():
    'Report whether the Android SDK adb binary is present.'
    if os.path.exists(droid.adbcmd):
        return True
    zen.ErrorMessage('It looks like you have not installed the Android SDK. I checked here: "%s"' % droid.adbcmd)
    return False
def main():
    'Show the action picker and run each selected command.'
    pid, err = droid.server_pid()
    need_server = not pid
    # The third column carries the handler name returned for checked rows.
    cmds = zen.List(('Select','Description','Function'),
                    title='What do you want me to do?',
                    select_col=3,boolstyle="checklist",
                    data=[(need_server,'Start the ADB server','start_server'),
                          (True,'Install downloaded Amazon Apps','install_amazon')]
                    )
    for cmd in cmds:
        # Look the handler up by name instead of eval(): identical behavior
        # for the known command names, without arbitrary-expression execution.
        func = globals()[cmd]
        func()
        continue
    return
if __name__ == '__main__':
    if not check_install():
        sys.exit(1)
    # Dispatch the requested function by name via globals() rather than
    # eval(), so a stray argv cannot execute arbitrary expressions.
    func = globals()[sys.argv[1]]
    func(*sys.argv[2:])
|
import time
import math
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn import metrics
from tensorflow import keras
from tensorflow.python.keras import layers, backend
from tensorflow.python.keras.initializers import Constant
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
input_dir = '../../data/jigsaw-unintended-bias-in-toxicity-classification/'
output_dir = '../../data/dummy/'
glove_dir = '../../data/glove/'
fasttext_dir = '../../data/fasttext/'
is_submit = not output_dir
identity_columns = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',
'muslim', 'black', 'white', 'psychiatric_or_mental_illness']
def draw_graph(fig, overall, l, idx):
    """Render one ROC panel: chance diagonal, overall curve, per-group curves."""
    axis = fig.add_subplot(1, 3, idx)
    axis.plot([0, 1], [0, 1], 'k--')  # chance-level reference line
    axis.plot(overall[0], overall[1], label='overall')
    for fp_rates, tp_rates, curve_name in l:
        axis.plot(fp_rates, tp_rates, label=curve_name)
    axis.legend(loc='lower right')
def draw_roc_curve(df, model_name, label):
    """Save a three-panel ROC figure (subgroup / BPSN / BNSP) under output_dir.

    `df` must carry the boolean identity columns ('<identity>_b'), the
    boolean `label` column and the model's score column `model_name`.
    """
    l_sub, l_bpsn, l_bnsp = [], [], []
    for sub in identity_columns:
        subgroup = sub + '_b'
        subgroup_examples = df[df[subgroup]]
        subgroup_negative_examples = df[df[subgroup] & ~df[label]]
        non_subgroup_positive_examples = df[~df[subgroup] & df[label]]
        # pd.concat: DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0; concat is the supported equivalent.
        bpsn_examples = pd.concat([subgroup_negative_examples, non_subgroup_positive_examples])
        subgroup_positive_examples = df[df[subgroup] & df[label]]
        non_subgroup_negative_examples = df[~df[subgroup] & ~df[label]]
        bnsp_examples = pd.concat([subgroup_positive_examples, non_subgroup_negative_examples])
        sub_fp, sub_tp, _ = metrics.roc_curve(subgroup_examples[label], subgroup_examples[model_name])
        bpsn_fp, bpsn_tp, _ = metrics.roc_curve(bpsn_examples[label], bpsn_examples[model_name])
        bnsp_fp, bnsp_tp, _ = metrics.roc_curve(bnsp_examples[label], bnsp_examples[model_name])
        l_sub.append((sub_fp, sub_tp, subgroup[:4]))
        l_bpsn.append((bpsn_fp, bpsn_tp, 'bpsn_' + subgroup[:4]))
        l_bnsp.append((bnsp_fp, bnsp_tp, 'bnsp_' + subgroup[:4]))
    overall = metrics.roc_curve(df[label], df[model_name])
    fig = plt.figure(figsize=(18, 5))
    draw_graph(fig, overall, l_sub, 1)
    draw_graph(fig, overall, l_bpsn, 2)
    draw_graph(fig, overall, l_bnsp, 3)
    fig.savefig(output_dir + 'roc' + str(time.time()) + '.png')
def compute_subgroup_auc(df, subgroup, label, model_name):
    """AUC restricted to the rows belonging to *subgroup*."""
    rows = df[df[subgroup]]
    return metrics.roc_auc_score(rows[label], rows[model_name])
def compute_bpsn_auc(df, subgroup, label, model_name):
    """Background-Positive / Subgroup-Negative AUC.

    Measures separation between non-toxic subgroup rows and toxic rows
    outside the subgroup.
    """
    subgroup_negative_examples = df[df[subgroup] & ~df[label]]
    non_subgroup_positive_examples = df[~df[subgroup] & df[label]]
    # pd.concat replaces DataFrame.append (removed in pandas 2.0).
    examples = pd.concat([subgroup_negative_examples, non_subgroup_positive_examples])
    return metrics.roc_auc_score(examples[label], examples[model_name])
def compute_bnsp_auc(df, subgroup, label, model_name):
    """Background-Negative / Subgroup-Positive AUC.

    Measures separation between toxic subgroup rows and non-toxic rows
    outside the subgroup.
    """
    subgroup_positive_examples = df[df[subgroup] & df[label]]
    non_subgroup_negative_examples = df[~df[subgroup] & ~df[label]]
    # pd.concat replaces DataFrame.append (removed in pandas 2.0).
    examples = pd.concat([subgroup_positive_examples, non_subgroup_negative_examples])
    return metrics.roc_auc_score(examples[label], examples[model_name])
def compute_bias_metrics_for_model(dataset, subgroups, model, label_col):
    """Tabulate subgroup / BPSN / BNSP AUCs per identity, worst subgroup first."""
    records = []
    for sub in subgroups:
        subgroup = sub + '_b'
        records.append({
            'subgroup': subgroup[:4],
            'size': len(dataset[dataset[subgroup]]),
            'subgroup_auc': compute_subgroup_auc(dataset, subgroup, label_col, model),
            'bpsn_auc': compute_bpsn_auc(dataset, subgroup, label_col, model),
            'bnsp_auc': compute_bnsp_auc(dataset, subgroup, label_col, model),
        })
    return pd.DataFrame(records).sort_values('subgroup_auc')
def calculate_overall_auc(df, model_name, label_col):
    """ROC AUC of the model's scores over the whole frame."""
    return metrics.roc_auc_score(df[label_col], df[model_name])
def power_mean(series, p):
    """Generalized (power) mean of *series* with exponent *p*."""
    mean_of_powers = np.power(series, p).sum() / len(series)
    return np.power(mean_of_powers, 1 / p)
def get_final_metric(bias_df, overall_auc):
    """Final score: 25% overall AUC + 75% mean of the three bias power-means."""
    bias_means = [
        power_mean(bias_df['subgroup_auc'], -5),
        power_mean(bias_df['bpsn_auc'], -5),
        power_mean(bias_df['bnsp_auc'], -5),
    ]
    bias_score = np.average(bias_means)
    return 0.25 * overall_auc + 0.75 * bias_score
def construct_embedding_matrix(path, word_idx, d, n, v):  # d: embedding_dim, n: num_words, v: vocab_size
    """Build an (n, d) matrix whose row i holds the pretrained vector of the
    word mapped to index i in *word_idx*; words without a vector stay zero."""
    vectors = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            # Skip lines carrying more than d coefficients (e.g. multi-token
            # words in some embedding dumps).
            if len(parts) > d + 1:
                continue
            vectors[parts[0]] = np.asarray(parts[1:], dtype='float32')
    matrix = np.zeros((n, d))
    for word, idx in word_idx.items():
        # Indices beyond the tokenizer vocabulary cap keep their zero row.
        if idx > v:
            continue
        pretrained = vectors.get(word)
        if pretrained is not None:
            matrix[idx] = pretrained
    return matrix
def add_start_end_token(X, start, end):
    """Wrap every token list in X with start/end sentinel tokens."""
    # Builds fresh lists; the caller's inner lists are left untouched.
    return [[start] + x + [end] for x in X]
def b_cross_entropy(y_true, y_pred):
    # Element-wise binary log-likelihood (the NEGATIVE of binary cross-entropy);
    # callers negate it to get the loss. y_pred must already be clipped away
    # from 0/1 by the caller to keep log() finite.
    return y_true * backend.log(y_pred) + (1-y_true) * backend.log(1-y_pred)
def custom_loss(y_true, y_pred):
    """Bias-aware training loss.

    y_true column 0 is the toxicity target; columns 1..n are the identity
    flags (order of `identity_columns`). The loss is the base binary
    cross-entropy plus power-mean penalties over the subgroup / BPSN /
    BNSP masks, mirroring the competition's bias metric.
    """
    epsilon = backend.epsilon()
    n = len(identity_columns)
    # Keep predictions away from 0/1 so log() stays finite.
    y_pred = backend.clip(y_pred, epsilon, 1-epsilon)
    # bg: rows mentioning no identity ("background"); pos/neg: toxic or not.
    bg = backend.equal(backend.sum(y_true[:, 1:], axis=1), 0)
    pos = backend.greater_equal(y_true[:, 0], 0.5)
    neg = ~pos
    bp = bg & pos
    bn = bg & neg
    p = 2  # exponent of the power mean over per-group losses
    loss_base = -b_cross_entropy(y_true[:, :1], y_pred)
    loss_sub, loss_bpsn, loss_bnsp = 0, 0, 0
    for i in range(n):
        sub = backend.equal(y_true[:, i+1], 1)
        # BPSN = background-positive plus subgroup-negative rows; BNSP mirrors it.
        bpsn = bp | sub & neg
        bnsp = bn | sub & pos
        sub = backend.cast(backend.expand_dims(sub, axis=1), 'float32')
        bpsn = backend.cast(backend.expand_dims(bpsn, axis=1), 'float32')
        bnsp = backend.cast(backend.expand_dims(bnsp, axis=1), 'float32')
        # Mean base loss over each mask (epsilon guards empty masks), raised to p.
        loss_sub += backend.pow(backend.sum(loss_base * sub) / (epsilon + backend.sum(sub)), p)
        loss_bpsn += backend.pow(backend.sum(loss_base * bpsn) / (epsilon + backend.sum(bpsn)), p)
        loss_bnsp += backend.pow(backend.sum(loss_base * bnsp) / (epsilon + backend.sum(bnsp)), p)
    loss_sub = backend.pow(loss_sub / n, 1/p)
    loss_bpsn = backend.pow(loss_bpsn / n, 1/p)
    loss_bnsp = backend.pow(loss_bnsp / n, 1/p)
    return 0.25 * loss_base + 0.25 * (loss_sub + loss_bpsn + loss_bnsp)
# Silence SettingWithCopyWarning: slices of `data` are written below on purpose.
pd.set_option('mode.chained_assignment', None)
# Subsample during local runs; use all rows when producing a submission.
nrows = None if is_submit else 200000
nrows_test = None if is_submit else 100
data = pd.read_csv(input_dir + 'train.csv')
data = data.sample(frac=1)  # shuffle before the train/val split
data = data[:nrows]
# Boolean identity flags (column + '_b') at the conventional 0.5 threshold.
for col in identity_columns:
    data.loc[:, col + '_b'] = data[col] >= 0.5
training_ratio = 1 if is_submit else 0.8
# Round the training-set size down to a power of two.
ntrains = 2 ** math.floor(math.log2(data.shape[0] * training_ratio))
train_data = data[:ntrains]
val_data = data[ntrains:]
test_data = pd.read_csv(input_dir + 'test.csv', nrows=nrows_test)
target_name = 'target_b'
train_data.loc[:, target_name] = train_data['target'] >= 0.5
val_data.loc[:, target_name] = val_data['target'] >= 0.5
# tf.enable_eager_execution()
vocab_size = 20000
batch_size = 512
epoch = 10
# Characters stripped by the tokenizer (ASCII punctuation plus unicode noise).
CHARS_TO_REMOVE = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n“”’\'∞θ÷α•à−β∅³π‘₹´°£€\×™√²—'
tokenizer = Tokenizer(num_words=vocab_size, oov_token='<UNK>', filters=CHARS_TO_REMOVE)
tokenizer.fit_on_texts(train_data['comment_text'])
num_words = min(vocab_size, len(tokenizer.word_index))
# Reserve the two ids just past the vocabulary for sentence delimiters.
start_token, end_token = num_words, num_words+1
train_data['comment_text'] = add_start_end_token(tokenizer.texts_to_sequences(train_data['comment_text']), start_token, end_token)
val_data['comment_text'] = add_start_end_token(tokenizer.texts_to_sequences(val_data['comment_text']), start_token, end_token)
test_data['comment_text'] = add_start_end_token(tokenizer.texts_to_sequences(test_data['comment_text']), start_token, end_token)
# Pad the fixed validation/test sets once; training batches are padded per batch.
X_val = pad_sequences(val_data['comment_text'], maxlen=max(map(lambda x: len(x), val_data['comment_text'])))
X_test = pad_sequences(test_data['comment_text'], maxlen=max(map(lambda x: len(x), test_data['comment_text'])))
num_words += 3 # start token, end token, padding
embedding_dim = 300
glove_path = glove_dir + 'glove.840B.' + str(embedding_dim) + 'd.txt'
glove_matrix = construct_embedding_matrix(glove_path, tokenizer.word_index, embedding_dim, num_words, vocab_size)
fasttext_path = fasttext_dir + 'crawl-' + str(embedding_dim) + 'd-2M.vec'
fasttext_matrix = construct_embedding_matrix(fasttext_path, tokenizer.word_index, embedding_dim, num_words, vocab_size)
# Each word gets a 600-dim vector: GloVe and fastText side by side.
embedding_matrix = np.concatenate([glove_matrix, fasttext_matrix], axis=-1)
# Build two identical BiLSTM classifiers to ensemble later: frozen
# GloVe+fastText embeddings -> 2x BiLSTM -> max+avg pooling -> two residual
# dense blocks -> sigmoid toxicity score.
num_of_models = 2
dropout_rate = 0.2
# Per-identity sample-weight multipliers (order matches identity_columns).
id_weights = [0.84, 0.82, 2.67, 0.64, 1.31, 1.86, 2.71, 2.51, 1.12]
icb = [s+'_b' for s in identity_columns]
models = []
for model_num in range(num_of_models):
    input_layer = keras.Input(shape=(None,))
    # Embedding is frozen: rows come straight from the pretrained matrix.
    output_layer = layers.Embedding(num_words, embedding_dim*2, embeddings_initializer=Constant(embedding_matrix), trainable=False)(input_layer)
    output_layer = layers.SpatialDropout1D(dropout_rate)(output_layer)
    output_layer = layers.Bidirectional(layers.CuDNNLSTM(128, return_sequences=True))(output_layer)
    output_layer = layers.Bidirectional(layers.CuDNNLSTM(128, return_sequences=True))(output_layer)
    # Concatenate max- and average-pooled sequence summaries.
    output_layer = layers.concatenate([layers.GlobalMaxPooling1D()(output_layer), layers.GlobalAveragePooling1D()(output_layer)])
    output_layer = layers.add([output_layer, layers.Dense(512, activation='relu')(output_layer)])
    output_layer = layers.Dropout(dropout_rate)(output_layer)
    output_layer = layers.add([output_layer, layers.Dense(512, activation='relu')(output_layer)])
    output_layer = layers.Dropout(dropout_rate)(output_layer)
    output_layer = layers.Dense(1, activation='sigmoid')(output_layer)
    model = keras.Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer=tf.train.AdamOptimizer(), loss=custom_loss)
    models.append(model)
# Ensemble training loop: every epoch trains each model on freshly shuffled
# data and snapshots predictions weighted by 2**epoch (later epochs count more).
weights = []
preds = []
preds_test = []
for i in range(epoch):
    for model_num in range(len(models)):
        t = time.time()
        weights.append(2 ** i)  # exponential weighting favors later epochs
        model = models[model_num]
        agg_loss = 0
        train_data = train_data.sample(frac=1)  # reshuffle each pass
        # Auxiliary identity targets aligned with the shuffled row order.
        Y_train_aux = np.stack([train_data[f].fillna(0).values for f in icb], axis=1)
        print('epoch: %d' % i, 'with model %d' % model_num)
        for j in range(0, ntrains, batch_size):
            batch_data = train_data[j:j + batch_size]
            X_batch = batch_data['comment_text']
            Y_batch = batch_data['target']
            Y_batch_aux = Y_train_aux[j:j + batch_size]
            # Pad only to the longest sequence in this batch.
            X_batch = pad_sequences(X_batch, maxlen=max(map(lambda x: len(x), X_batch)))
            # Column 0: main target; remaining columns: identity flags used by custom_loss.
            Y_concat = np.concatenate([np.expand_dims(Y_batch, axis=1), Y_batch_aux], axis=1)
            # Up-weight non-toxic rows that mention an identity group.
            sample_weight = np.ones(len(batch_data), dtype=np.float32)
            for k in range(len(identity_columns)):
                identity = identity_columns[k]
                mask = ~batch_data[target_name] & batch_data[identity]
                sample_weight *= mask * id_weights[k] + ~mask
            agg_loss += model.train_on_batch(X_batch, Y_concat, sample_weight=sample_weight.values)
        print('Loss: %.4f, Time: %.4f' % (agg_loss, time.time() - t))
        if not is_submit:
            # Snapshot predictions every epoch when subsampling, else only at the end.
            if nrows is not None or (nrows is None and i == epoch - 1):
                preds.append(model.predict(X_val))
                preds_test.append(model.predict(X_test))
            if preds:
                # Weighted-average ensemble across all snapshots so far.
                pred = np.average(preds, weights=weights, axis=0)
                model_name = 'my_model'
                val_data.loc[:, model_name] = pred
                bias_metrics_df_val = compute_bias_metrics_for_model(val_data, identity_columns, model_name, target_name)
                overall_auc_val = calculate_overall_auc(val_data, model_name, target_name)
                score_val = get_final_metric(bias_metrics_df_val, overall_auc_val)
                print(bias_metrics_df_val)
                print('Score: %.4f, Overall AUC: %.4f (Validation)' % (score_val, overall_auc_val))
                if i == epoch - 1:
                    draw_roc_curve(val_data, model_name, target_name)
# Write the weight-averaged test predictions as the submission file.
with open(output_dir + 'submission.csv', 'w') as f:
    f.write('id,prediction\n')
    pred = np.average(preds_test, weights=weights, axis=0)
    for i in range(len(pred)):
        f.write(str(test_data['id'].loc[i]) + ',' + str(pred[i][0]) + '\n')
|
import logging, sys, signal, time, json, os, pytweening, threading
from collections import deque
from lib.vhWindows import vhWindows
from lib.vhSockets import vhSockets
from lib.vhUI import vhUI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
#logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
class App:
    """Glue between the WoW screen scanner (vhWindows), the vibration-device
    socket (vhSockets) and the Tk UI (vhUI).

    A daemon thread polls a screen pixel that encodes the player's health;
    when health drops, a tween is started that ramps the device intensity
    and decays it over a short duration.
    NOTE(review): these are class attributes shared by all instances; only
    one App is ever constructed, so that is harmless here.
    """
    colorBuffer = bytes([0])  # not referenced elsewhere in this class
    # This moves
    cacheHP = 0
    conf = vhWindows()
    sock = vhSockets()
    ui = vhUI()
    # Last loop tick
    tickTime = time.time()
    # Tween value being modified
    tweenVal = 0
    # Tween start value
    tweenStart = 0
    # Time tween was started
    tweenStarted = 0
    # Local duration in case we go over max
    tweenDuration = 1
    TWEEN_DURATION = 1
    FRAMERATE = 4  # target polling rate of the worker loop (iterations/sec)
    # Time to run a save
    saveScheduled = 0
    def __init__(self):
        """Wire callbacks, seed the UI from persisted config, start the
        polling thread, then block in the Tk main loop."""
        signal.signal(signal.SIGINT, self.sigint_handler)
        self.conf.onWowStatus = self.onWowRunning
        self.conf.init()
        # Push persisted settings into the UI widgets.
        self.ui.setDeviceId(self.conf.deviceID)
        self.ui.setDeviceServer(self.conf.server)
        self.ui.setIntensity(self.conf.maxIntensity)
        self.ui.setRatio(self.conf.hpRatio)
        self.ui.setMinIntensity(self.conf.minIntensity)
        self.ui.setCursorCoordinates(self.conf.cursor["x"], self.conf.cursor["y"])
        self.ui.onEvt = self.uiEvent
        self.sock.onConnection = self.onConnection
        self.sock.init(self.conf)
        # Daemon worker: dies with the main (UI) thread.
        thrd = threading.Thread(target=self.loop)
        thrd.daemon = True
        thrd.start()
        #start UI (blocks until the window closes)
        self.ui.begin()
    def uiEvent(self, t, data):
        """Dispatch a UI event `t` with payload `data` onto config/socket state."""
        c = self.conf
        if t == "settings":
            c.deviceID = data[0]
            c.server = data[1]
            c.saveConfig()
            self.sock.resetDevice()
        elif t == "click":
            # User picked the screen pixel to sample.
            c.cursor["x"] = data[0]
            c.cursor["y"] = data[1]
            c.saveConfig()
            self.ui.setCursorCoordinates(self.conf.cursor["x"], self.conf.cursor["y"])
        elif t == "intensity":
            # Keep min <= max invariant when either slider moves.
            c.maxIntensity = data[0]
            c.minIntensity = min(c.maxIntensity, c.minIntensity)
            self.ui.setMinIntensity(c.minIntensity)
            self.scheduleSave()
        elif t == "minintensity":
            c.minIntensity = data[0]
            c.maxIntensity = max(c.maxIntensity, c.minIntensity)
            self.ui.setIntensity(c.maxIntensity)
            self.scheduleSave()
        elif t == "ratio":
            c.hpRatio = data[0]
            self.scheduleSave()
        elif t == "weakaura":
            self.conf.copyWeakaura()
    def onWowRunning(self, running):
        """Reflect WoW process state in the UI; stop vibration when it exits."""
        self.ui.setWowRunning(running)
        if not running:
            self.sock.resetVib()
    def onConnection(self, connected):
        # Socket connectivity indicator for the UI.
        self.ui.setConnectionStatus(connected)
    def scheduleSave(self):
        # Debounce config writes: the loop persists 0.2s after the last change.
        self.saveScheduled = time.time()+0.2
    def startTween(self, amount):
        """Begin a decay tween after losing `amount` (already ratio-scaled) health."""
        # Power at start of tween
        self.tweenStart = self.tweenVal+amount
        # Time at start of tween
        self.tweenStarted = time.time()
        # Power at tween start needs to be at least 15%
        # Duration should be total intensity plus 0.2, but max 4
        self.tweenDuration = min(0.2+self.tweenStart*2, 4)
        # Intensity slides between min and max
        intensity = min(max(
            self.tweenStart*
            (self.conf.maxIntensity-self.conf.minIntensity)+
            self.conf.minIntensity, 0), self.conf.maxIntensity)
        #print(amount, intensity, self.tweenDuration)
        self.sock.sendProgram(intensity, self.tweenDuration)
    # Sigint handling
    def sigint_handler(self, signal, frame):
        print ('Interrupted')
        os._exit(1)
    # Threading
    # NOTE(review): defined without `self` -- `func` receives the instance
    # when called as a method. It is not called anywhere in this class;
    # confirm intent before using it.
    def createThread(func, autostart = True):
        thrd = threading.Thread(target=func)
        thrd.daemon = True
        if autostart:
            thrd.start()
        return thrd
    def loop(self):
        """Worker loop: poll the HP pixel, trigger/advance tweens, save config."""
        while True:
            t = time.time()
            passed = t-self.tickTime
            self.tickTime = t
            conf = self.conf
            conf.processScan() # See if WoW is running or not
            if self.saveScheduled:
                self.saveScheduled = 0
                self.conf.saveConfig()
            if self.sock.connected and self.conf.wowPid:
                color = conf.updatePixelColor()
                # g == 51 appears to be the weakaura's "valid sample" marker,
                # with HP encoded in the red channel -- TODO confirm.
                if conf.g == 51:
                    index = 0
                    hpp = conf.r/255
                    if hpp < self.cacheHP:
                        # Health dropped: vibrate proportionally to the loss.
                        self.startTween((self.cacheHP-hpp)*self.conf.hpRatio)
                    self.cacheHP = hpp
                if self.tweenStarted:
                    # Advance the decay tween; clamp and stop at the ends.
                    tweenPerc = 1-(t-self.tweenStarted)/self.tweenDuration;
                    if tweenPerc < 0:
                        tweenPerc = 0
                        self.tweenStarted = 0
                    elif tweenPerc > 1:
                        tweenPerc = 1
                    self.tweenVal = pytweening.linear(tweenPerc)*self.tweenStart
            if not self.conf.wowPid:
                time.sleep(1)
            else:
                # Sleep off the remainder of the frame budget.
                after = time.time()
                logicTime = 1/self.FRAMERATE-(after-t)
                if logicTime > 0:
                    time.sleep(logicTime)
# Begin: constructing App starts the worker thread and blocks in the UI loop.
App()
|
import pyqrcode
from pyzbar.pyzbar import decode
from PIL import Image
from tkinter.filedialog import *
# Interactive menu: 1 = generate a QR code, 2 = decode one, q to quit.
while True:
    try:
        choice = int(input("Enter 1 to generate QR code and 2 to read QR code: "))
    except ValueError:
        # Non-numeric input used to crash the whole loop with an unhandled
        # ValueError; treat it like any other unrecognized choice instead.
        choice = 0
    if choice == 1:
        text = input("Enter the text you want to convert into QR code: ")
        qr = pyqrcode.create(text)
        qr.png("myCode.png", scale=8)
    elif choice == 2:
        # img = input("Enter the name of the image that contains the QR code, please enter the correct name in case sensitive: ")
        img = askopenfilename()
        d = decode(Image.open(f"{img}"))
        print(d[0].data.decode("ascii"))
    else:
        print("You entered something else.\nTry Again... ")
    loop_out = input("Enter q to quit this loop: ")
    if loop_out == 'q':
        break
from typing import List
from typing import Optional
import rubik
############################################################################
# Invariant documentation:
#
# Our invariant is that the list `frontier` holds exactly the states we have
# discovered but not yet visited; as long as it is non-empty, unvisited
# states remain. This relates to depth because, if we have not yet found the
# solution, the solution must be longer than the depth of the state we are
# currently visiting (`curr`). This program uses "depth" to mean something
# slightly different in order to cut down on arithmetic operations.
#
# On initialization of the loop the invariant holds: we have visited no
# states, so everything in `frontier` is unvisited, and the start state is
# the only one queued to be visited, at depth 0.
#
# During maintenance of the loop the invariant is preserved: we visit a
# state by popping it off `frontier`, and later in the same iteration we
# append every state reachable from `curr` that has not been seen before,
# queueing it to be visited. Although the frontier can grow, the search
# space is shrinking, because we discard already-seen states and know the
# solution cannot be shorter than the current depth.
#
# On termination of the loop we have either found a shortest path — in
# which case `frontier` may still contain undiscovered states near the
# solved state that we no longer need to visit — or `frontier` is empty and
# we have traversed the whole reachable space without finding a solution at
# any depth.
#
############################################################################
def shortest_path(
    start: rubik.Position,
    end: rubik.Position,
) -> Optional[List[rubik.Permutation]]:
    """
    Using BFS, finds the shortest path from start to end.
    Returns a list of Permutations representing that shortest path.
    If there is no path to be found, return None instead of a list.
    You can use the rubik.quarter_twists move 6-tuple.
    Each move can be applied using rubik.perm_apply.
    """
    from collections import deque

    # States queued for exploration; deque gives O(1) pops from the left
    # (list.pop(0) is O(n) and dominated the runtime on large searches).
    frontier = deque([start])
    # Maps each discovered state to the index of the move that UNDOES the
    # step taken to reach it; None marks the start state.
    moves = {start: None}
    solved = False
    visited = 0  # safety cap on explored states, bounds the runtime
    while frontier and not solved and visited <= 1000000:
        curr = frontier.popleft()
        visited += 1
        # Found the goal state: stop searching.
        if curr == end:
            solved = True
            break
        # Expand the six quarter-turn neighbors of the current state.
        for i in range(6):
            nxt = rubik.perm_apply(rubik.quarter_twists[i], curr)
            if nxt not in moves:
                frontier.append(nxt)
                # quarter_twists pairs each move with its inverse at the
                # adjacent index: inverse(i) = i+1 when i is even, else i-1.
                moves[nxt] = i + 1 if i % 2 == 0 else i - 1
    if not solved:
        # Exhausted the reachable states without hitting `end`.
        return None
    # Walk backwards from `end`, applying each stored inverse move and
    # recording the corresponding forward move.
    path = []
    im = end
    i = moves[im]
    while i is not None:
        forward = i + 1 if i % 2 == 0 else i - 1
        path.append(rubik.quarter_twists[forward])
        im = rubik.perm_apply(rubik.quarter_twists[i], im)
        i = moves[im]
    # Reverse since the later moves were appended first.
    path.reverse()
    return path
|
import unittest
import zserio
from testutils import getZserioApi
class StructureArrayParamTest(unittest.TestCase):
    """Round-trip test for the generated ParentStructure with child-array params."""
    @classmethod
    def setUpClass(cls):
        # Compile the zserio schema once and build shared fixture children.
        cls.api = getZserioApi(__file__, "functions.zs").structure_array_param
        cls.CHILD_BIT_SIZE = 19
        cls.CHILDREN = [cls.api.ChildStructure.fromFields(cls.CHILD_BIT_SIZE, 0xAABB),
                        cls.api.ChildStructure.fromFields(cls.CHILD_BIT_SIZE, 0xCCDD)]
        cls.NUM_CHILDREN = len(cls.CHILDREN)
    def testParentStructure(self):
        """Write / re-read a ParentStructure and compare against a hand-built stream."""
        parentStructure = self._createParentStructure()
        self.assertEqual(self.CHILD_BIT_SIZE, parentStructure.funcGetChildBitSize())
        writer = zserio.BitStreamWriter()
        parentStructure.write(writer)
        # The generated writer must produce exactly the manually encoded bytes.
        expectedWriter = zserio.BitStreamWriter()
        self._writeParentStructureToStream(expectedWriter)
        self.assertTrue(expectedWriter.getByteArray() == writer.getByteArray())
        reader = zserio.BitStreamReader(writer.getByteArray())
        readParentStructure = self.api.ParentStructure.fromReader(reader)
        self.assertEqual(parentStructure, readParentStructure)
    def _writeParentStructureToStream(self, writer):
        # Manual encoding: 8-bit child count, then each child's value field.
        writer.writeBits(self.NUM_CHILDREN, 8)
        for childStructure in self.CHILDREN:
            writer.writeBits(childStructure.getValue(), childStructure.getBitSize())
    def _createParentStructure(self):
        # Fixture parent wrapping the shared children.
        return self.api.ParentStructure.fromFields(self.NUM_CHILDREN, self.CHILDREN)
|
from flask_sqlalchemy import SQLAlchemy
from flask_seeder import generator
db = SQLAlchemy()
def seed_database(User, count=250):
    """Populate the users table with *count* randomly named rows.

    `User` is the model class to instantiate. `count` defaults to the
    previously hard-coded 250, so existing callers are unaffected. All rows
    are inserted in one session and committed in a single transaction.
    """
    for _ in range(count):
        db.session.add(User(name=generate_name()))
    db.session.commit()
def generate_name():
    """Return a random 'First Last' style full name."""
    first = generator.Name().generate()
    last = generator.Name().generate()
    return f"{first} {last}"
|
from gui import client_app
import winsound
def main():
    """Build the client window and enter the Tk main loop."""
    window = client_app.Root(None, client_app.Game())
    window.title("Dungeon Hero")
    app = client_app.App(window)
    app.mainloop()


if __name__ == "__main__":
    main()
#-*- encoding: utf-8 -*-
from django.test import TestCase
from zhidewen.tests.models.base import ModelTestCase
from zhidewen.models import Answer
from django.utils import timezone
class TestAnswer(ModelTestCase):
    """Unit tests for the Answer model: creation, ranking and update tracking."""
    def setUp(self):
        self.user = self.create_user('test')
        self.q = self.create_question('Foo', 'Bar')
    def test_answer_question(self):
        # answer_question creates exactly one Answer attached to the question.
        answer = Answer.objects.answer_question(self.user, self.q, 'Content')
        self.assertEqual(Answer.objects.count(), 1)
        self.assertEqual(list(self.q.answers.all()), [answer])
    def test_ranking_weight(self):
        # ranking_weight tracks up_count - down_count, including after edits.
        answer = self.answer_question('Content', up_count=5, down_count=2)
        self.assertEqual(answer.ranking_weight, 3)
        answer.up_count += 2
        answer.save()
        self.assertEqual(answer.ranking_weight, 5)
    def test_update(self):
        # update() must bump last_updated_at and record the editing user ...
        answer = self.answer_question('Content')
        edit_user = self.create_user('admin')
        last_updated_at = answer.last_updated_at
        answer.update(edit_user, content='Content')
        new_answer = Answer.objects.get(pk=answer.id)
        self.assertTrue(new_answer.last_updated_at > last_updated_at)
        self.assertEqual(edit_user, new_answer.last_updated_by)
        # ... whereas a plain save() must NOT touch the timestamp.
        last_updated_at = new_answer.last_updated_at
        new_answer.title = 'FOO'
        new_answer.save()
        self.assertEqual(new_answer.last_updated_at, last_updated_at)
class TestListAnswer(ModelTestCase):
    """Ordering and soft-delete filtering of a question's answer list."""
    def setUp(self):
        self.user = self.create_user('test')
        self.q = self.create_question('Foo', 'Bar')
        # Three answers with distinct vote profiles to exercise ordering.
        self.a1 = self.answer_question('A1', down_count=2)
        self.a2 = self.answer_question('A2', up_count=5)
        self.a3 = self.answer_question('A3')
    def test_best_list(self):
        # best() orders by ranking: most upvoted first, downvoted last.
        self.assertEqual(list(self.q.answers.best()), [self.a2, self.a3, self.a1])
    def test_oldest_list(self):
        # oldest() preserves creation order.
        self.assertEqual(list(self.q.answers.oldest()), [self.a1, self.a2, self.a3])
    def test_existed_manager_and_soft_delete(self):
        # existed() hides soft-deleted rows; the default manager still sees them.
        self.assertEqual(list(self.q.answers.oldest()), [self.a1, self.a2, self.a3])
        self.a2.soft_delete()
        self.assertEqual(list(self.q.answers.existed().oldest()), [self.a1, self.a3])
        self.assertEqual(list(self.q.answers.oldest()), [self.a1, self.a2, self.a3])
class TestAnswerCount(ModelTestCase):
    """The question's denormalized answer_count must track every delete flavor."""
    def setUp(self):
        self.user = self.create_user('test')
        self.q = self.create_question('Foo', 'Bar')
        self.answer = self.answer_question('A')
        self.assertEqual(1, self.q.answer_count)
    def test_answer_count(self):
        # Soft delete decrements the counter.
        self.answer.soft_delete()
        self.assertEqual(0, self.q.answer_count)
    def test_answer_count_hard_delete(self):
        # Hard delete decrements it too.
        self.answer.delete()
        self.assertEqual(0, self.q.answer_count)
    # 'x' prefix disables this case: queryset.delete() bypasses model signals,
    # so the counter is not maintained on bulk deletes.
    def xtest_answer_count_query_set_delete(self):
        self.q.answers.delete()
        self.assertEqual(0, self.q.answer_count)
    def test_answer_count_soft_and_hard_delete(self):
        # Soft delete followed by hard delete must not double-decrement.
        self.answer.soft_delete()
        self.assertEqual(0, self.q.answer_count)
        self.answer.delete()
        self.assertEqual(0, self.q.answer_count)
class TestRefreshQuestion(ModelTestCase):
    """Answering a question must bump its last_refreshed_at timestamp."""
    def test(self):
        self.user = self.create_user('test')
        self.q = self.create_question('Foo', 'Bar')
        last_refreshed_at = self.q.last_refreshed_at
        self.answer_question('A')
        self.assertTrue(self.q.last_refreshed_at > last_refreshed_at)
|
from __future__ import absolute_import
import os
from test.syncbase import TestPithosSyncBase
import pithossync
class TestClone(TestPithosSyncBase):
    """Clone (initial download) behavior of the pithossync client."""
    # TODO: assert that remote folder does not exists causes an error during clone
    # TODO: assert that local permission denied during clone fails correctly
    def test_empty_clone(self):
        """Check if cloning an empty folder works."""
        self.local.make()
        self.local.write_file('dummy', '')
        # Cloning into a non-empty local directory must be refused.
        self.assertRaises(pithossync.DirectoryNotEmptyError,
                          self.syncer.clone,
                          self.local.path,
                          self.remote.path)
        os.unlink(os.path.join(self.local.path, 'dummy'))
        self.syncer.clone(self.local.path, self.remote.path)
        # make sure the directory is still empty after cloning an empty
        # server-side directory
        os.mkdir(self.workspace.path)
        self.assertTreesEqual(self.local.path, self.workspace.path)
        # make sure the server-side directory is not affected
        # by the clone operation
        self.assertTrue(self.remote.folder_empty_but_lock(self.remote.path))
        # TODO: un-init the target dir, then test to assert cloning fails
    def test_clone_one_text(self):
        """Check if cloning a folder containing a single text file works.
        Create one text test file on the server and make sure it's
        downloaded by the client during sync.
        """
        os.mkdir(self.workspace.path)
        self.workspace.write_file('one.txt', 'Hello, world!\n')
        self.workspace.upload('one.txt')
        os.mkdir(self.local.path)
        self.syncer.clone(self.local.path, self.remote.path)
        self.assertTreesEqual(self.local.path, self.workspace.path)
    def test_clone_one_bin(self):
        """Check if cloning a folder with a single binary file works."""
        self.workspace.create_binary('test.bin', 8)
        self.workspace.upload('test.bin')
        os.mkdir(self.local.path)
        self.syncer.clone(self.local.path, self.remote.path)
        self.assertTreesEqual(self.local.path, self.workspace.path)
    # def test_clone_one_big(self):
    #     """Check if cloning a folder with a 100MB binary file works."""
    #
    #     self.workspace.create_binary('test.bin', 100 * 1024 * 1024)
    #     self.workspace.upload('test.bin')
    #
    #     os.mkdir(self.local.path)
    #     self.syncer.clone(self.local.path, self.remote.path)
    #     self.assertTreesEqual(self.local.path, self.workspace.path)
    #
    def test_clone_tree(self):
        """Create a tree of files and directories and check if it clones."""
        tree = ['red.file', 'green.file', 'blue.file',
                'foo/',
                'foo/red.file', 'foo/green.file', 'foo/blue.file',
                'bar/',
                'bar/quux/',
                'bar/quux/koko.txt',
                'bar/quux/lala.txt',
                'bar/quux/liruliru.txt']
        # create the tree in local_workspace
        self.workspace.create_tree(tree)
        # upload the tree to the server manually by directly
        # utilizing the pithos client
        for file in tree:
            # Trailing slash marks a directory entry; everything else is a file.
            if file[-1] == '/':
                self.remote.mkdir(file[:-1])
            else:
                self.workspace.upload(os.path.join(*file.split('/')))
        # make sure cloning using the pithossync library works correctly
        os.mkdir(self.local.path)
        self.syncer.clone(self.local.path, self.remote.path)
        self.assertTreesEqual(self.local.path, self.workspace.path)
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_pipeline_downgrade(sdc_executor):
    """Ensure that when user tries to downgrade pipeline, we issue a proper error message."""
    # Minimal two-stage pipeline: generator feeding a trash sink.
    builder = sdc_executor.get_pipeline_builder()
    generator = builder.add_stage(label='Dev Data Generator')
    trash = builder.add_stage(label='Trash')
    generator >> trash
    pipeline = builder.build()
    # We manually alter the pipeline version to some really high number
    # TLKT-561: PipelineInfo doesn't seem to be exposed in the APIs
    pipeline._data['pipelineConfig']['info']['sdcVersion'] = '99.99.99'
    sdc_executor.add_pipeline(pipeline)
    # Validation must fail with the downgrade-specific error code.
    with pytest.raises(Exception) as e:
        sdc_executor.validate_pipeline(pipeline)
    assert 'VALIDATION_0096' in e.value.issues
|
#!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
c = \
"""
PyIconv.py - Prosty 'front-end' do programu iconv
Copyright (C) 2003 Rafał 'jjhop' Kotusiewicz
"Rafał Kotusiewicz" <jjhop@randal.qp.pl>
http://randal.qp.pl/_projects/PyIconv/index.php
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
$HOME/.pyiconv
> icon_path=/usr/bin/iconv
> from_enc=ISO_8859-2
> to_enc=WINDOWS-1250
"""
import Tkinter
import tkMessageBox
import tkFileDialog
import sys
import os
import os.path
import string
class PyIconv:
    """Tk GUI front-end for the iconv command-line converter (Python 2).

    Builds a small fixed-size window with source/target encoding pickers,
    input/output file choosers and a Convert button that shells out to
    iconv via the Converter class.
    """
    version = "v.02.4"
    def __init__(self, config=None):
        # `config` is an optional Configurator; it is stored but the options
        # dialog that would use it is not implemented yet (see __options_dialog).
        self.config = config
        print c
        # for i in config.get_config_map().keys():
        #     print i , config[i]
        self.root = Tkinter.Tk()
        # Tk variables backing the widgets below.
        self.from_enc = Tkinter.StringVar(self.root)
        self.from_enc.set("WINDOWS-1250")
        self.input_file_name = Tkinter.StringVar(self.root)
        self.output_file_name = Tkinter.StringVar(self.root)
        self.to_enc = Tkinter.StringVar(self.root)
        self.to_enc.set("ISO_8859-2")
        # Menu bar: File / Options / Help.
        self.menu = Tkinter.Menu(self.root)
        self.filem = Tkinter.Menu(self.menu)
        self.filem.config(tearoff=0)
        #self.filem.add_separator()
        self.filem.add_command(label="Exit", command=self.__exit)
        self.optionsm = Tkinter.Menu(self.menu)
        self.optionsm.config(tearoff=0)
        #self.optionsm.add_command(label="Options...", command=self.__options_dialog)
        # Options dialog is not implemented; von() shows a "not yet" popup.
        self.optionsm.add_command(label="Options...", command=self.von)
        self.helpm = Tkinter.Menu(self.menu)
        self.helpm.config(tearoff=0)
        self.helpm.add_command(label="PyIconv Home Page", command=self.__open_home_page)
        self.helpm.add_command(label="About PyIconv", command=self.__about_pyiconv)
        self.helpm.add_separator()
        self.helpm.add_command(label="About Author", command=self.__about_author)
        self.menu.add_cascade(label="File", menu=self.filem)
        self.menu.add_cascade(label="Options", menu=self.optionsm)
        self.menu.add_cascade(label="Help", menu=self.helpm)
        self.frame = Tkinter.Frame(self.root)
        self.frame.pack(expand=1, fill=Tkinter.BOTH)
        # ROW ONE: source encoding picker + input file entry/chooser.
        #Tkinter.Label(self.frame, text="sss: ").grid(column=0, row=1)
        self.option_from_enc = Tkinter.OptionMenu(self.frame, self.from_enc, "ISO_8859-1","ISO_8859-2","WINDOWS-1250")
        self.option_from_enc.configure(width = 14)
        self.option_from_enc.grid(column=0, row=1)
        self.input_file = Tkinter.Entry(self.frame)
        self.input_file.configure(state="normal", width=30)
        self.input_file.grid(column=1, row=1)
        Tkinter.Button(self.frame, text="Choose...", command=self.__get_input_file_name).grid(column=2, row=1)
        # ROW TWO: target encoding picker + output file entry/chooser.
        self.option_to_enc = Tkinter.OptionMenu(self.frame, self.to_enc, "ISO_8859-1","ISO_8859-2","WINDOWS-1250")
        self.option_to_enc.configure(width = 14)
        self.option_to_enc.grid(column=0, row=2)
        self.output_file = Tkinter.Entry(self.frame)
        self.output_file.configure(state="normal", width=30)
        self.output_file.grid(column=1, row=2)
        Tkinter.Button(self.frame, text="Choose...", command=self.__get_output_file_name).grid(column=2, row=2)
        # ROW THREE: "show diff" checkbox (value currently unused by convert()).
        self.diff_check = Tkinter.Checkbutton(self.frame)
        self.diff_check.configure(text="Show diff when convert will be done....")
        self.diff_check.grid(column=0, sticky=Tkinter.W, columnspan=2, row=3)
        #self.diff_check.pack(side=Tkinter.LEFT, fill=Tkinter.BOTH, padx=2)
        # ROW FOUR: "open converted file" checkbox (value currently unused).
        self.open_after = Tkinter.Checkbutton(self.frame)
        self.open_after.configure(text="Open converted file")
        self.open_after.grid(column=0, sticky=Tkinter.W, columnspan=2, row=4)
        # ROW FOUR / FIVE: the Convert button spanning both rows.
        self.proc_but = Tkinter.Button(self.frame, text="Convert!", command=self.convert).grid(column=2,rowspan=2,row=3,sticky="s")
        self.statusBar = Tkinter.Label(self.root, text="Application started...")
        self.statusBar.pack(side=Tkinter.LEFT, fill=Tkinter.BOTH, padx=2)
        # Right-click anywhere shows the "not yet implemented" popup.
        self.root.bind("<ButtonRelease-3>", self.von)
        self.root.config(menu=self.menu)
        self.root.wm_minsize(width=450, height=180)
        self.root.wm_maxsize(width=450, height=180)
        title_ = "PyIconv " + self.version + " by jjhop@randal.qp.pl"
        self.root.wm_title(string=title_)
        #self.root.tk_strictMotif(1)
        #self.root.tk_bisque()
        tkMessageBox.showwarning("Uwaga!", "Uwaga!\n\nSciezka do pliku wykonywalnego iconv ustawiona na \"/usr/bin/iconv\"\naby ja zmienic nalezy edytowac plik programu w linii 141\n")
        self.root.mainloop()
    def convert(self, evt=None):
        """Run iconv with the currently selected encodings and file names."""
        #print "Input: " , self.input_file_name.get()
        #print "Output: " , self.output_file_name.get()
        #print "FROM: " , self.from_enc.get()
        #print "TO: " , self.to_enc.get()
        # NOTE(review): iconv path is hard-coded here; the config's icon_path
        # key (documented in the banner) is never consulted - confirm intent.
        conv = Converter('/usr/bin/iconv');
        conv.set_from_enc(self.from_enc.get())
        conv.set_to_enc(self.to_enc.get())
        conv.set_input_file(self.input_file_name.get())
        conv.set_output_file(self.output_file_name.get())
        conv.prepare_command()
        conv.convert()
    def von(self, evt=None):
        """Placeholder handler for features that are not implemented yet."""
        tkMessageBox.showinfo('Not yet...','Not yet implemented')
    def __get_input_file_name(self):
        """Ask for the input file and mirror the choice into the entry widget."""
        self.input_file_name.set( tkFileDialog.askopenfilename(filetypes=[("Text files","*.txt"), ("All files", "*.*")]) )
        self.input_file.configure(state="normal")
        self.input_file.insert(0, self.input_file_name.get())
        self.input_file.configure(state="readonly")
    def __get_output_file_name(self):
        """Ask for the output file and mirror the choice into the entry widget."""
        self.output_file_name.set( tkFileDialog.asksaveasfilename(filetypes=[("Text","*.txt"), ("All files", "*.*")]) )
        self.output_file.configure(state="normal")
        self.output_file.insert(0, self.output_file_name.get())
        self.output_file.configure(state="readonly")
    def __options_dialog(self):
        # we have self.config available here; dialog not implemented
        pass
    def __open_home_page(self):
        """Open the project home page in the default browser."""
        try:
            import webbrowser
            webbrowser.open_new("http://www.jjhop.net/apps/python/PyIconv/index.php")
        except:
            tkMessageBox.showinfo("Error...", "You need open site http://www.jjhop.net/apps/python/PyIconv/")
    def __about_pyiconv(self):
        """Show the About-program dialog."""
        tkMessageBox.showinfo("About program...", "\nPyIconv " + self.version + "\n\nPyIconv is simple front-end to iconv and I hope that is very useful.\nLast update: 23.02.2004\n")
    def __about_author(self):
        """Show the About-author dialog."""
        tkMessageBox.showinfo("About author...", "This program was created by Rafal 'jjhop' Kotusiewicz.\n\nWWW: http://www.jjhop.net\nEmail: jjhop@randal.qp.pl\n")
    def __exit(self):
        """Confirm, release the config (flushing it via its destructor), and quit."""
        if tkMessageBox.askokcancel("Confirm exit", "Are You sure?") == 1:
            del self.config
            sys.exit(0)
class Converter:
    """Builds and runs an `iconv -f FROM -t TO INPUT -o OUTPUT` command line."""
    def __init__(self, iconv_path):
        # Path to the iconv executable; everything else is filled in via setters.
        self.iconv_path = iconv_path
        self.from_enc = None
        self.to_enc = None
        self.input_file = None
        self.output_file = None
        self.command = None
    def set_from_enc(self, from_enc):
        """Set the source encoding (e.g. "WINDOWS-1250")."""
        self.from_enc = from_enc
    def set_to_enc(self, to_enc):
        """Set the target encoding (e.g. "ISO_8859-2")."""
        self.to_enc = to_enc
    def set_input_file(self, input_file):
        self.input_file = input_file
    def set_output_file(self, output_file):
        self.output_file = output_file
    def prepare_command(self):
        """Assemble the shell command; must be called before convert()."""
        # NOTE(review): file names are interpolated unquoted into a shell
        # command - paths with spaces or shell metacharacters will break
        # (or worse, execute). Consider quoting before wider use.
        command = self.iconv_path + ' '
        command = command + ' -f ' + self.from_enc
        command = command + ' -t ' + self.to_enc
        command = command + ' ' + self.input_file
        command = command + ' -o ' + self.output_file
        self.command = command
    def convert(self, show_diff=None):
        """Run the prepared command and report success/failure in a dialog."""
        print self.command
        ret = os.system(self.command)
        if ret == 0:
            # report success
            tkMessageBox.showinfo("Powodzenie...", "Operacja konversji pliku zakonczona powodzeniem");
        else:
            # report failure
            tkMessageBox.showerror("Porazka...", "Operacja konversji pliku nie zakonczyla sie powodzeniem");
# Removed dead statement `Converter("hello")`: it built a throwaway Converter
# instance (whose constructor only assigns attributes, with no side effects)
# and discarded it immediately - almost certainly leftover debug code.
class Configurator:
"""
Configurator - klasa obsługująca pliki konfiguracyjne w formacie
zmienna1=wartosc1
zmienna2=wartosc2
zmienna3=wartosc3
Możemy z nich korzystac w nastepujący sposób:
config = Configurator('/path/to/config_file')
value = config.get_var('var_name')
# lub
value = config['var_name']
"""
def __init__(self, config_file):
self.config_file = config_file
self.config_map ={}
self.change = 0
try:
fd = open(self.config_file)
line = fd.readline()
while line:
as_list = string.split(line, '=', 2)
self.config_map[as_list[0]] = as_list[1][:-1]
line = fd.readline()
fd.close()
except Exception, e:
print ":" , e
def __getitem__(self, key):
if self.config_map.has_key(key):
return self.config_map[key]
else:
raise LookupError, "Unknown config key"
def __setitem__(self, key, value):
# not yet implemented
if self.config_map.has_key(key):
if self.config_map[key] == value:
return
else:
self.config_map[key] = value
self.change = 1
else:
self.config_map[key] = value
self.change = 1
def __delitem__(self, key):
if not self.config_map.has_key(key):
return
else:
del self.config_map[key]
self.change = 1
def __del__(self):
""" Destruktor """
# not yet implemented
if self.change:
# zmianiamy konfigurację
print 'zmieniamy...'
try:
vars = []
for i in self.config_map.keys():
line = i + "=" + self.config_map[i] + "\n"
vars.append(line)
fd = open(self.config_file,'w', 0)
fd.writelines(vars)
fd.close()
except IOError, e:
print e
def get_var(self, key):
return self.config_map[key]
def set_var(self, key, value):
self.__setitem__(self, key, value)
def get_config_map(self):
return self.config_map
def dump_config(self):
self.__del__()
if __name__ == '__main__':
    #if string.atoi(str(sys.version_info[0]) + str(sys.version_info[1])) < 23:
    #    raise Exception("Niewłaściwa wersja interpretera!")
    try:
        # determine the config file name: $HOME/.pyiconv
        # NOTE(review): hard-coded developer path below - the banner documents
        # $HOME/.pyiconv as the intended location; confirm before release.
        if 1: # placeholder for "file exists and is readable" - always true
            config = Configurator("/home/jjhop/congo")
            PyIconv(config)
        else:
            PyIconv()
    except Exception, e:
        print e
    else:
        # On a clean run, drop the config so its destructor flushes changes.
        del config
|
from rest_framework import serializers
from .models import CallService,Rider,StateRider,StatusOrder,Menu,Market,FoodOption,Option,Order,Food,Drink,Snack
class CallServiceSerializer(serializers.ModelSerializer):
    """Serializer for call-service accounts; password is accepted on input only."""
    class Meta :
        model = CallService
        fields = ('id','name','username','password','status')
        # Never echo the password back in API responses.
        extra_kwargs = {'password' : {'write_only': True , 'required':True}}
    def create(self, validated_data):
        # Delegate account creation to the model manager's create_user
        # (presumably so the password is hashed there - TODO confirm).
        user = CallService.objects.create_user(**validated_data)
        user.save()
        return user
class RiderSerializer(serializers.ModelSerializer):
    """Serializer for rider accounts; password is write-only."""
    class Meta :
        model = Rider
        fields = ('id','name','username','password')
        extra_kwargs = {'password' : {'write_only': True , 'required':True}}
    # NOTE(review): no create() override here, so ModelSerializer's default
    # create() stores the password field as-is - confirm whether this should
    # go through a create_user() like CallServiceSerializer does.
class StatusRiderSerializer(serializers.ModelSerializer):
    """Serializer exposing a rider's order-progress flags and their timestamps."""
    class Meta :
        model = StateRider
        fields = ('id','takeOrder','timeTakeOrder','takeFood','timeTakeFood','finishOrder','timeFinishOrder')
class StatusOrderSerializer(serializers.ModelSerializer):
    """Serializer for an order's status; nests the assigned rider and its state."""
    # Both nested serializers are read-only: they are never written through here.
    rider = RiderSerializer(read_only = True)
    statusRider = StatusRiderSerializer(read_only = True)
    class Meta :
        model = StatusOrder
        fields = ('id','rider','statusRider')
class FoodSerializer(serializers.ModelSerializer):
    """Serializer for food items, including the markets that offer them."""
    class Meta :
        model = Food
        fields = ('id','name','exFood','priceBase','markets')
class MarketSerializer(serializers.ModelSerializer):
    """Serializer for markets; embeds the market's food list read-only."""
    food = FoodSerializer(many=True, read_only=True)
    class Meta :
        model = Market
        fields = ('id','name','location','timeToOpen','timeToClose','food')
|
import socket
import time
class ClientError(Exception):
    """Raised on socket send/recv failures or when the server replies 'error'."""
    pass
class Client:
    """TCP client for a simple line-oriented key/value metrics protocol.

    Requests are single text lines ("put <key> <value> <ts>\n",
    "get <key>\n"); replies begin with a status line ('error' => failure)
    followed by the payload.
    """
    def __init__(self,host,port,timeout = None):
        # May raise socket.error if the connection cannot be established.
        self.socket = socket.create_connection((host,port), timeout)
    def send(self,msg):
        """Send msg UTF-8 encoded; wrap socket errors in ClientError."""
        try:
            self.socket.sendall(msg.encode("utf8"))
        except socket.error as error:
            raise ClientError("Error sending data", error)
    def recieve(self):
        """Read one reply, strip the status line, and return the payload.

        (Name typo kept: renaming would break existing callers.)
        """
        try:
            # NOTE(review): assumes the whole reply fits in a single
            # 1024-byte recv() - confirm against the server's max response.
            got_value = self.socket.recv(1024).decode("utf8")
        except socket.error as error:
            raise ClientError("Error recieving data", error)
        status,data = got_value.split("\n", 1)
        data = data.strip()
        if status == 'error':
            raise ClientError(data)
        return data
    def put(self,key,value,timestamp = None):
        """Store value under key; timestamp defaults to the current UNIX time."""
        if not timestamp:
            timestamp = int(time.time())
        msg = "put {0} {1} {2}\n".format(key,str(value),str(timestamp))
        self.send(msg)
        # Server acknowledges every put; read (and discard) the reply.
        self.recieve()
    def get(self,key):
        """Return {metric_name: [(timestamp, value), ...]}; '*' fetches all keys."""
        data = {}
        msg = "get {}\n".format(key)
        self.send(msg)
        answer = self.recieve()
        # Each payload line is "<name> <value> <timestamp>".
        items = [item.split() for item in answer.split('\n')]
        if items == [[]]:
            # Empty payload: no data stored for this key.
            return data
        else:
            for i in range(0,len(items)):
                if items[i][0] not in data:
                    data[items[i][0]] = []
                data[items[i][0]].append((int(items[i][2]), float(items[i][1])))
            if key == '*':
                return data
            else:
                # Narrow the result to just the requested key.
                data = {key: data.get(key)}
                return data
|
""" -*- coding: utf-8 -*-
@author: omerkocadayi
https://github.com/omerkocadayi
https://www.linkedin.com/in/omerkocadayi/ """
import cv2
#img2 = cv2.imread("750x750_siyah.jpg")
# Flag 0 loads the image directly as single-channel grayscale.
img = cv2.imread("750x750.jpg",0)
""" resmi img degiskenine atadik. '0' parametresi direkt olarak siyah-beyaza gecis yapar
print(type(img)) #->numpy.ndarray """
# NOTE(review): "......./Desktop/..." is a placeholder path - replace with a
# real absolute path before running; imwrite likely just returns False here.
cv2.imwrite("......./Desktop/750x750_siyah.jpg", img)
# the img variable is saved to the desktop in black & white
cv2.imshow("Deneme Resmi", img) # the image is shown on screen
#cv2.imshow("Deneme Resmi", img) # the image is shown on screen
cv2.waitKey(0)  # block until a key is pressed, then tear down the window
cv2.destroyAllWindows()
|
#coding:utf-8
import sys
import re
input_file=sys.argv[1]
output_file=sys.argv[2]
# Parse a Primer3 ".p3out" result file (KEY=value lines) and emit one TSV row
# per designed primer pair. `with` blocks ensure both files are closed and the
# output is flushed (the original left the output file open forever).
with open(output_file,"w") as output:
    output.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format("sequence_id","primer_left","primer_right","seq_info_left","seq_info_right","tm_left","tm_right","gc_left","gc_right","product_size"))
    # input_file="/mnt/data/test/2.mei_ssr/chr1_misa.p3out"
    with open(input_file) as records:
        for line in records:
            line=line.strip("\n")
            # Split on both '_' and '=' so individual key tokens
            # (SEQUENCE, TM, GC, SIZE, ...) can be tested via membership.
            line1=re.split('_|=', line)
            if line.startswith("SEQUENCE_ID="):
                # New record: reset the per-sequence primer-pair counter.
                # NOTE: assumes SEQUENCE_ID precedes all PRIMER_* lines of its
                # record (true for Primer3 output); otherwise NameError below.
                i=1
                sequence_id=line.split("=")[1]
            if line.startswith("PRIMER_LEFT") and 'SEQUENCE' in line1:
                primer_left=line.split("=")[1]
            if line.startswith("PRIMER_RIGHT") and 'SEQUENCE' in line1:
                primer_right=line.split("=")[1]
            if line.startswith("PRIMER_LEFT") and len(line1)==4:
                # Bare "PRIMER_LEFT_<n>=start,len" position line.
                seq_info_left=line.split("=")[1]
            if line.startswith("PRIMER_RIGHT") and len(line1)==4:
                seq_info_right = line.split("=")[1]
            if line.startswith("PRIMER_LEFT") and 'TM' in line1:
                tm_left=line.split("=")[1]
            if line.startswith("PRIMER_RIGHT") and 'TM' in line1:
                tm_right=line.split("=")[1]
            if line.startswith("PRIMER_LEFT") and 'GC' in line1:
                gc_left=line.split("=")[1]
            if line.startswith("PRIMER_RIGHT") and 'GC' in line1:
                gc_right=line.split("=")[1]
            if line.startswith("PRIMER_PAIR") and 'SIZE' in line1:
                # PRODUCT_SIZE closes one primer pair: write the row.
                product_size=line.split("=")[1]
                output.write("{}_{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(sequence_id,i,primer_left,primer_right,seq_info_left,seq_info_right,tm_left,tm_right,gc_left,gc_right,product_size))
                i+=1
|
import contextlib
import functools
import torch
# Process-wide registry of active module settings; mutated by
# set_module_settings/default_module_settings and read by ModuleBase.__init__.
_module_settings = {}
@contextlib.contextmanager
def set_module_settings(**kwargs):
    """Temporarily register module settings for the duration of the block.

    Already-present keys are left untouched (first writer wins); the previous
    settings dict is restored on exit, even if the body raises.
    """
    global _module_settings
    saved = _module_settings.copy()
    try:
        for name, value in kwargs.items():
            _module_settings.setdefault(name, value)
        yield
    finally:
        _module_settings = saved
def default_module_settings(**settings_kwargs):
    """Decorator that registers default module settings around a call.

    Module configurations priority.
    1. config key name with '!' prefix.
    2. configurations given to the module constructor.
    3. First configs given to default_module_settings().
    """
    def settings_wrapper2(f):
        @functools.wraps(f)
        def settings_wrapper(*args, **kwargs):
            global _module_settings
            old_module_settings = _module_settings.copy()
            # First writer wins: keys already registered are not overridden.
            for key, value in settings_kwargs.items():
                _module_settings.setdefault(key, value)
            # BUG FIX: restore the previous settings in a finally block; the
            # original leaked the temporary settings whenever f() raised,
            # unlike the sibling set_module_settings context manager.
            try:
                return f(*args, **kwargs)
            finally:
                _module_settings = old_module_settings
        return settings_wrapper
    return settings_wrapper2
class ModuleBase(torch.nn.Module):
    """nn.Module base that merges the global `_module_settings` into ctor kwargs.

    '!'-prefixed setting keys force their value over constructor kwargs,
    except when the kwarg carries a recognised high-priority value
    (see PRIORITY_KWARGS_SETTINGS). The merged result is stored on
    `self.module_settings`.
    """
    VERSION = (0, 0)
    # If 'none' activation is specified by kwargs, it has the highest priority.
    # This is because some architectures need blocks without activation.
    PRIORITY_KWARGS_SETTINGS = {'activation': 'none', 'activation2': 'none'}

    def __init__(self, **kwargs):
        super().__init__()
        merged = kwargs
        for name in _module_settings:
            if name[0] == '!':
                target = name[1:]
                # Forced setting - unless the kwarg is a high-priority value.
                if not self._has_kwargs_high_priority(kwargs, target):
                    merged[target] = _module_settings[name]
            elif name not in merged:
                merged[name] = _module_settings[name]
        self.module_settings = merged

    def reset_parameters(self):
        """Hook for subclasses; the base class has no parameters to reset."""
        pass

    @staticmethod
    def _has_kwargs_high_priority(kwargs, name):
        """True iff kwargs[name] exists and equals its high-priority value."""
        forced = ModuleBase.PRIORITY_KWARGS_SETTINGS.get(name)
        return forced is not None and name in kwargs and kwargs[name] == forced
|
# coding=utf8
import tensorflow as tf
def softmax_with_mask(tensor, mask):
    """
    Calculate Softmax with mask
    :param tensor: [shape1, shape2]
    :param mask: [shape1, shape2]
    :return: masked softmax normalized along axis 1
    """
    # Zero out masked positions after exponentiation, then normalize each
    # row by the sum of its surviving entries.
    masked_exp = tf.multiply(tf.exp(tensor), mask)
    row_totals = tf.reshape(tf.reduce_sum(masked_exp, axis=1), shape=[-1, 1])
    return tf.div(masked_exp, row_totals)
def get_last_relevant(output, length):
    """
    RNN Output
    :param output: [shape_0, shape_1, shape_2]
    :param length: [shape_0] - per-sequence valid lengths
    :return:
    [shape_0, shape_2] - the output at the last valid timestep of each sequence
    """
    feature_dim = tf.shape(output)[2]
    # tf.unstack requires the batch dimension of `length` to be static.
    pieces = [
        tf.slice(output, begin=[idx, seq_len - 1, 0], size=[1, 1, feature_dim])
        for idx, seq_len in enumerate(tf.unstack(length))
    ]
    return tf.concat(pieces, 0)
|
from table import Table
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
def _default(*args, **opt):
    """Fallback prior/weight function: every node and edge counts as 1."""
    return 1
class PageRanker:
    """PageRank-style ranker scoring the sentences of a Table corpus over a
    similarity graph. Static prior/weight helpers share class-level caches
    that are reset via init()."""
    _sims = None
    _svd = None
    _sim_table = {}
    _np_table = {}
    _v_table = {}

    def __init__(self, tab, sim=0, bm25=True):
        """
        :param tab: The table
        :param sim: Threshold of similarity to consider phrases connected in the graph
        :param bm25: Method to calculate similarity from
        """
        # Optional parameters
        self._sim = sim
        self._bm25 = bm25
        self._table = tab
        self._graph = self._init_graph()

    def _init_graph(self):
        """Build an undirected adjacency map: sentence -> set of similar sentences."""
        def add(g, key, val):
            # BUG FIX: previously tested `key not in graph` (the enclosing
            # variable) instead of the `g` parameter; equivalent only because
            # g was always `graph`.
            if key not in g:
                g[key] = set()
            g[key].add(val)

        graph = {}  # Filter out repeated sentences if they exist
        for s1 in self._table.corpus:
            for s2 in self._table.corpus:
                if s1 != s2:
                    # BUG FIX: honour the constructor's bm25 flag (was
                    # hard-coded to True). Also removed a stray debug print.
                    if self._table.independent_similarity(s1, s2, bm25=self._bm25) >= self._sim:
                        add(graph, s1, s2)
                        add(graph, s2, s1)
        return graph

    def rank(self,
             prior_fn=_default,
             weight_fn=_default,
             prior_calc=_default,
             prestige_calc=_default, iterations=2, rw=0.15):
        """
        :param prior_fn: Function to calculate the prior of each node
        :param weight_fn: Weight function for nodes
        :param prior_calc: Function to calculate of the prior part of the ranking function
        :param prestige_calc: Function to calculate the prestige part of the ranking function
        :param iterations: Number of iterations
        :param rw: Random walk probability
        :return: Ranked sentences
        """
        priors = prior_fn(self._table, bm25=self._bm25)
        ranks = {node: 1 / len(self._graph) for node in self._graph}  # Initial prestige vector
        for it in range(iterations):
            print('IT %d' % it)  # progress log, one line per iteration
            new_ranks = {}
            for node in self._graph:
                prior_part = rw * prior_calc(self._graph, priors, node)
                prestige_part = (1 - rw) * prestige_calc(self._table, self._graph, node, ranks, weight_fn)
                new_ranks[node] = prior_part + prestige_part
            ranks = new_ranks
        return ranks

    @staticmethod
    def init(table, corpus):
        """Reset the shared caches and fit the SVD used by weight_pca."""
        PageRanker._sims = table.similarity(" ".join(corpus))
        PageRanker._sim_table = {}
        PageRanker._np_table = {}
        PageRanker._v_table = {}
        PageRanker._svd = TruncatedSVD(n_components=3)
        PageRanker._svd.fit_transform(table.matrix)

    # 1 (right hand side)
    @staticmethod
    def prestige_1_level(table, graph, node, ranks, weight_fn, **opt):
        # NOTE(review): the comprehension variable shadows the `node`
        # parameter, so this sums over every node in the graph regardless of
        # the argument - confirm this is the intended level-1 prestige.
        return sum([ranks[node] / len(graph[node]) for node in graph])

    # 1 (left hand side)
    @staticmethod
    def prior_N(graph, priors, node):
        """Uniform prior: 1 over the node's degree."""
        return 1 / len(graph[node])

    # 2 (left hand site)
    @staticmethod
    def prior_quotient(graph, priors, node):
        """Node prior normalized by the priors of its neighbours."""
        return priors[node] / sum([priors[adj] for adj in graph[node]])

    # 2 (right hand side)
    @staticmethod
    def prestige_2_levels(table, graph, pi, ranks, weight_fn, **opt):
        """Weighted prestige over pi's neighbours, each normalized by its own
        outgoing weights (zero-weight neighbourhoods contribute 0)."""
        acc = []
        for pj in graph[pi]:
            first_level = ranks[pj] * weight_fn(table, pj, pi, **opt)
            acc_sum = 0
            for pk in graph[pj]:
                acc_sum += weight_fn(table, pj, pk, **opt)
            if acc_sum == 0:
                acc.append(0)
            else:
                acc.append(first_level / acc_sum)
        return sum(acc)

    # 2.1
    @staticmethod
    def prior_sentence_ix(table, **opt):
        """Position-based prior: earlier sentences get larger priors."""
        corpus = table.corpus
        N = len(corpus)  # Smoothing parameter
        return {corpus[ix]: 1 / (N + ix + 1) for ix in range(len(corpus))}

    # 2.2
    @staticmethod
    def prior_relevance(table, **opt):
        """Prior proportional to each sentence's similarity to the whole corpus."""
        corpus = table.corpus
        PageRanker._sims = table.similarity(" ".join(corpus))
        res = {}
        for ix in range(len(corpus)):
            res[corpus[ix]] = PageRanker._sims[ix]
        return res

    # 2.3
    @staticmethod
    def weight_relevance(table, s1, s2, **opt):
        """Edge weight = similarity of s1 to s2 (similarity row cached per s1)."""
        sim = PageRanker._sim_table.get(s1, None)
        if sim is None:
            sim = table.similarity(s1, **opt)
            PageRanker._sim_table[s1] = sim
        # v2 = able.independent_similarity(s1, s2, bm25=True)
        idx = table.corpus.index(s2)
        return sim[idx]

    # 2.4
    @staticmethod
    def weight_shared_noun_phrases(table, s1, s2, **opt):
        """Edge weight = number of noun phrases shared by s1 and s2 (cached)."""
        np_s1 = PageRanker._np_table.get(s1, None)
        if np_s1 is None:
            np_s1 = Table.extract_np([s1])
            PageRanker._np_table[s1] = np_s1
        np_s2 = PageRanker._np_table.get(s2, None)
        if np_s2 is None:
            np_s2 = Table.extract_np([s2])
            PageRanker._np_table[s2] = np_s2
        return len([np for np in np_s1 if np in np_s2])

    @staticmethod
    def weight_pca(table, s1, s2):
        """Edge weight = cosine similarity of SVD-projected tf-idf vectors
        (requires init() to have fitted _svd; row cached per s1)."""
        sim = PageRanker._v_table.get(s1, None)
        if sim is None:
            v1 = PageRanker._svd.transform(table.tf_idf_vect([s1]))
            all_rows = PageRanker._svd.transform(table.matrix)  # was `all`: shadowed the builtin
            sim = cosine_similarity(v1, all_rows)[0]
            PageRanker._v_table[s1] = sim
        idx = table.corpus.index(s2)
        res = sim[idx]
        return res
|
"""
===========================
Plots with different scales
===========================
Demonstrate how to do two plots on the same axes with different left and
right scales.
The trick is to use *two different axes* that share the same *x* axis.
You can use separate `matplotlib.ticker` formatters and locators as
desired since the two axes are independent.
Such axes are generated by calling the `Axes.twinx` method. Likewise,
`Axes.twiny` is available to generate axes that share a *y* axis but
have different top and bottom scales.
The twinx and twiny methods are also exposed as pyplot functions.
"""
import numpy as np
import matplotlib.pyplot as plt
from readfiles import readlookup
# Column selection into the readlookup() result: x = column 0, y = column 6
# (total cluster density); colnum = number of columns to read.
xarrn=0
yarrn=6
colnum=8
filename='../Output/Clusters.dat'
results1 = readlookup(filename,colnum)
# Convert column 0 to dose (dpa) via the damage-generation rate.
Gdpa=1.84E-6
length=len(results1[0])
for i in range(length):
    results1[0][i]=results1[0][i]*Gdpa
"""
filename='./Output1/ClustersMed.dat'
results2 = readlookup(filename,colnum)
Gdpa=5.52E-7
length=len(results2[0])
for i in range(length):
    results2[0][i]=results2[0][i]*Gdpa
filename='./Output1/ClustersHigh.dat'
results3 = readlookup(filename,colnum)
Gdpa=1.84E-6
length=len(results3[0])
for i in range(length):
    results3[0][i]=results3[0][i]*Gdpa
"""
fig, ax1 = plt.subplots()
# Plot once on ax1 and clear it; the visible curve is redrawn on the twinned
# axis ax2 below (original author's axis-setup trick, kept as-is).
ax1.plot(results1[yarrn+1], results1[xarrn],linewidth=2, color='green',linestyle='--',label= 'Low')
#ax1.plot(results2[yarrn+1], results2[xarrn],linewidth=2, color='blue',linestyle='--',label= 'Medium')
#ax1.plot(results3[yarrn+1], results3[xarrn],linewidth=2, color='red',linestyle='--',label= 'High')
ax1.cla()
# Make the y-axis label, ticks and tick labels match the line color.
ax2 = ax1.twiny()
ax2.plot(results1[xarrn], results1[yarrn],linewidth=2, color='green',linestyle='--',label= 'Low')
#ax2.plot(results2[xarrn], results2[yarrn],linewidth=2, color='blue',linestyle='--',label= 'Medium')
#ax2.plot(results3[xarrn], results3[yarrn],linewidth=2, color='red',linestyle='--',label= 'High')
ax1.text(2.E-3, 3.E22, r'He Implantation T=623K')
ax1.set_xlabel(r'He Concentration (appm)')
ax1.set_ylabel(r'Total Cluster Density (m$^{-3}$)')
ax2.set_xlabel(r'Dose (dpa)')
# Quantity-specific y-axis labelling and extras, chosen by yarrn.
if yarrn==1:
    ax1.set_ylabel(r'Conc. of Vacancies (per atom)')
if yarrn==2:
    ax1.set_ylabel(r'Conc. of SIA (per atom)')
if yarrn==3:
    ax1.set_ylabel(r'Conc. of He (per atom)')
if yarrn==4:
    ax1.set_ylabel(r'Conc. of He at GB (appm)')
    plt.axhline(y=48.0, xmin=results1[xarrn][0], xmax=results1[xarrn][-1], linewidth=2, color = 'k')
    # BUG FIX: this call used the undefined name `ax` (NameError whenever
    # yarrn==4); annotate on ax1, the axis carrying this branch's labels.
    ax1.annotate('Critical Concentration', xy=(0.0005,48.8), xytext=(0.0005, 2.0),
            arrowprops=dict(facecolor='black', shrink=0.0005),)
if yarrn==6:
    ax1.set_ylabel(r'Total Cluster Density (m$^{-3}$)')
    # Experimental cluster densities with asymmetric error bars for comparison.
    experimentaldatay=np.array([4.0E+21,1.7E+22])
    experimentaldatax=np.array([1.0E-03,1.0E-02])
    upper_error = np.array([3.0E+21,1.5E+22])
    lower_error = np.array([2.5E+21,11.0E+21])
    asymmetric_error = [lower_error, upper_error]
    ax2.errorbar(experimentaldatax, experimentaldatay, yerr=asymmetric_error, fmt='o',color='r',linewidth=2)
plt.xlim(8.E-7 , 3.E-2)
plt.ylim(7.2E+19 , 4.E22)
legend = ax2.legend(loc='lower right', shadow=True)
# Log-log on both axis pairs.
ax1.set_yscale('log')
ax2.set_yscale('log')
ax2.set_xscale('log')
ax1.set_xscale('log')
fig.tight_layout()
plt.show()
|
# Please edit this list and import only required elements
import webnotes
from webnotes.model.doc import Document
from webnotes.model.doclist import getlist
from webnotes.model.code import get_obj
from webnotes import session, form, is_testing, msgprint, errprint
sql = webnotes.conn.sql
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
    """Sales Person controller: nested-set maintenance, target validation and
    (optional) contact synchronisation."""
    def __init__(self, doc, doclist=[]):
        # Mutable default kept: webnotes instantiates controllers with an
        # explicit doclist; changing the signature would touch the framework.
        self.doc = doc
        self.doclist = doclist
        self.nsm_parent_field = 'parent_sales_person'

    def check_state(self):
        """Return a newline-prefixed, newline-joined list of state names for
        the document's country (used to fill a select field)."""
        # SECURITY FIX: parameterized query instead of "%" string
        # interpolation of self.doc.country into the SQL text.
        states = sql("select state_name from `tabState` where `tabState`.country=%s", self.doc.country)
        return "\n" + "\n".join([s[0] for s in states])

    # update Node Set Model
    def update_nsm_model(self):
        """Rebuild this node's lft/rgt values in the nested-set tree."""
        import webnotes
        import webnotes.utils.nestedset
        webnotes.utils.nestedset.update_nsm(self)

    # ON UPDATE
    #--------------------------------------
    def on_update(self):
        # update nsm
        self.update_nsm_model()

    def validate(self):
        """Require either a target qty or a target amount on every target row."""
        # BUG FIX: flt was never imported at module level, so every call
        # raised NameError. Imported from webnotes.utils (its home in this
        # framework) - confirm against the deployed webnotes version.
        from webnotes.utils import flt
        for d in getlist(self.doclist, 'target_details'):
            if not flt(d.target_qty) and not flt(d.target_amount):
                msgprint("Either target qty or target amount is mandatory.")
                raise Exception
        #self.sync_with_contact()

    def sync_with_contact(self):
        """Create or update the Contact record mirroring this sales person."""
        cid = sql("select name from tabContact where sales_person_id = %s and is_sales_person=1", self.doc.name)
        if cid:
            d = Document('Contact', cid[0][0])
        else:
            d = Document('Contact')
        name_split = self.doc.sales_person_name.split()
        d.contact_name = self.doc.sales_person_name
        d.first_name = name_split[0]
        d.last_name = len(name_split) > 1 and name_split[1] or ''
        d.email_id = self.doc.email_id
        d.contact_no = d.mobile_no = self.doc.mobile_no
        d.designation = self.doc.designation
        d.department = self.doc.department
        d.sales_person_id = self.doc.name
        d.is_sales_person = 1
        # Insert when the contact has no name yet, otherwise update in place.
        d.save(new = (not d.name))
|
"""Initial migration
Revision ID: e421475dba3c
Revises:
Create Date: 2021-06-22 18:26:28.255632
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e421475dba3c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: clients, blocked_tokens, cad_models, confirmations."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Core account table; email and username are indexed unique below.
    op.create_table('clients',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('username', sa.String(length=120), nullable=True),
    sa.Column('first_name', sa.String(length=120), nullable=True),
    sa.Column('last_name', sa.String(length=120), nullable=True),
    sa.Column('password', sa.String(length=120), nullable=True),
    sa.Column('oauth_token', sa.String(length=200), nullable=True),
    sa.Column('oauth_token_secret', sa.String(length=200), nullable=True),
    sa.Column('bio', sa.String(length=250), nullable=True),
    sa.Column('avatar_filename', sa.String(length=100), nullable=True),
    sa.Column('avatar_uploaded', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_clients'))
    )
    op.create_index(op.f('ix_clients_email'), 'clients', ['email'], unique=True)
    op.create_index(op.f('ix_clients_username'), 'clients', ['username'], unique=True)
    # Revoked JWT identifiers, keyed to the owning client.
    op.create_table('blocked_tokens',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('jti', sa.String(length=50), nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['client_id'], ['clients.id'], name=op.f('fk_blocked_tokens_client_id_clients')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_blocked_tokens'))
    )
    # CAD model metadata per client.
    op.create_table('cad_models',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('cad_model_id', sa.String(length=100), nullable=False),
    sa.Column('cad_model_name', sa.String(length=100), nullable=False),
    sa.Column('cad_model_height', sa.Float(precision=2), nullable=False),
    sa.Column('cad_model_width', sa.Float(precision=2), nullable=False),
    sa.Column('cad_model_length', sa.Float(precision=2), nullable=False),
    sa.Column('cad_model_material', sa.String(length=100), nullable=False),
    sa.Column('cad_model_mesh_percent', sa.Integer(), nullable=False),
    sa.Column('cad_model_visibility', sa.Boolean(), nullable=True),
    sa.Column('cad_model_creation_time', sa.DateTime(), nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['client_id'], ['clients.id'], name=op.f('fk_cad_models_client_id_clients')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_cad_models'))
    )
    # Email-confirmation tokens (string primary key) with expiry timestamps.
    op.create_table('confirmations',
    sa.Column('id', sa.String(length=50), nullable=False),
    sa.Column('expire_at', sa.Integer(), nullable=False),
    sa.Column('confirmed', sa.Boolean(), nullable=False),
    sa.Column('client_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['client_id'], ['clients.id'], name=op.f('fk_confirmations_client_id_clients')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_confirmations'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(); child tables first so the
    foreign keys into clients are removed before the parent table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('confirmations')
    op.drop_table('cad_models')
    op.drop_table('blocked_tokens')
    op.drop_index(op.f('ix_clients_username'), table_name='clients')
    op.drop_index(op.f('ix_clients_email'), table_name='clients')
    op.drop_table('clients')
    # ### end Alembic commands ###
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional ReLU building block.

    When `bn` is True the convolution is created without a bias term
    (BatchNorm's affine shift makes it redundant); otherwise bias is kept.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=not bn)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class BasicRFB(nn.Module):
    """Receptive Field Block: three parallel conv branches with increasing
    dilation, concatenated, projected by a 1x1 conv, and added to a 1x1
    shortcut scaled by `scale`.

    Attribute names (branch0/1/2, ConvLinear, shortcut) are load-bearing:
    they are the state_dict keys of pretrained checkpoints.
    """
    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, map_reduce=8, vision=1, groups=1):
        super(BasicRFB, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        # Bottleneck width; in_planes must be divisible by map_reduce.
        inter_planes = in_planes // map_reduce
        # Branch 0: 1x1 reduce -> 3x3 -> 3x3 dilated (vision+1).
        self.branch0 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 1, dilation=vision + 1, relu=False, groups=groups)
        )
        # Branch 1: same shape as branch 0 but dilation vision+2.
        self.branch1 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 2, dilation=vision + 2, relu=False, groups=groups)
        )
        # Branch 2: deeper stack (two 3x3 convs) ending in dilation vision+4.
        self.branch2 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, (inter_planes // 2) * 3, kernel_size=3, stride=1, padding=1, groups=groups),
            BasicConv((inter_planes // 2) * 3, 2 * inter_planes, kernel_size=3, stride=stride, padding=1, groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 4, dilation=vision + 4, relu=False, groups=groups)
        )
        # Fuse the 3 branches (3 * 2*inter_planes channels) back to out_planes.
        self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
        self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
        # Non-inplace so the residual sum is not mutated in place.
        self.relu = nn.ReLU(inplace=False)
    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        # Concatenate along channels, project, then add the scaled shortcut.
        out = torch.cat((x0, x1, x2), 1)
        out = self.ConvLinear(out)
        short = self.shortcut(x)
        out = out * self.scale + short
        out = self.relu(out)
        return out
def conv_bn(inp, oup, stride=1):
    """3x3 convolution (padding 1, no bias) followed by batch-norm and ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def depth_conv2d(inp, oup, kernel=1, stride=1, pad=0):
    """Depthwise conv (groups=inp) -> ReLU -> 1x1 pointwise projection."""
    depthwise = nn.Conv2d(inp, inp, kernel_size=kernel, stride=stride, padding=pad, groups=inp)
    pointwise = nn.Conv2d(inp, oup, kernel_size=1)
    return nn.Sequential(depthwise, nn.ReLU(inplace=True), pointwise)
def conv_dw(inp, oup, stride):
    """Depthwise-separable block: depthwise 3x3 + BN + ReLU, then
    pointwise 1x1 + BN + ReLU (MobileNet-style, both convs bias-free)."""
    depthwise = [
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU(inplace=True),
    ]
    pointwise = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*depthwise, *pointwise)
class RFB(nn.Module):
    """Lightweight single-shot face detector with a MobileNet-style backbone
    and a receptive-field block (BasicRFB) on the first detection scale.
    Predicts bounding boxes, 2-class scores and 10 landmark coordinates
    from four feature maps of decreasing resolution."""
    def __init__(self, cfg = None, phase = 'train'):
        """
        :param cfg: Network related settings.
        :param phase: train or test.
        """
        super(RFB,self).__init__()
        self.phase = phase
        # Two output classes (object vs. background).
        self.num_classes = 2
        # Backbone: conv_bn stem followed by depthwise-separable blocks;
        # strided blocks (conv1/3/5/9/12) progressively downsample.
        self.conv1 = conv_bn(3, 16, 2)
        self.conv2 = conv_dw(16, 32, 1)
        self.conv3 = conv_dw(32, 32, 2)
        self.conv4 = conv_dw(32, 32, 1)
        self.conv5 = conv_dw(32, 64, 2)
        self.conv6 = conv_dw(64, 64, 1)
        self.conv7 = conv_dw(64, 64, 1)
        # Receptive-field enrichment before the first detection map.
        self.conv8 = BasicRFB(64, 64, stride=1, scale=1.0)
        self.conv9 = conv_dw(64, 128, 2)
        self.conv10 = conv_dw(128, 128, 1)
        self.conv11 = conv_dw(128, 128, 1)
        self.conv12 = conv_dw(128, 256, 2)
        self.conv13 = conv_dw(256, 256, 1)
        # Extra downsampling stage producing the fourth (coarsest) map.
        self.conv14 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1),
            nn.ReLU(inplace=True),
            depth_conv2d(64, 256, kernel=3, stride=2, pad=1),
            nn.ReLU(inplace=True)
        )
        # Per-scale prediction heads: locations, confidences, landmarks.
        self.loc, self.conf, self.landm = self.multibox(self.num_classes);
    def multibox(self, num_classes):
        """Build one (loc, conf, landm) head per detection map.

        The four maps use 3, 2, 2 and 3 anchors respectively; each head
        predicts anchors * {4 box offsets, num_classes scores, 10 landmark
        coordinates} channels.
        """
        loc_layers = []
        conf_layers = []
        landm_layers = []
        loc_layers += [depth_conv2d(64, 3 * 4, kernel=3, pad=1)]
        conf_layers += [depth_conv2d(64, 3 * num_classes, kernel=3, pad=1)]
        landm_layers += [depth_conv2d(64, 3 * 10, kernel=3, pad=1)]
        loc_layers += [depth_conv2d(128, 2 * 4, kernel=3, pad=1)]
        conf_layers += [depth_conv2d(128, 2 * num_classes, kernel=3, pad=1)]
        landm_layers += [depth_conv2d(128, 2 * 10, kernel=3, pad=1)]
        loc_layers += [depth_conv2d(256, 2 * 4, kernel=3, pad=1)]
        conf_layers += [depth_conv2d(256, 2 * num_classes, kernel=3, pad=1)]
        landm_layers += [depth_conv2d(256, 2 * 10, kernel=3, pad=1)]
        loc_layers += [nn.Conv2d(256, 3 * 4, kernel_size=3, padding=1)]
        conf_layers += [nn.Conv2d(256, 3 * num_classes, kernel_size=3, padding=1)]
        landm_layers += [nn.Conv2d(256, 3 * 10, kernel_size=3, padding=1)]
        return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers), nn.Sequential(*landm_layers)
    def forward(self,inputs):
        """Return (boxes, scores, landmarks); scores are softmaxed unless training."""
        detections = list()
        loc = list()
        conf = list()
        landm = list()
        x1 = self.conv1(inputs)
        x2 = self.conv2(x1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        x6 = self.conv6(x5)
        x7 = self.conv7(x6)
        x8 = self.conv8(x7)
        # Four detection maps are collected at increasing strides.
        detections.append(x8)
        x9 = self.conv9(x8)
        x10 = self.conv10(x9)
        x11 = self.conv11(x10)
        detections.append(x11)
        x12 = self.conv12(x11)
        x13 = self.conv13(x12)
        detections.append(x13)
        x14= self.conv14(x13)
        detections.append(x14)
        # Apply the matching head to each map; permute to NHWC so that the
        # per-anchor channels are contiguous before flattening.
        for (x, l, c, lam) in zip(detections, self.loc, self.conf, self.landm):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
            landm.append(lam(x).permute(0, 2, 3, 1).contiguous())
        # Flatten every scale into (batch, num_anchors_total, values).
        bbox_regressions = torch.cat([o.view(o.size(0), -1, 4) for o in loc], 1)
        classifications = torch.cat([o.view(o.size(0), -1, 2) for o in conf], 1)
        ldm_regressions = torch.cat([o.view(o.size(0), -1, 10) for o in landm], 1)
        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output
|
def openData(doc):
    """Return the entire text content of file *doc*.

    Uses a context manager so the file handle is always closed; the
    original opened the file and never closed it.
    """
    with open(doc, 'r') as f:
        return f.read()
def saveData(doc, data):
    """Write *data* to file *doc*, truncating any existing content.

    The original left the handle open and pointlessly rebound the file
    object to write()'s return value; a context manager fixes both.
    """
    with open(doc, 'w') as f:
        f.write(data)
def parse(doc):
    """Parse space-separated records from *doc* into (heights, weights).

    Each data line is expected as "<label> <height> <weight>"; values are
    returned as strings, as before. Blank or malformed lines are skipped —
    the original raised IndexError on the empty line produced by a file
    ending in a newline.
    """
    heights = []
    weights = []
    for line in openData(doc).split('\n'):
        components = line.split(' ')
        if len(components) < 3:
            continue  # blank or malformed line
        heights.append(components[1])
        weights.append(components[2])
    return heights, weights
parse('HeightsWeightsData.txt')
|
#! /usr/bin/env python
import sys
import os
import argparse
import cv2
import rospy
import roslib
from std_msgs.msg import String, Float32MultiArray
#from ar_track_alvar_msgs.msg import AlvarMarker, AlvarMarkers
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
class ARTagFollow:
    """Keyboard-teleop data collector for a TurtleBot-style robot.

    Subscribes to RGB and depth camera topics, displays the live frame,
    and on each drive key publishes a Twist command while saving the
    frame as <i>.jpg plus an "<i>.jpg <angular> <linear>" annotation line
    for later training.
    """
    def __init__(self, dir_):
        # Latest RGB and depth frames received from the camera callbacks.
        self.original, self.depth = None, None
        # Running index used to name saved frames.
        self.i = 1
        # Color thresholds (kept from an earlier masking experiment).
        self.lower = np.array([100, 10, 50], dtype = "uint8") #0,48,80
        self.upper = np.array([200, 70, 100], dtype = "uint8") #20,255,255
        self.dir_ = dir_
        # One annotation line per saved frame is appended to this file.
        self.annot_file = open(self.dir_ +"annotation.txt", "w+")
        self.bench_test, self.publish_image = True, False
        rospy.init_node('turtle_follower', anonymous=True)
        self.bridge = CvBridge()
        im_sub = rospy.Subscriber('/camera/rgb/image_raw', Image, self.imageCallBack, queue_size=5)
        depth_sub = rospy.Subscriber('/camera/depth/image_raw', Image, self.depthCallBack, queue_size=5)
        #tag_pose_sub = rospy.Subscriber("/ar_pose_marker", AlvarMarkers, self.tagPoseCallback, queue_size=5)
        self.target_pub = rospy.Publisher('target_info', String, queue_size=5)
        self.cmd_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=5)
        if self.publish_image:
            self.ProcessedRaw = rospy.Publisher('/follow/out_image', Image, queue_size=5)
        # rospy.spin() blocks here until shutdown; all work happens in callbacks.
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print("Rospy Sping Shut down")
            # NOTE(review): window creation after spin() exits looks dead;
            # confirm intended placement (original indentation was ambiguous).
            cv2.namedWindow("image")
            #cv2.setMouseCallback("image", self.click_and_crop)
    # for real-time testing
    def imageCallBack(self, rgb_im):
        """RGB topic callback: convert to a BGR array and trigger SaveData."""
        try:
            im_array = self.bridge.imgmsg_to_cv2(rgb_im, "bgr8")
        except CvBridgeError as e:
            print(e)
        # NOTE(review): if the conversion raised, im_array is unbound here and
        # this line raises NameError instead of printing the message.
        if im_array is None:
            print ('frame dropped, skipping tracking')
        else:
            self.original = np.array(im_array)
            self.SaveData()
    # for real-time testing
    def depthCallBack(self, d_im):
        """Depth topic callback: store the latest 32FC1 depth frame."""
        try:
            d_array = self.bridge.imgmsg_to_cv2(d_im, "32FC1")
        except CvBridgeError as e:
            print(e)
        # NOTE(review): same unbound-variable hazard as imageCallBack.
        if d_array is None:
            print ('frame dropped, skipping tracking')
        else:
            self.depth = np.array(d_array)
    # for real-time testing
    def SaveData(self):
        """Show the current frame and map keys to drive commands.

        f = forward (linear 0.2), r/d = rotate +0.4, l/a = rotate -0.4,
        q = close the annotation file. Each drive key publishes a Twist,
        appends an annotation line, and saves the frame to <dir_>/images/.
        """
        if self.original is not None and self.depth is not None:
            self.showFrame(self.original, 'image')
            cv2.imshow("image", self.original)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("f"):
                base_cmd = Twist()
                base_cmd.linear.x = 0.2
                self.cmd_pub.publish(base_cmd)
                annotn_txt = str(self.i)+str('.jpg 0.0 0.2 \n')
                self.annot_file.write(annotn_txt)
                cv2.imwrite(self.dir_+str('images/')+str(self.i)+'.jpg', self.original)
                self.i += 1
            if key == ord("r") or key==ord("d"):
                base_cmd = Twist()
                base_cmd.angular.z = 0.4
                self.cmd_pub.publish(base_cmd)
                annotn_txt = str(self.i)+str('.jpg 0.4 0.0 \n')
                self.annot_file.write(annotn_txt)
                cv2.imwrite(self.dir_+str('images/')+str(self.i)+'.jpg', self.original)
                self.i += 1
            if key == ord("l") or key==ord("a"):
                base_cmd = Twist()
                base_cmd.angular.z = -0.4
                self.cmd_pub.publish(base_cmd)
                annotn_txt = str(self.i)+str('.jpg -0.4 0.0 \n')
                self.annot_file.write(annotn_txt)
                cv2.imwrite(self.dir_+str('images/')+str(self.i)+'.jpg', self.original)
                self.i += 1
            elif key == ord("q"):
                print ("Saved data")
                self.annot_file.close()
    '''
    # for real-time testing
    def tagPoseCallback(self, msg):
        if self.original is not None and self.depth is not None:
            if msg.markers!=[]:
                self.tag_msg = msg.markers
                self.tag_pose, self.tag_orien = self.tag_msg[0].pose.pose.position, self.tag_msg[0].pose.pose.orientation
                #print ("Found: tag", self.tag_msg[0].id)
                #print (self.tag_pose, self.tag_orien)
                self.makemove()
            if self.bench_test:
                self.showFrame(self.original, 'input_image')
                #self.showFrame(self.depth, 'input_depth')
            if self.publish_image:
                msg_frame = CvBridge().cv2_to_imgmsg(self.original, encoding="bgr8")
                self.ProcessedRaw.publish(msg_frame)
    '''
    def makemove(self):
        """Drive toward the last seen AR tag: forward speed from tag depth
        (target 0.5 m), turn rate proportional to lateral offset."""
        if self.tag_pose != None:
            base_cmd = Twist()
            base_cmd.linear.x = (self.tag_pose.z - 0.5)
            base_cmd.angular.z = -self.tag_pose.x*4
            self.cmd_pub.publish(base_cmd)
    ##########################################################################
    ### For bench testing with dataset images ###############################
    def showFrame(self, frame, name):
        """Display *frame* in an OpenCV window titled *name*."""
        cv2.imshow(name, frame)
        cv2.waitKey(20)
    # stream images from directory Dir_
    def image_streamimg(self, Dir_):
        """Replay saved images from *Dir_* through the processing pipeline."""
        from eval_utils import filter_dir
        dirFiles = filter_dir(os.listdir(Dir_))
        for filename in dirFiles:
            self.original = cv2.imread(Dir_+filename)
            # NOTE(review): ImageProcessor is not defined anywhere in this
            # class — this call would raise AttributeError; confirm intent.
            self.ImageProcessor()
    ####################################################################################
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
@sopel.module.commands('vote', 'rate', 'poll')
def mainfunction(bot, trigger):
    """Check to see if module is enabled."""
    enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'vote')
    # NOTE(review): the commands only execute when enablestatus is falsy,
    # which looks inverted relative to the docstring ("is module enabled").
    # Confirm spicebot_prerun's return contract before changing.
    if not enablestatus:
        # IF "&&" is in the full input, it is treated as multiple commands, and is split
        commands_array = spicemanip(bot, triggerargsarray, "split_&&")
        if commands_array == []:
            commands_array = [[]]
        for command_split_partial in commands_array:
            triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
            execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
    """Take votes and create polls.

    Dispatches on the command word ('vote', 'rate', 'poll'). Round state
    (tallies, participants, channel, start time) is persisted in the bot
    database under the bot's own nick; countdown() closes rounds later.
    """
    now = time.time()
    commandused = trigger.group(1)
    # First positional argument: a sub-command or the vote/rating value.
    choice = spicemanip(bot, triggerargsarray, 1)
    player = trigger.nick
    if commandused == 'vote':
        if choice == 'results':
            getvotes(bot)
        elif choice == 'settime' and trigger.admin:
            timing = spicemanip(bot, triggerargsarray, 2)
            if timing.isdigit():
                timing = int(timing)
                set_database_value(bot, bot.nick, 'votetimer', timing)
                osd(bot, player, 'priv', "Voting delay set to 10 plus" + str(timing))
            else:
                osd(bot, player, 'priv', "Please enter a valid number")
        else:
            # NOTE(review): these four locals are never read in this branch.
            yesvotes = 0
            novotes = 0
            ratings = []
            pollchoice = []
            voters = get_database_value(bot, bot.nick, 'voters') or []
            # One vote per nick per round.
            if player not in voters:
                if choice == 'yes' or choice == 'ya':
                    osd(bot, player, 'priv', "Your yes vote has been recorded")
                    adjust_database_value(bot, bot.nick, 'yesvotes', 1)
                    adjust_database_array(bot, bot.nick, player, 'voters', 'add')
                    # NOTE(review): 'voting' is set to 1 and then immediately
                    # overwritten with 'True' — the first write is redundant.
                    set_database_value(bot, bot.nick, 'voting', 1)
                    set_database_value(bot, bot.nick, 'voting', 'True')
                    set_database_value(bot, bot.nick, 'votechannel', trigger.sender)
                    set_database_value(bot, bot.nick, 'votingstart', now)
                elif choice == 'no' or choice == 'na':
                    osd(bot, player, 'priv', "Your no vote has been recorded")
                    adjust_database_value(bot, bot.nick, 'novotes', 1)
                    adjust_database_array(bot, bot.nick, player, 'voters', 'add')
                    set_database_value(bot, bot.nick, 'voting', 'True')
                    set_database_value(bot, bot.nick, 'votechannel', trigger.sender)
                    set_database_value(bot, bot.nick, 'votingstart', now)
                else:
                    osd(bot, trigger.sender, 'say', "Vote yes or no")
            else:
                osd(bot, player, 'priv', "You have already voted")
    elif commandused == 'rate':
        raters = get_database_value(bot, bot.nick, 'raters') or []
        if not choice:
            osd(bot, trigger.sender, 'say', "Rate on scale of -10 through 10")
        elif choice == 'results':
            getrating(bot)
        elif choice == 'settime' and trigger.admin:
            timing = spicemanip(bot, triggerargsarray, 2)
            if timing.isdigit():
                timing = int(timing)
                set_database_value(bot, bot.nick, 'ratetimer', timing)
                osd(bot, player, 'priv', "Rating delay set to 10 plus" + str(timing))
        else:
            # One rating per nick per round; values clamped to [-10, 10].
            if player not in raters:
                if isfloat(choice):
                    choice = float(choice)
                    if choice > 10:
                        choice = 10
                    if choice < -10:
                        choice = -10
                    osd(bot, player, 'priv', "Your rating of " + str(choice) + " has been recorded")
                    adjust_database_array(bot, bot.nick, player, 'raters', 'add')
                    adjust_database_array(bot, bot.nick, choice, 'ratings', 'add')
                    set_database_value(bot, bot.nick, 'rating', 'True')
                    set_database_value(bot, bot.nick, 'ratechannel', trigger.sender)
                    set_database_value(bot, bot.nick, 'ratestart', now)
                else:
                    osd(bot, player, 'priv', str(choice) + " is not a number between -10 and 10")
            else:
                osd(bot, player, 'priv', "You already submitted a rating this round")
    elif commandused == 'poll':
        # Polls are not implemented yet.
        osd(bot, trigger.sender, 'say', "WIP")
def clearvoting(bot):
    """Reset every piece of vote-round state stored under the bot's nick."""
    for key in ('novotes', 'yesvotes', 'voters', 'voting', 'votechannel'):
        reset_database_value(bot, bot.nick, key)
def clearrating(bot):
    """Reset every piece of rating-round state stored under the bot's nick.

    Also resets the 'rating' flag: the original left it set, so the
    countdown() interval kept re-triggering getrating() every 10 seconds
    after a round had already been closed.
    """
    for key in ('raters', 'ratings', 'rating', 'ratechannel'):
        reset_database_value(bot, bot.nick, key)
@sopel.module.interval(10)
def countdown(bot):
    """Every 10 seconds, close any vote/rating round whose timer expired."""
    isvote = get_database_value(bot, bot.nick, 'voting') or ''
    israte = get_database_value(bot, bot.nick, 'rating') or ''
    votetimeout = get_database_value(bot, bot.nick, 'votetimer')
    ratetimeout = get_database_value(bot, bot.nick, 'ratetimer')
    # Guard unset timers: comparing None with ">" raises TypeError on
    # Python 3, which the original did whenever no delay was configured.
    if isvote and votetimeout is not None:
        # Key fixed from 'votestart': execute_main records the start time
        # under 'votingstart', so the original comparison never matched.
        if get_timesince(bot, bot.nick, 'votingstart') > votetimeout:
            getvotes(bot)
    if israte and ratetimeout is not None:
        if get_timesince(bot, bot.nick, 'ratestart') > ratetimeout:
            getrating(bot)
def getvotes(bot):
    """Announce the yes/no tallies in the vote channel and reset the round."""
    novotes = get_database_value(bot, bot.nick, 'novotes') or 0
    yesvotes = get_database_value(bot, bot.nick, 'yesvotes') or 0
    channel = get_database_value(bot, bot.nick, 'votechannel') or ''
    if channel != '':
        dispmsg = str(yesvotes) + " votes for yes and " + str(novotes) + " no votes"
        # Fixed: the original referenced trigger.sender, but no 'trigger' is
        # in scope here (this is also called from the countdown timer), so it
        # raised NameError. Use the recorded vote channel instead.
        osd(bot, channel, 'say', dispmsg)
        clearvoting(bot)
def getrating(bot):
    """Announce the average rating in the rate channel and reset the round."""
    ratings = get_database_value(bot, bot.nick, 'ratings')
    channel = get_database_value(bot, bot.nick, 'ratechannel') or ''
    if channel != '':
        if ratings:
            # Ratings are stored as floats (see execute_main); the original
            # truncated each with int(), skewing the average. Also avoid
            # shadowing the builtin sum().
            total = 0.0
            for n in ratings:
                total += float(n)
            average = total / len(ratings)
            dispmsg = 'The average is ' + str(average)
        else:
            dispmsg = 'No ratings found'
        # Fixed: the original used trigger.sender, but no 'trigger' is in
        # scope here (also called from the interval timer) -> NameError.
        osd(bot, channel, 'say', dispmsg)
        clearrating(bot)
def isfloat(value):
    """Return True when *value* can be converted to a float, else False."""
    try:
        float(value)
    except ValueError:
        return False
    return True
|
import pandas as pd
import os
import numpy as np
from sklearn.utils import resample

# Load the dataset: column 11 is the class label, columns 0-10 the features.
df = pd.read_csv(os.path.abspath('dataset.csv'), header=None)
y = df.iloc[:, 11].values
X = df.iloc[:, :11].values

# Upsample class 1 to match class 0 for a 1:1 ratio (~9040 rows total).
X_upsampled, y_upsampled = resample(X[y == 1], y[y == 1], replace=True,
                                    n_samples=X[y == 0].shape[0], random_state=1)
X = np.vstack((X[y == 0], X_upsampled))
y = np.hstack((y[y == 0], y_upsampled))

from sklearn.model_selection import train_test_split
# 30% stratified test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
# Fixed: the test set must be scaled with the statistics fitted on the
# training set. The original re-fit the scaler on the test data
# (fit_transform), which leaks test statistics and skews evaluation.
X_test_std = sc.transform(X_test)

# Hand-rolled PCA: eigendecompose the covariance of the standardized
# training set and plot the explained-variance spectrum.
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)

import matplotlib.pyplot as plt
plt.bar(range(1, 12), var_exp, alpha=0.5, align='center', label='individual explained variance')
plt.step(range(1, 12), cum_var_exp, where='mid', label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
# Fixed: tight_layout must be called — the bare attribute reference was a no-op.
plt.tight_layout()
plt.show()
import sys, sqlite3, getpass
from prettytable import PrettyTable
# Connection with the database (module-level; shared by all functions below)
dbConnection = sqlite3.connect(r"C:\\Users\\samjo\\documents\\sqlite\\SgrStore.db")
dbCursor = dbConnection.cursor()
# Get the username from environment variables (only for modifying update records)
currentUser = getpass.getuser()
# For search queries, this receives all of the results from the database and prints them on the screen
def resultsParse():
results = dbCursor.fetchall()
if results != []:
tableFormat = PrettyTable(
[
"ItemId",
"BinId",
"RowId",
"Description",
"Create date",
"Create user",
"Update date",
"Update user",
"Status",
]
)
for item in results:
tableFormat.add_row(item)
print(tableFormat)
elif results == []:
print("> No results found!")
else:
print("> No results found!")
# This adds row, bin, or item records to the database
def recordAdd():
    """Prompt for item details and insert a new active (Status=1) record.

    User text is passed as bound parameters, never concatenated into SQL.
    """
    itemName = input("> Insert an item name: ")  # input() already returns str
    itemBin = input(
        "> Insert the bin number of the item (where you will store the item): "
    )
    itemRow = input(
        "> Insert the row number of the bin (where the bin is stored): "
    )
    dbConnection.execute(
        "INSERT INTO item (RowId, BinId, Description, CreateDate, CreateUser, Status) VALUES (?,?,?,CURRENT_TIMESTAMP,?,'1')",
        (itemRow, itemBin, itemName, currentUser),
    )
    try:
        dbConnection.commit()
    # Fixed: bare 'except' also swallowed KeyboardInterrupt/SystemExit;
    # catch only sqlite errors.
    except sqlite3.Error:
        print("> ERROR: Failed to add item record. ")
# This checks out or checks in records on the database by changing the status column
def recordCheck():
    """Check an item OUT (Status 1 -> 2) or IN (Status 2 -> 1).

    Shows the eligible items, asks for an ItemId, and updates that row's
    status with the current user and timestamp.
    """
    userCheckQuery = input("> Do you want to check OUT or check IN? ").upper()
    # direction -> (current status, new status, prompt, empty-list message,
    # commit-failure message). Collapses the two near-identical branches.
    directions = {
        "OUT": (
            1,
            "2",
            "> Which item do you want to check out? Provide the ItemId (on the far left): ",
            "> No items available to check out!",
            "> ERROR: Failed to check out record. Please leave a physical note so the record can be updated later. ",
        ),
        "IN": (
            2,
            "1",
            "> Which item do you want to check in? Provide the ItemId (on the far-left) ",
            "> No items available to check in!",
            "> ERROR: Failed to check in record. Please leave a physical note so the record can be updated later. ",
        ),
    }
    if userCheckQuery not in directions:
        print("> Please provide a valid option. ")
        return
    current, new, prompt, empty_msg, error_msg = directions[userCheckQuery]
    dbCursor.execute("SELECT * FROM item WHERE Status = ? ORDER BY RowId", (current,))
    results = dbCursor.fetchall()
    if not results:
        print(empty_msg)
        return
    tableFormat = PrettyTable(
        [
            "ItemId",
            "BinId",
            "RowId",
            "Description",
            "Create date",
            "Create user",
            "Update date",
            "Update user",
            "Status",
        ]
    )
    for item in results:
        tableFormat.add_row(item)
    print(tableFormat)
    itemQuery = input(prompt)
    # Fixed: the original concatenated the raw ItemId input into the SQL
    # string, allowing SQL injection; bind parameters instead.
    dbConnection.execute(
        "UPDATE item SET STATUS = ?, UpdateDate = CURRENT_TIMESTAMP, UpdateUser = ? WHERE ItemId = ?",
        (new, currentUser, itemQuery),
    )
    try:
        dbConnection.commit()
    # Fixed: narrow the bare except to sqlite errors.
    except sqlite3.Error:
        print(error_msg)
# This searches the item table for a key word
def recordSearch():
userSearchQuery = input("> Search for a term: ")
dbCursor.execute(
"SELECT * FROM item WHERE Description LIKE '%" + userSearchQuery + "%'"
)
resultsParse()
# This lists all items in the item table
def listAll():
orderInput = input("> Order items ITEM, BIN, ROW, DESCRIPTION, CREATEDATE, CREATEUSER, UPDATEDATE, UPDATEUSER, or STATUS? ")
orderInput = orderInput.upper()
if orderInput == "ITEM":
dbCursor.execute("SELECT * FROM item ORDER BY ItemId")
resultsParse()
elif orderInput == "BIN":
dbCursor.execute("SELECT * FROM item ORDER BY BinId")
resultsParse()
elif orderInput == "ROW":
dbCursor.execute("SELECT * FROM Item ORDER BY RowId")
resultsParse()
elif orderInput == "DESCRIPTION":
dbCursor.execute("SELECT * FROM item ORDER BY Description")
resultsParse()
elif orderInput == "CREATEDATE":
dbCursor.execute("SELECT * FROM item ORDER BY CreateDate DESC")
resultsParse()
elif orderInput == "CREATEUSER":
dbCursor.execute("SELECT * FROM item ORDER BY CreateUser")
resultsParse()
elif orderInput == "UPDATEDATE":
dbCursor.execute("SELECT * FROM item ORDER BY UpdateDate DESC")
resultsParse()
elif orderInput == "UPDATEUSER":
dbCursor.execute("SELECT * FROM item ORDER BY UpdateUser")
resultsParse()
elif orderInput == "STATUS":
dbCursor.execute("SELECT * FROM item ORDER BY Status")
resultsParse()
else:
print("> Please provide a valid option. ")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-13 20:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a required (non-null) 'name'
    # CharField to quiz.Question. Existing rows are backfilled with '' once;
    # preserve_default=False then drops that default from the schema.
    dependencies = [
        ('quiz', '0012_question_multiple_instances'),
    ]
    operations = [
        migrations.AddField(
            model_name='question',
            name='name',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
    ]
|
print('MICHELL\'S TINY ADVENTURE 2!')
print('upstairs or into Kitchen?')
usr = str(input('>'))
while True:
if usr == 'Kitchen' or usr == 'back' or usr == 'downstairs':
print('refrigerator or go "back"')
usr = str(input('>'))
print('use if 1 = ', usr)
if usr == 'refrigerator':
print('eat some food ("Yes or No?")')
us = str(input('>'))
if us == 'yes':
print('The food is slimy and foul, You have died.')
break
else:
print('You die because of starvation.game over')
break
else:
if usr == 'upstairs':
print('Go bedroom or bathroom or go back downstairs')
usr = str(input('>'))
print(usr)
|
from termcolor import colored

# Price list stored as a flat tuple of alternating (name, price) entries.
papelaria = (
    "Lapis", 1.70,
    "Borracha", 0.5,
    "Estojo", 20.50,
    "Esquadro Metalico", 3.70
)

#print(f"\033[1;32m{'LISTA DE PREÇOS':=^44}\033[m")
print(f"\033[1;32m{'LISTA DE PREÇOS':=^44}")
# Walk the tuple pairwise: even indexes are names, odd indexes are prices.
for item, preco in zip(papelaria[::2], papelaria[1::2]):
    print(f"| \033[33m{item:.<30} R${preco:7.2f}", end=" \033[32m|\n")
print(colored("=" * 44, "green"))
|
import numpy as np

x = np.array([-1.0, 1.0, 2.0])
print("x 값",x)
# Comparing a NumPy array with a scalar broadcasts and yields a bool array.
y = x > 0
print("y 값",y)
# Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement and gives the same dtype.
y = y.astype(int)
print("int 형으로 변환",y)
|
import os
import json
import python_http_client

# Demo of the python_http_client fluent API against the SendGrid v3 API:
# exercises GET (collection and single), POST, PATCH, PUT and DELETE on
# the /api_keys endpoint. Requires SENDGRID_API_KEY in the environment.
host = "https://api.sendgrid.com"
api_key = os.environ.get('SENDGRID_API_KEY')
request_headers = {
    "Authorization": 'Bearer {0}'.format(api_key)
}
version = 3  # we could also use client.version(3)
client = python_http_client.Client(host=host,
                                   request_headers=request_headers,
                                   version=version)

# GET collection: list all API keys.
response = client.api_keys.get()
print(response.status_code)
print(response.headers)
print(response.body)

# POST: create a key with three scopes.
data = {
    "name": "My API Key",
    "scopes": [
        "mail.send",
        "alerts.create",
        "alerts.read"
    ]
}
response = client.api_keys.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
# Extract the new key's id for the single-resource calls below.
json_response = json.loads(response.body)
api_key_id = json_response['api_key_id']

# GET single: client.api_keys._(x) appends x as a URL path segment.
response = client.api_keys._(api_key_id).get()
print(response.status_code)
print(response.headers)
print(response.body)

# PATCH: partial update (rename only).
data = {
    "name": "A New Hope"
}
response = client.api_keys._(api_key_id).patch(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)

# PUT: full replacement (name and scopes).
data = {
    "name": "A New Hope",
    "scopes": [
        "user.profile.read",
        "user.profile.update"
    ]
}
response = client.api_keys._(api_key_id).put(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)

# DELETE: remove the key created above.
response = client.api_keys._(api_key_id).delete()
print(response.status_code)
print(response.headers)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.