content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
""" Inspection utilities. """ from typing import Optional import numpy as np import tensorflow as tf # type: ignore from matplotlib import cm # type: ignore from PIL import Image # type: ignore from ._image import preprocess_image, Preprocessing from ._typing import NDUInt8Array, NDFloat32Array def make_grad_cam_heatmap( preprocessed_image: NDFloat32Array, model: tf.keras.Model, last_conv_layer_name: str, classification_linear_layer_name: str, prediction_index: Optional[int] = None ) -> NDFloat32Array: """ References: https://keras.io/examples/vision/grad_cam/ """ if len(preprocessed_image.shape) != 3: raise ValueError( "Input preprocessed image array must have 3 dimensions." ) grad_model = tf.keras.models.Model( model.inputs, [model.get_layer(last_conv_layer_name).output, model.get_layer(classification_linear_layer_name).output] ) with tf.GradientTape() as tape: last_conv_layer_output, output = grad_model( np.expand_dims(preprocessed_image, 0) ) if prediction_index is None: prediction_index = tf.argmax(output[0]) class_channel = output[:, prediction_index] grads = tape.gradient(class_channel, last_conv_layer_output) # Shape: (num_channels,). pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2)) # Shape of last_conv_layer_output: (1, h, w, num_channels). # Shape of heatmap: (h, w, 1). tf_heatmap = last_conv_layer_output[0] @ pooled_grads[..., tf.newaxis] tf_heatmap = tf.squeeze(tf_heatmap) # Normalise to [0.0, 1.0]. 
tf_heatmap = tf.maximum(tf_heatmap, 0.0) / tf.reduce_max(tf_heatmap) heatmap: NDFloat32Array = tf_heatmap.numpy() return heatmap def _resize( image: NDUInt8Array, target_height: int, target_width: int ) -> NDUInt8Array: pil_image = tf.keras.preprocessing.image.array_to_img(image) pil_image = pil_image.resize((target_width, target_height)) return np.array(pil_image) def save_grad_cam( pil_image: Image, heatmap: NDFloat32Array, grad_cam_path: str, target_height: int, target_width: int, alpha: float ) -> None: """ References: https://keras.io/examples/vision/grad_cam/ """ # Rescale heatmap to a range 0-255. scaled_heatmap = np.uint8(255 * heatmap) # Use jet colormap to colorize heatmap. jet = cm.get_cmap("jet") # Use RGB values of the colormap. # See: https://matplotlib.org/stable/api/_as_gen/matplotlib.colors.Colormap.html#matplotlib.colors.Colormap jet_colors = jet(np.arange(256), bytes=True)[:, :3] jet_heatmap = jet_colors[scaled_heatmap] # Superimpose the heatmap on the input image after resizing. jet_heatmap = _resize(jet_heatmap, target_height, target_width) pil_image = pil_image.resize((target_width, target_height)) superimposed_image = jet_heatmap * alpha + np.array(pil_image) pil_superimposed_image = tf.keras.preprocessing.image.array_to_img( superimposed_image ) # Save the superimposed image. pil_superimposed_image.save(grad_cam_path) def make_and_save_nsfw_grad_cam( pil_image: Image, preprocessing: Preprocessing, open_nsfw_model: tf.keras.Model, grad_cam_path: str, grad_cam_height: int, grad_cam_width: int, alpha: float ) -> None: heatmap = make_grad_cam_heatmap( preprocess_image(pil_image, preprocessing), open_nsfw_model, "activation_stage3_block2", "fc_nsfw", 1 ) save_grad_cam( pil_image, heatmap, grad_cam_path, grad_cam_height, grad_cam_width, alpha )
nilq/baby-python
python
#! /usr/bin/env python
# coding=utf-8
# Packaging script for python-nvd3 (d3.js chart wrapper).

# Prefer setuptools; fall back to plain distutils on minimal environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# The long description shown on PyPI is the README plus the changelog
# (with the sphinx changelog marker stripped out).
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('CHANGELOG.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

requirements = [
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='python-nvd3',
    version='0.14.2',
    description="Python NVD3 - Chart Library for d3.js",
    long_description=readme + '\n\n' + history,
    keywords='plot, graph, nvd3, d3',
    author='Belaid Arezqui',
    author_email='areski@gmail.com',
    url='http://github.com/areski/python-nvd3',
    license="MIT",
    py_modules=['nvd3'],
    namespace_packages=[],
    test_suite='tests',
    packages=[
        'nvd3',
    ],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'python-slugify>=1.2.5',
        'Jinja2>=2.8'
        # -*- Extra requirements: -*-
    ],
    entry_points={
        'console_scripts': [
            'nvd3 = nvd3.NVD3Chart:_main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Multimedia :: Graphics :: Presentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
nilq/baby-python
python
# -*- coding: utf-8 -*-

""" Useful functions to work with dictionaries. """


def deep_get(d, *keys, default=None):
    """
    Safely walk a nested structure of dictionaries (and indexables).

    Args:
        d: the object to start the lookup from
        *keys: the successive keys (or indexes) to follow
        default: the value returned when any lookup step fails

    Returns:
        The value found by following ``keys`` through ``d``, or ``default``
        when a step raises KeyError, IndexError or TypeError.

    Example:
        >>> d = {"user": {"id": 1, "login": "foo"}, "date": "2016-04-27"}
        >>> deep_get(d, "user", "login")
        "foo"
        >>> deep_get(d, "user")
        {"id": 1, "login": "foo"}
        >>> deep_get(d, "user", "name")
        None
        >>> deep_get(d, "user", "name", default="bar")
        "bar"
    """
    node = d
    for step in keys:
        try:
            node = node[step]
        except (KeyError, IndexError, TypeError):
            return default
    return node
nilq/baby-python
python
""" PyRetroPrint emulates Epson ESC/P printers, IBM Proprinters, and Atari 8-series """ __all__ = ["pyretroprint", "page", "epsonfx", "ibm"]
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Created on Fri Jun 8 19:20:13 2018 @author: kejintao input information: 1. demand patterns (on minutes) 2. demand databases 3. drivers' working schedule (online/offline time) ** All the inputs are obtained from env, thus we do not need to alter parameters here """ from path import * import pickle class SimulatorPattern(object): def __init__(self, **kwargs): # read parameters self.simulator_mode = kwargs.pop('simulator_mode', 'simulator_mode') self.request_file_name = kwargs['request_file_name'] self.driver_file_name = kwargs['driver_file_name'] if self.simulator_mode == 'simulator_mode': self.request_all = pickle.load(open(load_path + self.request_file_name + '.pickle', 'rb')) self.driver_info = pickle.load(open(load_path + self.driver_file_name + '.pickle', 'rb'))
nilq/baby-python
python
from unittest import TestCase

from catalog import Catalog


class TestCatalog(TestCase):
    # Exercises the project's ``Catalog`` enum-like base class: member
    # attribute access, reverse lookup by attribute, the sequence protocol
    # and the ``_zip`` helper. Semantics of ``Catalog`` itself live in the
    # ``catalog`` module (not visible here).

    def setUp(self):
        # A catalog whose members carry (value, label, other) triples.
        class TestNum(Catalog):
            _attrs = 'value', 'label', 'other'
            red = 1, 'Red', 'stuff'
            blue = 2, 'Blue', 'things'
        self.TestNum = TestNum

    def test_access_attrs(self):
        # Members expose their name plus each declared attribute.
        self.assertEqual(self.TestNum.red.name, 'red')
        self.assertEqual(self.TestNum.red.value, 1)
        self.assertEqual(self.TestNum.red.label, 'Red')
        self.assertEqual(self.TestNum.red.other, 'stuff')

    def test_access_by_attrs(self):
        # Calling the catalog looks a member up by value (default) or by
        # any named attribute.
        self.assertEqual(self.TestNum(2), self.TestNum.blue)
        self.assertEqual(self.TestNum('blue', 'name'), self.TestNum.blue)
        self.assertEqual(self.TestNum(2, 'value'), self.TestNum.blue)
        self.assertEqual(self.TestNum('Blue', 'label'), self.TestNum.blue)
        self.assertEqual(self.TestNum('things', 'other'), self.TestNum.blue)

    def test_set_single_value(self):
        # Scalar member definitions (no _attrs) default to a 'value' attr.
        class TestNum(Catalog):
            red = 1
            blue = 2
        self.assertEqual(TestNum.red.value, 1)
        self.assertEqual(TestNum(2), TestNum.blue)

    def test_wrong_length_of_values(self):
        # Too-short tuples leave trailing attrs as None; extra values in
        # too-long tuples are apparently ignored.
        class TestNum(Catalog):
            _attrs = 'value', 'label', 'other'
            red = 1, 'Red'
            blue = 2, 'Blue', 'things', 'more'
        self.assertIsNone(TestNum.red.other)

    def test_data_model(self):
        # len/containment/iteration/reversal work; members can't be deleted.
        self.assertEqual(len(self.TestNum), 2)
        self.assertTrue(self.TestNum.red in self.TestNum)
        self.assertSequenceEqual(list(self.TestNum),
            [self.TestNum.red, self.TestNum.blue])
        self.assertSequenceEqual(list(reversed(self.TestNum)),
            [self.TestNum.blue, self.TestNum.red])
        with self.assertRaises(AttributeError):
            del self.TestNum.red

    def test_zip(self):
        # _zip() yields (name, *attrs) tuples for every member, in order.
        values = self.TestNum._zip()
        self.assertSequenceEqual(
            list(values),
            (('red', 1, 'Red', 'stuff'), ('blue', 2, 'Blue', 'things')))

    def test_zip_w_list(self):
        # _zip(*attrs) restricts and reorders the emitted columns.
        values = self.TestNum._zip('label', 'value')
        self.assertSequenceEqual(list(values), (('Red', 1), ('Blue', 2)))
nilq/baby-python
python
# Integration tests for the ``@@collective_flattr`` browser view
# (Python 2 / Plone stack; ``mocker`` stubs the urllib2 opener so no real
# HTTP requests are made).
import unittest2 as unittest
import urllib2

from AccessControl import Unauthorized
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from zope.component import getUtility
from plone.registry.interfaces import IRegistry
from collective.flattr.interfaces import ICollectiveFlattr
from mocker import Mocker
from Products.statusmessages.interfaces import IStatusMessage
from collective.flattr.tests.mocks import MockOpener
from collective.flattr.tests.base import COLLECTIVE_FLATTR_INTEGRATION_TESTING


class as_manager(object):
    # Context manager: temporarily grant the test user the Manager role and
    # hand back the traversed @@collective_flattr view; restore Member on exit.
    def __init__(self, portal):
        self.portal = portal

    def __enter__(self):
        setRoles(self.portal, TEST_USER_ID, ('Manager',))
        return self.portal.restrictedTraverse('@@collective_flattr')

    def __exit__(self, type, value, traceback):
        setRoles(self.portal, TEST_USER_ID, ('Member',))


class TestFlattrView(unittest.TestCase):
    # Covers the view's helper API: URLs, registry access, token handling,
    # the OAuth token exchange and the Flattr REST helpers.

    layer = COLLECTIVE_FLATTR_INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ('Member',))

    def test_permissions(self):
        # only cmf.ManagePortal has access!
        error = False
        try:
            self.portal.restrictedTraverse('@@collective_flattr')
        except Unauthorized:
            error = True
        self.assertTrue(error)

    def test_access_token_url(self):
        with as_manager(self.portal) as view:
            ret = view.access_token_url
            self.assertEquals(ret, u'https://flattr.com/oauth/token')

    def test_authorize_url(self):
        with as_manager(self.portal) as view:
            ret = view.authorize_url
            self.assertEquals(ret, u'https://flattr.com/oauth/authorize')

    def test_registry(self):
        with as_manager(self.portal) as view:
            ret = view.registry
            self.assertEquals(ret.__dict__,
                getUtility(IRegistry).forInterface(ICollectiveFlattr).__dict__)

    def test_access_token_empty(self):
        with as_manager(self.portal) as view:
            ret = view.access_token
            self.failUnless(ret is None)

    def test_access_token(self):
        # A configured token is exposed as a ready-to-use header dict.
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u'8843d7f92416211de9ebb963ff4ce28125932878'
        reg.access_token_type = u'Bearer'
        with as_manager(self.portal) as view:
            ret = view.access_token
            self.assertTrue(isinstance(ret, dict))
            self.assertEquals(ret['Authorization'],
                u'Bearer 8843d7f92416211de9ebb963ff4ce28125932878')

    def test_consumer_empty(self):
        with as_manager(self.portal) as view:
            ret = view.consumer
            self.failUnless(ret is None)

    def test_consumer(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.customer_key = u'mycustomer'
        reg.customer_secret = u'mysecret'
        with as_manager(self.portal) as view:
            ret = view.consumer
            self.assertTrue(isinstance(ret, dict))
            self.assertEquals(ret['key'], u'mycustomer')
            self.assertEquals(ret['secret'], u'mysecret')

    def test_setAccessToken(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        with as_manager(self.portal) as view:
            view._setAccessToken(u'a', u'bearer')
            self.assertEquals(reg.access_token, u'a')
            self.assertEquals(reg.access_token_type, u'Bearer')
            view._setAccessToken(u'c', u'bearer')
            self.assertEquals(reg.access_token, u'c')
            self.assertEquals(reg.access_token_type, u'Bearer')

    def test_setAccessToken_no_unicode(self):
        # Byte strings must be accepted and stored as unicode.
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        with as_manager(self.portal) as view:
            view._setAccessToken('a', 'bearer')
            self.assertEquals(reg.access_token, u'a')
            self.assertEquals(reg.access_token_type, u'Bearer')
            view._setAccessToken('c', 'bearer')
            self.assertEquals(reg.access_token, u'c')
            self.assertEquals(reg.access_token_type, u'Bearer')

    def test_getAccessToken_no_customer(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        with as_manager(self.portal) as view:
            reg.customer_key = u'customer'
            ret = view.getAccessToken(1234)
            self.assertEquals(ret['error'], u'no_customer')
            self.assertEquals(ret['error_description'],
                u'no customer_key or customer_secret configured')
            reg.customer_key = u''
            reg.customer_secret = u'secret'
            self.assertEquals(ret['error'], u'no_customer')
            self.assertEquals(ret['error_description'],
                u'no customer_key or customer_secret configured')
            reg.customer_key = u''
            reg.customer_secret = u''
            self.assertEquals(ret['error'], u'no_customer')
            self.assertEquals(ret['error_description'],
                u'no customer_key or customer_secret configured')

    def test_getAccessToken_token_configured(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.customer_key = u'customer'
        reg.customer_secret = u'secret'
        with as_manager(self.portal) as view:
            reg.access_token = u'token'
            reg.access_token_type = u'Bearer'
            ret = view.getAccessToken(1234)
            self.assertEquals(ret['error'], u'token_configured')
            self.assertEquals(ret['error_description'],
                u'access token already configured')

    def test_getAccessToken(self):
        # Two recorded opener results: first a successful token exchange,
        # then an OAuth error response (mocker replays them in order).
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.customer_key = u'customer'
        reg.customer_secret = u'secret'
        with as_manager(self.portal) as view:
            mocker = Mocker()
            obj = mocker.patch(view)
            obj.opener
            mocker.result(MockOpener(
                '{"access_token":"NEW_ACCESS_TOKEN","token_type":"bearer"}',
                verify_data=lambda x:
                    x.get_full_url() == u'https://flattr.com/oauth/token'
                    and x.data == '{"redirect_uri": "http://nohost/plone/collective_flattr", "code": 1234, "grant_type": "authorization_code"}'
                    and x.headers == {'Content-type': 'application/json'}))
            obj.opener
            mocker.result(MockOpener(
                '{"error":"invalid_request","error_description":"error desc"}',
                error=True,
                verify_data=lambda x:
                    x.get_full_url() == u'https://flattr.com/oauth/token'
                    and x.data == '{"redirect_uri": "http://nohost/plone/collective_flattr", "code": 1234, "grant_type": "authorization_code"}'
                    and x.headers == {'Content-type': 'application/json'}))
            with mocker:
                ret = view.getAccessToken(1234)
                self.failUnless(u'error' not in ret)
                self.failUnless(u'error_description' not in ret)
                self.failUnless(u'access_token' in ret)
                self.failUnless(u'token_type' in ret)
                self.assertEquals(ret['access_token'], u'NEW_ACCESS_TOKEN')
                self.assertEquals(ret['token_type'], u'bearer')
                # second call get an inner status of != 200 and
                # will return None
                ret = view.getAccessToken(1234)
                self.failUnless(u'error' in ret)
                self.failUnless(u'error_description' in ret)
                self.failUnless(u'access_token' not in ret)
                self.failUnless(u'token_type' not in ret)
                self.assertEquals(ret['error'], u'invalid_request')
                self.assertEquals(ret['error_description'], u'error desc')

    def test_opener(self):
        from collective.flattr.browser.flattr import Flattr
        view = Flattr(self.portal, self.layer['request'])
        ret = view.opener
        self.assertTrue(isinstance(ret, urllib2.OpenerDirector))

    def test_opener_authorization(self):
        # With a token configured the opener carries the Bearer header.
        from collective.flattr.browser.flattr import Flattr
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u'TOKEN'
        reg.access_token_type = u'Bearer'
        view = Flattr(self.portal, self.layer['request'])
        ret = view.opener
        self.assertTrue(isinstance(ret, urllib2.OpenerDirector))
        self.assertEquals(ret.addheaders, [('Authorization', 'Bearer TOKEN')])

    def test_opener_base_auth(self):
        # Without a token the opener falls back to HTTP Basic auth built
        # from the consumer key/secret ('USER:PASS' base64-encoded).
        from collective.flattr.browser.flattr import Flattr
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u''
        reg.access_token_type = u''
        reg.customer_key = u'USER'
        reg.customer_secret = u'PASS'
        view = Flattr(self.portal, self.layer['request'])
        ret = view.opener
        self.assertTrue(isinstance(ret, urllib2.OpenerDirector))
        self.assertEquals(ret.addheaders,
            [('Authorization', 'Basic VVNFUjpQQVNT')])

    def test_getLanguages(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[{"id": "de_DE", "text": "German"}, {"id": "en_US", "text": "English"}]'))
        with mocker:
            ret = view.getLanguages()
            self.failUnless(isinstance(ret, list))
            self.assertEquals(len(ret), 2)
            self.assertEquals(ret[0], {'id': u'de_DE', 'text': u'German'})
            self.assertEquals(ret[1], {'id': u'en_US', 'text': u'English'})

    def test_getLanguages_HTTPError(self):
        # HTTP errors degrade to an empty list.
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[{"id": "de_DE", "text": "German"}, {"id": "en_US", "text": "English"}]', error=True))
        with mocker:
            ret = view.getLanguages()
            self.failUnless(isinstance(ret, list))
            self.assertEquals(len(ret), 0)

    def test_getCategories(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[{"id": "text", "text": "Text"}, {"id": "images", "text": "Images"}]'))
        with mocker:
            ret = view.getCategories()
            self.failUnless(isinstance(ret, list))
            self.assertEquals(len(ret), 2)
            self.assertEquals(ret[0], {'id': u'text', 'text': u'Text'})
            self.assertEquals(ret[1], {'id': u'images', 'text': u'Images'})

    def test_getCategories_HTTPError(self):
        # HTTP errors degrade to an empty list.
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[{"id": "text", "text": "Text"}, {"id": "images", "text": "Images"}]', error=True))
        with mocker:
            ret = view.getCategories()
            self.failUnless(isinstance(ret, list))
            self.assertEquals(len(ret), 0)

    def test_getParams(self):
        from collective.flattr.browser.flattr import Flattr
        view = Flattr(self.portal, self.layer['request'])
        ret = view._getParams(u'Hello')
        self.assertEquals(ret, 'title=Hello&hidden=False')
        ret = view._getParams(u'Hello', url=u'http://localhost/',
            description='desc', category='cat', language='de_DE',
            tags='a,b', patch='patch', hidden=True)
        self.assertEquals(ret, 'title=Hello&hidden=True&url=http%3A%2F%2Flocalhost%2F&description=desc&category=cat&language=de_DE&tags=a%2Cb&_method=patch')

    def test_createThing(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('{ "id": 431547, "link": "https://api.flattr.dev/rest/v2/things/431547", "message": "ok", "description": "Thing was created successfully" }',
            verify_data=lambda x: x.get_data() == 'title=Hello&hidden=True&url=http%3A%2F%2Flocalhost%2F&description=desc&category=cat&language=de_DE&tags=a%2Cb'))
        with mocker:
            ret = view.createThing(u'Hello', url=u'http://localhost/',
                description='desc', category='cat', language='de_DE',
                tags='a,b', hidden=True)
            self.assertEquals(ret, {'id': 431547,
                'link': u'https://api.flattr.dev/rest/v2/things/431547',
                'message': u'ok',
                'description': u'Thing was created successfully'
            })

    def test_createThing_wrong_data(self):
        # An unknown language code must raise ValueError before any request.
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('{ "id": 431547, "link": "https://api.flattr.dev/rest/v2/things/431547", "message": "ok", "description": "Thing was created successfully" }',
            verify_data=lambda x: x.get_data() == 'title=Hello&hidden=True&url=http%3A%2F%2Flocalhost%2F&description=desc&category=cat&language=de_DE&tags=a%2Cb'))
        with mocker:
            ret = False
            try:
                view.createThing(u'Hello', url=u'http://localhost/',
                    description='desc', category='cat', language='en_DE',
                    tags='a,b', hidden=True)
            except ValueError:
                ret = True
            self.assertTrue(ret)

    def test_createThing_HTTPError(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('{ "id": 431547, "link": "https://api.flattr.dev/rest/v2/things/431547", "message": "ok", "description": "Thing was created successfully" }',
            verify_data=lambda x: x.get_data() == 'title=Hello&hidden=True&url=http%3A%2F%2Flocalhost%2F&description=desc&category=cat&language=de_DE&tags=a%2Cb',
            error=True))
        with mocker:
            ret = view.createThing(u'Hello', url=u'http://localhost/',
                description='desc', category='cat', language='de_DE',
                tags='a,b', hidden=True)
            self.assertEquals(ret, {})

    def test_updateThing(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('{ "message": "ok", "description": "Thing was updated successfully" }',
            verify_data=lambda x: x.get_full_url().endswith('431547')
                and x.get_data() == 'title=Hello&hidden=True&description=desc&category=cat&language=de_DE&tags=a%2Cb&_method=patch'))
        with mocker:
            ret = view.updateThing(u'Hello', 431547,
                description='desc', category='cat', language='de_DE',
                tags='a,b', hidden=True)
            self.assertEquals(ret, {'message': u'ok',
                'description': u'Thing was updated successfully'
            })

    def test_updateThing_HTTPError(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('{ "message": "ok", "description": "Thing was updated successfully" }',
            verify_data=lambda x: x.get_full_url().endswith('431547')
                and x.get_data() == 'title=Hello&hidden=True&description=desc&category=cat&language=de_DE&tags=a%2Cb&_method=patch',
            error=True))
        with mocker:
            ret = view.updateThing(u'Hello', 431547,
                description='desc', category='cat', language='de_DE',
                tags='a,b', hidden=True)
            self.assertEquals(ret, {})

    def test_getThing(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])

        def test_func(x):
            return 'count=30&page=' in x.get_data()

        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[ { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/1", "link": "https://flattr.dev/thing/1", "id": 1 }, { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/2", "link": "https://flattr.dev/thing/2", "id": 2} ]',
            verify_data=test_func))
        obj.opener
        mocker.result(MockOpener('', verify_data=test_func, error=True))
        with mocker:
            ret = view.getThings()
            self.failUnless(u'data' in ret)
            self.failUnless(u'next_page' in ret)
            self.assertFalse(ret['next_page'])
            self.assertEquals(ret['data'][0], { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/1", "link": "https://flattr.dev/thing/1", "id": 1 })
            self.assertEquals(ret['data'][1], { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/2", "link": "https://flattr.dev/thing/2", "id": 2})

    def test_getThing_with_next(self):
        from collective.flattr.browser.flattr import Flattr
        mocker = Mocker()
        view = Flattr(self.portal, self.layer['request'])
        obj = mocker.patch(view)
        obj.opener
        mocker.result(MockOpener('[ { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/1", "link": "https://flattr.dev/thing/1", "id": 1 }, { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/2", "link": "https://flattr.dev/thing/2", "id": 2} ]',
            verify_data=lambda x: 'count=30&page=' in x.get_data()))
        mocker.count(2)
        # if the same thing is called twice, it is called for the first page
        # and again for the second page. So there is a result, what means that
        # there is a next page
        with mocker:
            ret = view.getThings()
            self.failUnless(u'data' in ret)
            self.failUnless(u'next_page' in ret)
            self.assertTrue(ret['next_page'])
            self.assertEquals(ret['data'][0], { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/1", "link": "https://flattr.dev/thing/1", "id": 1 })
            self.assertEquals(ret['data'][1], { "type": "thing", "resource": "https://api.flattr.dev/rest/v2/things/2", "link": "https://flattr.dev/thing/2", "id": 2})


class TestFlattrViewCall(unittest.TestCase):
    # Covers calling the view directly (the OAuth redirect endpoint):
    # error propagation, token storage and status messages.

    layer = COLLECTIVE_FLATTR_INTEGRATION_TESTING

    def setUp(self):
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ('Member',))

    def test_call_access_denied(self):
        with as_manager(self.portal) as view:
            from collective.flattr.browser.flattr import Flattr
            view = Flattr(self.portal, self.layer['request'])
            ret = view()
            self.layer['request']['error'] = u'access_denied'
            self.layer['request']['error_description'] = u'error description'
            ret = view()
            self.assertEquals(self.layer['request'].response.headers['location'],
                'http://nohost/plone')
            ret = IStatusMessage(self.layer['request']).showStatusMessages()[0]
            self.assertEquals(ret.message,
                u'access_denied: error description')
            self.assertEquals(ret.type, u'error')

    def test_call_invalid_request(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u''
        mocker = Mocker()
        func = mocker.replace('collective.flattr.browser.flattr.Flattr.getAccessToken')
        func(u'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz')
        mocker.result({'error': u'invalid_request',
            'error_description': u'error desc'})
        with as_manager(self.portal) as view:
            ## need the real class here, not the wrapped one, to get mocker
            ## working
            from collective.flattr.browser.flattr import Flattr
            with mocker:
                view = Flattr(self.portal, self.layer['request'])
                self.layer['request']['code'] = u'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz'
                ret = view()
                self.assertEquals(self.layer['request'].response.headers['location'],
                    'http://nohost/plone')
                ret = IStatusMessage(self.layer['request']).showStatusMessages()[0]
                self.assertEquals(ret.message, u'invalid_request: error desc')
                self.assertEquals(ret.type, u'error')

    def test_call_valid(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u''
        mocker = Mocker()
        func = mocker.replace('collective.flattr.browser.flattr.Flattr.getAccessToken')
        func(u'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz')
        mocker.result({'access_token': u'NEW_ACCESS_TOKEN',
            'token_type': u'bearer'})
        with as_manager(self.portal) as view:
            ## need the real class here, not the wrapped one, to get mocker
            ## working
            from collective.flattr.browser.flattr import Flattr
            with mocker:
                self.layer['request']['code'] = u'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz'
                view = Flattr(self.portal, self.layer['request'])
                ret = view()
                self.assertEquals(reg.access_token, u'NEW_ACCESS_TOKEN')
                self.assertEquals(self.layer['request'].response.headers['location'],
                    'http://nohost/plone')
                ret = IStatusMessage(self.layer['request']).showStatusMessages()[0]
                self.assertEquals(ret.message,
                    u'collective.flattr successfully configured')
                self.assertEquals(ret.type, u'info')

    def test_call_no_unicode_and_error(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u''
        with as_manager(self.portal) as view:
            from collective.flattr.browser.flattr import Flattr
            self.layer['request']['code'] = 'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz'
            self.layer['request']['error'] = 'test'
            self.layer['request']['error_description'] = 'test error'
            view = Flattr(self.portal, self.layer['request'])
            ret = view()
            self.assertEquals(reg.access_token, u'')
            self.assertEquals(self.layer['request'].response.headers['location'],
                'http://nohost/plone')
            ret = IStatusMessage(self.layer['request']).showStatusMessages()
            self.assertEquals(ret[0].message, u'test: test error')
            self.assertEquals(ret[0].type, u'error')

    def test_call_no_unicode_and_no_error_desc(self):
        reg = getUtility(IRegistry).forInterface(ICollectiveFlattr)
        reg.access_token = u''
        mocker = Mocker()
        func = mocker.replace('collective.flattr.browser.flattr.Flattr.getAccessToken')
        func(u'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz')
        mocker.result({'access_token': u'NEW_ACCESS_TOKEN',
            'token_type': u'bearer', 'error': u'blubber'})
        with as_manager(self.portal) as view:
            from collective.flattr.browser.flattr import Flattr
            with mocker:
                self.layer['request']['code'] = 'un8Vzv7pNMXNuAQY3uRgjYfM4V3Feirz'
                view = Flattr(self.portal, self.layer['request'])
                ret = view()
                self.assertEquals(reg.access_token, u'')
                self.assertEquals(self.layer['request'].response.headers['location'],
                    'http://nohost/plone')
                ret = IStatusMessage(self.layer['request']).showStatusMessages()
                self.assertEquals(ret[0].message,
                    u'undefined: Undefined error while getting access token')
                self.assertEquals(ret[0].type, u'error')
nilq/baby-python
python
""" This is about the prediction of alpha using the conditional input output pair of parameters and outcome """ import os import argparse import numpy as np import pandas as pd import seaborn as sns from collections import Counter import logging from annotator.annot import Annotator from commons import ENDPOINT from experiments.alpha_analysis import shorten_uri from experiments.alpha_eval_one import get_classes_fnames import matplotlib.pyplot as plt def add_alpha_per_file(df_alphas): """ Add mid alpha between from_alpha and to_alpha for each file :param df_alphas: :return: """ alphas = [] for idx, row in df_alphas.iterrows(): if row['from_alpha'] >= 0 and row['to_alpha'] >= 0: a = (row['from_alpha'] + row['to_alpha']) * 0.5 else: a = -1 alphas.append(a) df_alphas.insert(5, 'alpha', alphas) def annotate_column(fpath, col_id, title_case): """ Get the annotator which includes the annotations :param fpath: :param col_id: :param title_case: :return: """ annotator = Annotator(endpoint=ENDPOINT, title_case=title_case, num_of_threads=3, logger=None, class_prefs=["http://dbpedia.org/ontology/", "http://www.w3.org/2002/07/owl#Thing"]) annotator.annotate_table(file_dir=fpath, subject_col_id=col_id) return annotator def predict_class(annotator, fsid, alpha): """ Returns the candidates using a given alpha and fsid :param annotator: :param fsid: :param alpha: :return: """ annotator.compute_f(alpha) candidates = annotator.get_top_k(fsid=fsid) return candidates def compute_file_acc(row, alphas_classes, data_path, correct_class_uri, title_case, alpha_voting="max"): annotator = annotate_column(os.path.join(data_path, row['fname']), row['colid'], title_case) acc = dict() for fsid in range(1, 6): acc[fsid] = { 'mean': -1, 'median': -1 } for a_attr in ['mean', 'median']: if fsid in alphas_classes and correct_class_uri in alphas_classes[fsid]: if alphas_classes[fsid][correct_class_uri][a_attr] == -1: acc[fsid][a_attr] = -1 print("compute_file_acc> set accuracy to -1 for %s with fsid %d 
attr %s" % (row.fname, fsid, a_attr)) continue candidate_alpha = -1 candidate_class = None for class_uri in alphas_classes[fsid]: alpha = alphas_classes[fsid][class_uri][a_attr] candidates = predict_class(annotator, fsid, alpha) if candidates == []: print("No candidates") continue pred_class = candidates[0] if pred_class == class_uri: if alpha_voting == "max": if candidate_alpha < alpha: if candidate_alpha >= 0: print("compute_file_acc> Prediction of %s colid %d (fsid %d)" % (row['fname'], row['colid'], fsid)) print("\tSwitch max <%s, %f> to <%s, %f>" % (candidate_class, candidate_alpha, pred_class, alpha)) candidate_alpha = alpha candidate_class = class_uri elif alpha_voting == "min": if candidate_alpha > -1: if candidate_alpha > alpha: print("compute_file_acc> Prediction of %s colid %d (fsid %d)" % ( row['fname'], row['colid'], fsid)) print("\tSwitch min <%s, %f> to <%s, %f>" % (candidate_class, candidate_alpha, pred_class, alpha)) candidate_alpha = alpha candidate_class = class_uri else: candidate_alpha = alpha candidate_class = class_uri else: raise Exception("unknown alpha voting method") if candidate_class == correct_class_uri: res = 1 else: res = 0 print("Invalid candidate: fsid: %d - class: %s (correct: %s)- alpha: %f - a_attr: %s - fname: %s" % (fsid, candidate_class, correct_class_uri, alpha, a_attr, row['fname'])) acc[fsid][a_attr] = res return acc def get_file_acc(row, class_files_alpha, alphas_classes, class_uri, title_case, data_path, alpha_voting): old = dict() for fsid in range(1, 6): old[fsid] = dict() if fsid in alphas_classes and class_uri in alphas_classes[fsid]: old[fsid][class_uri] = alphas_classes[fsid][class_uri].copy() # Just to verify alphas_classes[fsid][class_uri] = None if fsid in class_files_alpha and row.fname in class_files_alpha[fsid] and row.colid in class_files_alpha[fsid][row.fname]: alphas_classes[fsid][class_uri] = class_files_alpha[fsid][row.fname][row.colid].copy() else: alphas_classes[fsid][class_uri] = {'mean': -1, 
'median': -1} acc = compute_file_acc(row=row, alphas_classes=alphas_classes, data_path=data_path, correct_class_uri=class_uri, title_case=title_case, alpha_voting=alpha_voting) for fsid in range(1, 6): if fsid in old and class_uri in old[fsid]: alphas_classes[fsid][class_uri] = old[fsid][class_uri] return acc def get_class_files_alphas(df_class): """ Compute the mean and media alphas to be used for each file using one out. :param df_class: :return: """ alphas = dict() for fsid in range(1, 6): df_class_fsid = df_class[df_class.fsid == fsid] alphas[fsid] = dict() for idx, row in df_class_fsid.iterrows(): if row['alpha'] >= 0: for idx2, row2 in df_class_fsid.iterrows(): if idx == idx2: continue if row['fname'] not in alphas[fsid]: alphas[fsid][row['fname']] = {row['colid']: []} if row2['alpha'] >= 0: alphas[fsid][row['fname']][row['colid']].append(row2['alpha']) for fsid in alphas: for fname in alphas[fsid]: for colid in alphas[fsid][fname]: d = { 'mean': np.mean(alphas[fsid][fname][colid]), 'median': np.median(alphas[fsid][fname][colid]) } alphas[fsid][fname][colid] = d return alphas def get_acc_per_class(df_class, alphas_classes, class_uri, title_case, data_path, alpha_voting): # Get the alpha (mean and median) for file class (using one file out from the same class) for the given rows. 
class_files_alpha = get_class_files_alphas(df_class) acc = dict() computed_files = dict() for idx, row in df_class.iterrows(): if row['fname'] in computed_files: if row['colid'] in computed_files[row['fname']]: continue file_acc = get_file_acc(row, class_files_alpha, alphas_classes, class_uri, title_case, data_path, alpha_voting) for fsid in file_acc: if fsid not in acc: acc[fsid] = {'mean': [], 'median': []} for a_attr in file_acc[fsid]: if file_acc[fsid][a_attr] >= 0: acc[fsid][a_attr].append(file_acc[fsid][a_attr]) if row['fname'] not in computed_files: computed_files[row['fname']] = dict() computed_files[row['fname']][row['colid']] = True for fsid in acc: for a_attr in acc[fsid]: # # DEBUG # print("\nDEBUG: ") # print(acc[fsid][a_attr]) # in case there is a single file, one file out per class is not applicable if len(acc[fsid][a_attr]) <= 1: acc[fsid][a_attr] = -1 print("get_acc_per_class> Ignoring fsid %d for class %s" % (fsid, class_uri)) continue else: # if -1 in acc[fsid][a_attr]: # raise Exception("Something went wrong") acc[fsid][a_attr] = sum(acc[fsid][a_attr])/len(acc[fsid][a_attr]) return acc def get_accuracy_for_classes(df_alphas, classes_fnames, alphas_classes, title_case, data_path, alpha_voting, debug_class=None): print("%s > debug class: %s" % (__name__, str(debug_class))) acc = dict() for class_uri in classes_fnames: if debug_class: # print("**** Debug class is there") if debug_class not in class_uri: # print("*** Ignore: %s" % class_uri) continue # else: # print("*** Class %s is there" % class_uri) # else: # print("*** No Debug class") # # DEBUG # if 'Airline' not in class_uri: # continue # Get rows with files (with their colid) of the class class_uri t = [tuple(tt) for tt in classes_fnames[class_uri]] df_class = df_alphas[df_alphas[['fname', 'colid']].apply(tuple, axis=1).isin(t)] # Get accuracy of the class_uri acc[class_uri] = get_acc_per_class(df_class, alphas_classes, class_uri, title_case, data_path, alpha_voting) return acc def 
get_alpha_per_class(df_alphas, classes_fnames): d = dict() for class_uri in classes_fnames: t = [tuple(tt) for tt in classes_fnames[class_uri]] df_class = df_alphas[df_alphas[['fname', 'colid']].apply(tuple, axis=1).isin(t)] for idx, row in df_class.iterrows(): if row['from_alpha'] >= 0 and row['to_alpha'] >= 0: if class_uri not in d: d[class_uri] = {'alphas': []} d[class_uri]['alphas'].append((row['from_alpha'] + row['to_alpha']) * 0.5) to_be_del = [] for class_uri in d: if class_uri in d and len(d[class_uri]['alphas']) > 1: d[class_uri]['mean'] = np.mean(d[class_uri]['alphas']) d[class_uri]['median'] = np.median(d[class_uri]['alphas']) else: to_be_del.append(class_uri) for c in to_be_del: del d[c] return d def get_accuracy(df_alphas, classes_fnames, title_case, data_path, alpha_voting, debug_class=None): alphas_classes = dict() for fsid in range(1, 6): df_alphas_fsid = df_alphas[df_alphas.fsid == fsid] alphas_classes[fsid] = get_alpha_per_class(df_alphas_fsid, classes_fnames) acc = get_accuracy_for_classes(df_alphas, classes_fnames, alphas_classes, title_case, data_path, alpha_voting, debug_class) return acc def workflow(falpha, draw_basename, dataset, fmeta, title_case, data_path, subject_col_fpath, alpha_voting, debug_class=None): print("%s > debug class: %s" % (__name__, str(debug_class))) df_alphas = pd.read_csv(falpha) df_alphas[["colid"]] = df_alphas[["colid"]].apply(pd.to_numeric) add_alpha_per_file(df_alphas) classes_fnames = get_classes_fnames_col_ids(fmeta, dataset, subject_col_fpath=subject_col_fpath) acc = get_accuracy(df_alphas, classes_fnames, title_case, data_path, alpha_voting, debug_class) print_accuracy_per_fsid(acc) if draw_basename: generate_diagram(acc, draw_basename) return acc def print_accuracy_per_fsid(acc): print("|fsid\t|accuracy of mean\t|accuracy of median|") print("|:---:|:---:|:---:|") for fsid in range(1, 6): scores = { 'mean': [], 'median': [] } for class_uri in acc: if fsid not in acc[class_uri]: continue for a_attr in ['mean', 
'median']: if acc[class_uri][fsid][a_attr] == -1: continue scores[a_attr].append(acc[class_uri][fsid][a_attr]) # print("%d\t%s\t%s\t\t%f" % (fsid, shorten_uri(class_uri), a_attr, acc[class_uri][fsid][a_attr])) print("%d\t|%f\t|%f" % (fsid, np.mean(scores['mean']), np.mean(scores['median']))) def get_classes_fnames_col_ids(fpath, dataset, ext=".csv", subject_col_fpath=None): d = dict() f = open(fpath) if dataset == "wcv2": with open(subject_col_fpath) as f_subj_col: subj_col_dict = dict() for line in f_subj_col: sline = line.strip() # sline = sline.replace('"', '') if sline == "": continue fn, colid = line.split(',') colid = int(colid) subj_col_dict[fn+".tar.gz"] = colid for line in f.readlines(): sline = line.strip() if sline == "": continue if dataset == "wcv2": fname, _, class_uri = sline.split(',') fname = fname.replace('"', '') print("fname: "+fname) print(subj_col_dict) colid = subj_col_dict[fname] elif dataset == "wcv1": fname, _, class_uri, colid = sline.split(',') fname = fname.split(".")[0] colid = int(colid) else: raise Exception("Unknown dataset") fname = fname.replace('"', '') fname += ext class_uri = class_uri.replace('"', "") if class_uri not in d: d[class_uri] = [] d[class_uri].append([fname, colid]) f.close() return d def generate_diagram(acc, draw_file_base): """ :param acc: acc :param draw_file_base: base of the diagram :return: None """ for fsid in range(1, 6): rows = [] for class_uri in acc: if fsid not in acc[class_uri]: continue for a_attr in ['mean', 'median']: if acc[class_uri][fsid][a_attr] == -1: continue r = [shorten_uri(class_uri), acc[class_uri][fsid][a_attr], a_attr] rows.append(r) data = pd.DataFrame(rows, columns=['Class', 'Accuracy', 'Aggr']) ax = sns.barplot(x="Accuracy", y="Class", hue="Aggr", data=data, linewidth=1.0, # palette="colorblind", # palette="Spectral", # palette="pastel", # palette="ch:start=.2,rot=-.3", # palette="YlOrBr", palette="Paired", orient="h") # ax.legend_.remove() # ax.legend(bbox_to_anchor=(1.01, 1), 
borderaxespad=0) ax.legend(bbox_to_anchor=(1.0, -0.1), borderaxespad=0) # ax.set_xlim(0, 1.0) # ax.set_ylim(0, 0.7) # Horizontal ticks = ax.get_yticks() new_ticks = [t for t in ticks] texts = ax.get_yticklabels() # print(ax.get_yticklabels()) labels = [t.get_text() for t in texts] ax.set_yticks(new_ticks) ax.set_yticklabels(labels, fontsize=8) # print(ax.get_yticklabels()) draw_fname = draw_file_base+"_fsid%d" % (fsid) plt.setp(ax.lines, color='k') ax.figure.savefig('docs/%s.svg' % draw_fname, bbox_inches="tight") ax.figure.clf() def main(): """ Parse the arguments :return: """ parser = argparse.ArgumentParser(description='Evaluate the accuracy of alpha among all classes (k-fold).') parser.add_argument('--falpha', help="The path to the alpha results file.") parser.add_argument('--fmeta', help="The path to the meta file which contain the filenames and classes.") parser.add_argument('--dataset', choices=['wcv1', 'wcv2'], help="The path to the csv files") parser.add_argument('--draw', default=None, help="The base name for the diagram file (without the extension)") parser.add_argument('--title_case', default="title", choices=["title", "original"], help="Whether title case or not. 
true or false") parser.add_argument('--data-path', help="The path to the data (csv files)") parser.add_argument('--subject-col', help="The path to the subject column file (only for wcv2)") parser.add_argument('--alpha-voting', default="max", choices=['max', 'min'], help="The voting method to select alpha if there are several candidates") parser.add_argument('--debug-class', default=None, help="The class to be debugged") args = parser.parse_args() if args.falpha and args.fmeta and args.dataset and args.draw and args.data_path: workflow(falpha=args.falpha, draw_basename=args.draw, data_path=args.data_path, subject_col_fpath=args.subject_col, fmeta=args.fmeta, dataset=args.dataset, title_case=(args.title_case.lower() == "title"), alpha_voting=args.alpha_voting, debug_class=args.debug_class) else: parser.print_usage() parser.print_help() if __name__ == "__main__": main()
nilq/baby-python
python
"""Timing-sensitive tests for meadowflow.time_event_publisher.TimeEventPublisher."""
import asyncio
import datetime
import time
from pprint import pprint
from typing import List, Optional, Tuple

import meadowflow.event_log
import meadowflow.jobs
import meadowflow.time_event_publisher
import pytest
import pytz
from meadowflow.time_event_publisher import (
    Periodic,
    PointInTime,
    TimeEventPublisher,
    TimeOfDay,
    TimeOfDayPayload,
    _timedelta_to_str,
)

# these need to be tuned to make the tests run fast, but avoid false negatives
_TIME_DELAY = 0.1
_TIME_INCREMENT = datetime.timedelta(seconds=1)


@pytest.mark.asyncio
async def test_call_at():
    # this uses the higher level interface (TimeEventPublisher) but mostly tests the low
    # level functionality of _CallAt and whether it's robust to different
    # sequences of events

    # test basic callback functionality
    async with meadowflow.event_log.EventLog() as event_log, TimeEventPublisher(
        event_log.append_event
    ) as p:
        now = pytz.utc.localize(datetime.datetime.utcnow())
        p.create_point_in_time(PointInTime(now))  # called
        p.create_point_in_time(PointInTime(now - _TIME_INCREMENT))  # called
        p.create_point_in_time(PointInTime(now + 3 * _TIME_INCREMENT))  # not called

        await asyncio.sleep(_TIME_DELAY)
        assert len(event_log._event_log) == 2

        now = pytz.utc.localize(datetime.datetime.utcnow())
        p.create_point_in_time(PointInTime(now))  # called
        await asyncio.sleep(_TIME_DELAY)
        assert len(event_log._event_log) == 3

        p.create_point_in_time(PointInTime(now + 3 * _TIME_INCREMENT))  # not called
        p.create_point_in_time(PointInTime(now - _TIME_INCREMENT))  # called
        await asyncio.sleep(_TIME_DELAY)
        assert len(event_log._event_log) == 4


@pytest.mark.asyncio
async def test_call_at_callbacks_before_running():
    # test adding callbacks before running
    # TODO this test seems moot now...the publisher is running
    # from the start, but it doesn't get a chance to schedule callbacks
    # because nothing is awaited until the sleep.
    async with meadowflow.event_log.EventLog() as event_log, TimeEventPublisher(
        event_log.append_event
    ) as p:
        now = pytz.utc.localize(datetime.datetime.utcnow())
        p.create_point_in_time(PointInTime(now))  # called
        p.create_point_in_time(PointInTime(now - _TIME_INCREMENT))  # called
        p.create_point_in_time(PointInTime(now + _TIME_INCREMENT))  # not called
        assert len(event_log._event_log) == 0
        await asyncio.sleep(_TIME_DELAY)
        assert len(event_log._event_log) == 2


def _dt_to_str(dt: datetime.datetime) -> str:
    # includes the tz offset/name so equal instants in different zones compare unequal
    return dt.strftime("%Y-%m-%d-%H-%M-%S-%f-%z-%Z")


def _date_to_str(dt: datetime.date) -> str:
    return dt.strftime("%Y-%m-%d")


@pytest.mark.asyncio
async def test_time_event_publisher_point_in_time():
    """Test TimeEventPublisher.point_in_time_trigger"""
    async with meadowflow.event_log.EventLog() as event_log, TimeEventPublisher(
        event_log.append_event
    ) as p:
        now = pytz.utc.localize(datetime.datetime.utcnow())

        tz_ldn = pytz.timezone("Europe/London")
        tz_ny = pytz.timezone("America/New_York")
        tz_la = pytz.timezone("America/Los_Angeles")

        dts = [
            now.astimezone(tz_ny) - _TIME_INCREMENT,
            now.astimezone(tz_la) + 1.5 * _TIME_INCREMENT,
            now.astimezone(tz_ldn) + 1.5 * _TIME_INCREMENT,
            now.astimezone(tz_ldn) + 3 * _TIME_INCREMENT,
        ]
        for dt in dts:
            p.create_point_in_time(PointInTime(dt))

        # It's important to compare the results in string format because we care about
        # what timezone a datetime is in, and datetime equality does not care about the
        # timezone
        dt_strings = [_dt_to_str(dt) for dt in dts]

        t0 = time.time()
        await asyncio.sleep(_TIME_DELAY)
        assert 1 == len(event_log._event_log)
        assert dt_strings[0] == _dt_to_str(event_log._event_log[0].payload)

        await asyncio.sleep(1.5 * _TIME_INCREMENT.total_seconds() + t0 - time.time())
        assert 3 == len(event_log._event_log)
        # make sure that 2 times with the same point in time but different timezones
        # create separate events
        assert 3 == len(event_log._topic_name_to_events)
        assert set(dt_strings[:3]) == set(
            _dt_to_str(e.payload) for e in event_log._event_log
        )

        await asyncio.sleep(3 * _TIME_INCREMENT.total_seconds() + t0 - time.time())
        assert 4 == len(event_log._event_log)
        assert set(dt_strings) == set(
            _dt_to_str(e.payload) for e in event_log._event_log
        )

        pprint(dt_strings)


@pytest.mark.asyncio
async def test_time_event_publisher_periodic():
    """
    Test TimeEventPublisher.periodic_trigger.

    This can take up to 12 seconds in the worst case: 6 seconds to get to the top of a
    6 second cycle, and then 6 seconds worth of events.
    """
    async with meadowflow.event_log.EventLog() as event_log, TimeEventPublisher(
        event_log.append_event,
        # we're testing 6 seconds worth of time, so we set the schedule_recurring_limit
        # even shorter than that to test that "rolling over" to the next period works
        # correctly
        datetime.timedelta(seconds=4),
        datetime.timedelta(seconds=2),
    ) as p:
        # get us to just after the "top of a 6 second cycle", as that means both the 2s
        # and 3s periodic triggers will be "at the top of their cycles"
        await asyncio.sleep(6 - time.time() % 6 + _TIME_DELAY)

        t0 = time.time()

        p.create_periodic(Periodic(datetime.timedelta(seconds=1)))
        p.create_periodic(Periodic(datetime.timedelta(seconds=2)))
        p.create_periodic(Periodic(datetime.timedelta(seconds=3)))

        assert 0 == len(event_log._event_log)

        # these are effectively sleep(1), but this reduces the likelihood that we go out
        # of sync
        await asyncio.sleep(max(t0 + 1 - time.time(), 0))
        assert 1 == len(event_log._event_log)
        await asyncio.sleep(max(t0 + 2 - time.time(), 0))
        assert 1 + 2 == len(event_log._event_log)
        await asyncio.sleep(max(t0 + 3 - time.time(), 0))
        assert 1 + 2 + 2 == len(event_log._event_log)
        await asyncio.sleep(max(t0 + 4 - time.time(), 0))
        assert 1 + 2 + 2 + 2 == len(event_log._event_log)
        await asyncio.sleep(max(t0 + 5 - time.time(), 0))
        assert 1 + 2 + 2 + 2 + 1 == len(event_log._event_log)
        await asyncio.sleep(max(t0 + 6 - time.time(), 0))
        assert 1 + 2 + 2 + 2 + 1 + 3 == len(event_log._event_log)
        # NOTE(review): this final sleep has no assertion after it — possibly an
        # assert was intended here; confirm against the original test suite
        await asyncio.sleep(max(t0 + 7 - time.time(), 0))


@pytest.mark.asyncio
async def test_time_event_publisher_time_of_day():
    """Test TimeEventPublisher.time_of_day_trigger"""
    await _test_time_event_publisher_time_of_day()


@pytest.mark.asyncio
async def test_time_event_publisher_time_of_day_daylight_savings():
    """
    Test TimeEventPublisher.time_of_day_trigger in a case where we're crossing a
    daylight savings boundary.
    """

    # New Zealand daylight savings time ended on 2021-04-04 at 3am, clocks turned
    # backward 1 hour at that point
    test_dt = pytz.timezone("Pacific/Auckland").localize(
        datetime.datetime(2021, 4, 4, 14, 0, 0)
    )
    meadowflow.time_event_publisher._TEST_TIME_OFFSET = (
        test_dt.timestamp() - time.time()
    )
    try:
        await _test_time_event_publisher_time_of_day()
    finally:
        # always reset the module-level fake-clock offset so other tests see real time
        meadowflow.time_event_publisher._TEST_TIME_OFFSET = 0


async def _test_time_event_publisher_time_of_day():
    # shared body for the two time-of-day tests above
    async with meadowflow.event_log.EventLog() as event_log, TimeEventPublisher(
        event_log.append_event
    ) as p:
        tz_hi = pytz.timezone("Pacific/Honolulu")
        tz_nz = pytz.timezone("Pacific/Auckland")

        now = meadowflow.time_event_publisher._utc_now()
        now_rounded = datetime.datetime(
            year=now.year,
            month=now.month,
            day=now.day,
            hour=now.hour,
            minute=now.minute,
            second=now.second,
            tzinfo=now.tzinfo,
        ) + datetime.timedelta(seconds=1)
        # this should make sure we're very close to now_rounded and possibly a little
        # bit after it
        await asyncio.sleep(
            max(
                now_rounded.timestamp() - meadowflow.time_event_publisher._time_time(),
                0,
            )
        )

        day_delta = datetime.timedelta(days=1)

        now_hi = now_rounded.astimezone(tz_hi)
        today_hi = now_hi.date()
        today_dt_hi = tz_hi.localize(
            datetime.datetime.combine(today_hi, datetime.time())
        )
        yesterday_dt_hi = tz_hi.localize(
            datetime.datetime.combine(today_hi - day_delta, datetime.time())
        )
        tomorrow_dt_hi = tz_hi.localize(
            datetime.datetime.combine(today_hi + day_delta, datetime.time())
        )

        now_nz = now_rounded.astimezone(tz_nz)
        today_nz = now_nz.date()
        today_dt_nz = tz_nz.localize(
            datetime.datetime.combine(today_nz, datetime.time())
        )
        yesterday_dt_nz = tz_nz.localize(
            datetime.datetime.combine(today_nz - day_delta, datetime.time())
        )
        tomorrow_dt_nz = tz_nz.localize(
            datetime.datetime.combine(today_nz + day_delta, datetime.time())
        )

        expected_payloads: List[Tuple[str, Optional[str], str, str]] = []

        def payload_to_strs(
            payload: TimeOfDayPayload,
        ) -> Tuple[str, Optional[str], str, str]:
            # canonical string form for order-insensitive comparison below
            return (
                _timedelta_to_str(payload.local_time_of_day),
                payload.time_zone.zone,
                _date_to_str(payload.date),
                _dt_to_str(payload.point_in_time),
            )

        def add_trigger_and_payload(
            # the current time in the local timezone
            now_local: datetime.datetime,
            # midnight of the date you want to trigger for in the local timezone
            date_dt_local: datetime.datetime,
            # any jitter you want to add
            time_increment: datetime.timedelta,
            # the local timezone
            time_zone: pytz.BaseTzInfo,
        ):
            time_of_day = now_local - date_dt_local + time_increment
            p.create_time_of_day(TimeOfDay(time_of_day, time_zone))
            expected_payloads.append(
                (
                    _timedelta_to_str(time_of_day),
                    time_zone.zone,
                    _date_to_str(date_dt_local.date()),
                    _dt_to_str(time_zone.normalize(date_dt_local + time_of_day)),
                )
            )

        # not called
        p.create_time_of_day(
            TimeOfDay(now_hi - today_dt_hi - 3 * _TIME_INCREMENT, tz_hi)
        )
        p.create_time_of_day(
            TimeOfDay(now_nz - today_dt_nz - 3 * _TIME_INCREMENT, tz_nz)
        )

        add_trigger_and_payload(now_hi, today_dt_hi, _TIME_INCREMENT, tz_hi)
        # duplicate should be ignored
        p.create_time_of_day(TimeOfDay(now_hi - today_dt_hi + _TIME_INCREMENT, tz_hi))
        add_trigger_and_payload(now_hi, yesterday_dt_hi, _TIME_INCREMENT, tz_hi)
        add_trigger_and_payload(now_nz, tomorrow_dt_nz, _TIME_INCREMENT, tz_nz)

        add_trigger_and_payload(now_hi, tomorrow_dt_hi, 2 * _TIME_INCREMENT, tz_hi)
        add_trigger_and_payload(now_nz, today_dt_nz, 2 * _TIME_INCREMENT, tz_nz)
        add_trigger_and_payload(now_nz, yesterday_dt_nz, 2 * _TIME_INCREMENT, tz_nz)

        assert 0 == len(event_log._event_log)

        await asyncio.sleep(_TIME_INCREMENT.total_seconds() + _TIME_DELAY)
        assert 3 == len(event_log._event_log)
        assert set(expected_payloads[:3]) == set(
            payload_to_strs(e.payload) for e in event_log._event_log
        )

        await asyncio.sleep(_TIME_INCREMENT.total_seconds())
        assert 6 == len(event_log._event_log)
        assert set(expected_payloads) == set(
            payload_to_strs(e.payload) for e in event_log._event_log
        )

        pprint(expected_payloads)
nilq/baby-python
python
# Copyright (c) OpenMMLab. All rights reserved.
"""Deployment tester for mmediting models exported to ONNX Runtime or TensorRT."""
import argparse
import warnings
from typing import Any

import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel
from torch import nn

from mmedit.apis import single_gpu_test
from mmedit.core.export import ONNXRuntimeEditing
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import BasicRestorer, build_model


class TensorRTRestorerGenerator(nn.Module):
    """Inner class for tensorrt restorer model inference

    Args:
        trt_file (str): The path to the tensorrt file.
        device_id (int): Which device to place the model.
    """

    def __init__(self, trt_file: str, device_id: int):
        super().__init__()
        # deferred import: mmcv.tensorrt is only available in TensorRT builds of mmcv
        from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            # best effort: engines without custom mmcv ops still work
            warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with TensorRT from source.')
        model = TRTWrapper(
            trt_file, input_names=['input'], output_names=['output'])

        self.device_id = device_id
        self.model = model

    def forward(self, x):
        # run the engine on the chosen CUDA device without autograd tracking
        with torch.cuda.device(self.device_id), torch.no_grad():
            seg_pred = self.model({'input': x})['output']
        seg_pred = seg_pred.detach().cpu()
        return seg_pred


class TensorRTRestorer(nn.Module):
    """A warper class for tensorrt restorer

    Args:
        base_model (Any): The base model build from config.
        trt_file (str): The path to the tensorrt file.
        device_id (int): Which device to place the model.
    """

    def __init__(self, base_model: Any, trt_file: str, device_id: int):
        super().__init__()
        self.base_model = base_model
        restorer_generator = TensorRTRestorerGenerator(
            trt_file=trt_file, device_id=device_id)
        # swap the PyTorch generator for the TensorRT-backed one
        base_model.generator = restorer_generator

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)


class TensorRTEditing(nn.Module):
    """A class for testing tensorrt deployment

    Args:
        trt_file (str): The path to the tensorrt file.
        cfg (Any): The configuration of the testing, \
            decided by the config file.
        device_id (int): Which device to place the model.
    """

    def __init__(self, trt_file: str, cfg: Any, device_id: int):
        super().__init__()
        base_model = build_model(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        # NOTE(review): if base_model is not a BasicRestorer, self.wrapper is
        # never assigned and forward() will fail — confirm supported model types
        if isinstance(base_model, BasicRestorer):
            WrapperClass = TensorRTRestorer
        self.wrapper = WrapperClass(base_model, trt_file, device_id)

    def forward(self, **kwargs):
        return self.wrapper(**kwargs)


def parse_args():
    """Parse command-line arguments for the deployment tester."""
    parser = argparse.ArgumentParser(description='mmediting tester')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('model', help='input model file')
    parser.add_argument(
        'backend',
        help='backend of the model.',
        choices=['onnxruntime', 'tensorrt'])
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument(
        '--save-path',
        default=None,
        type=str,
        help='path to store images and if not given, will not save image')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    return args


def main():
    """Run single-GPU evaluation of the exported model and report metrics."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # init distributed env first, since logger depends on the dist info.
    distributed = False

    # build the dataloader
    dataset = build_dataset(cfg.data.test)

    loader_cfg = {
        **dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
        **dict(
            samples_per_gpu=1,
            drop_last=False,
            shuffle=False,
            dist=distributed),
        **cfg.data.get('test_dataloader', {})
    }

    data_loader = build_dataloader(dataset, **loader_cfg)

    # build the model
    if args.backend == 'onnxruntime':
        model = ONNXRuntimeEditing(args.model, cfg=cfg, device_id=0)
    elif args.backend == 'tensorrt':
        model = TensorRTEditing(args.model, cfg=cfg, device_id=0)

    # only save output images when a destination path was provided
    args.save_image = args.save_path is not None
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(
        model,
        data_loader,
        save_path=args.save_path,
        save_image=args.save_image)

    print()
    # print metrics
    stats = dataset.evaluate(outputs)
    for stat in stats:
        print('Eval-{}: {}'.format(stat, stats[stat]))

    # save result pickle
    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)


if __name__ == '__main__':
    main()
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
@author: Daniel Jiménez-Caminero Costa
"""
import numpy as np
import math


def nonlinear_common(p_0, alpha, m_exponents, v_i_array, threshold_db_array):
    """
    Array lists that are necessary for the calculation of the non-linearity
    and are general to each band.

    This has been implemented as described in formula F.18 (section F.3.6)
    of Annex F (ECMA-74).

    Parameters
    ----------
    p_0: float or numpy.array
        Reference sound pressure, '20 uPa'.
    alpha: float
        Constant for the exponent.
    m_exponents: int
        Max index of the multiplication sequence in Formula 18.
    v_i_array: numpy.array
        Exponents for the multiplication sequence in Formula 18.
    threshold_db_array: numpy.array
        Thresholds ('dB') for their corresponding "vi" exponent; only the
        first ``m_exponents`` entries are used.

    Returns
    -------
    a_exponent_array: numpy.array
        Exponents a_i = (v_{i+1} - v_i) / alpha.
    pt_threshold_array: numpy.array
        Linear sound-pressure thresholds p_t,i = p_0 * 10**(L_i / 20).
    """
    # Exponents for the non-linearity function, one per consecutive pair of
    # v_i values (Formula F.18).
    a_exponent_array = np.asarray(np.diff(v_i_array) / alpha)

    # Convert each dB threshold to a linear sound pressure in one vectorized
    # step: p_t = p_0 * 10^(L/20). Equivalent to the element-wise
    # p_0 * 10**(threshold/20) loop, but computed at C speed.
    thresholds_db = np.asarray(threshold_db_array, dtype=float)[:m_exponents]
    pt_threshold_array = p_0 * np.power(10.0, thresholds_db / 20.0)

    return a_exponent_array, pt_threshold_array
nilq/baby-python
python
"""Integration test: pipe a vehicle's MAVLink stream to a second connection."""
import time
import math
from dronekit import connect
from dronekit.mavlink import MAVConnection
from dronekit.test import with_sitl
from nose.tools import assert_not_equals, assert_equals


@with_sitl
def test_mavlink(connpath):
    """A vehicle attribute callback on a forwarded MAVLink stream should fire."""
    vehicle = connect(connpath, wait_ready=True)

    # forward the first vehicle's MAVLink messages out over UDP
    out = MAVConnection('udpin:localhost:15668')
    vehicle._handler.pipe(out)
    out.start()

    # second vehicle object fed only by the forwarded stream
    vehicle2 = connect('udpout:localhost:15668', wait_ready=True)

    result = {'success': False}

    @vehicle2.on_attribute('location')
    def callback(*args):
        # any location update proves the forwarded stream is alive
        result['success'] = True

    # poll for up to 20 seconds for the callback to fire
    i = 20
    while not result['success'] and i > 0:
        time.sleep(1)
        i -= 1

    assert result['success']
nilq/baby-python
python
# ----------------------------------------------------------------------------- # QP/Python Library # # Port of Miro Samek's Quantum Framework to Python. The implementation takes # the liberty to depart from Miro Samek's code where the specifics of desktop # systems (compared to embedded systems) seem to warrant a different approach. # # Reference: # Practical Statecharts in C/C++; Quantum Programming for Embedded Systems # Author: Miro Samek, Ph.D. # http://www.state-machine.com/ # # ----------------------------------------------------------------------------- # # Copyright (C) 2008-2014, Autolabel AB # All rights reserved # Author(s): Henrik Bohre (henrik.bohre@autolabel.se) # # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # - Neither the name of Autolabel AB, nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL # THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. 
# ----------------------------------------------------------------------------- """Python port of the Quantum Framework""" # Standard import sys assert (2, 4) <= sys.version_info[:2] < (3, 0), \ '%s.%s not supported. Python 2.4 <= required < 3.0' % sys.version_info[:2] # Local from qep import * from qf import * __version__ = '1.0.1'
nilq/baby-python
python
#!/usr/bin/python
'''=========================================================================
  The Software is copyright (c) Commonwealth Scientific and Industrial Research Organisation (CSIRO)
  ABN 41 687 119 230.
  All rights reserved.

  Licensed under the CSIRO BSD 3-Clause License
  You may not use this file except in compliance with the License.
  You may obtain a copy of the License in the file LICENSE.md or at

  https://stash.csiro.au/projects/SMILI/repos/smili/browse/license.txt

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
========================================================================='''
'''
This script generates Hausdorff distances for the hip results
Here the app is a distance app with the syntax:
Usage:

   milxHausdorffDistance  [-c <Case>] [-s] --labelvalue <Label Value> -l
                          <Label> [-p <Output Prefix>] [-o <Output>] [--]
                          [--version] [-h] <Surfaces> ...

Where:

   -c <Case>,  --case <Case>
     Set the case ID being done. Used to name extra output.

   -s,  --symmetric
     Compute forward and backward distances. This is required to get
     Hausdorff distance.

   --labelvalue <Label Value>
     (required)  Set the label value for option --label.

   -l <Label>,  --label <Label>
     (required)  Compute the distances from the labelled image to the
     surface(s) provided.

   -p <Output Prefix>,  --prefix <Output Prefix>
     Output prefix for multiple output

   -o <Output>,  --output <Output>
     Output model name

   --,  --ignore_rest
     Ignores the rest of the labeled arguments following this flag.

   --version
     Displays version information and exits.

   -h,  --help
     Displays usage information and exits.

   <Surfaces>  (accepted multiple times)
     (required)  Surfaces to compute the distances with.

   A Hausdorff Distance tool for models

Example:
'''
# NOTE: Python 2 script (print statements, integer division).
import filenames
import time
import batch

# use half the available cores for the job batches
totalThreads = batch.cores/2

#Constants and paths
#~ parent_dir = filenames.os.getcwd()+'/'
parent_dir = ''
manual_path = 'manuals_renamed/'
output_path = 'Hausdorff/'

home_dir = filenames.os.path.expanduser("~")
smili = home_dir+'/Dev/smili/build/'
app = smili+"bin/milxHausdorffDistance"
options = " "
#~ options = " --symmetric "

#~ result_dirs = ['segmentations', 'segmentations_dyn', 'segmentations_robust', 'segmentations_weight', 'segmentations_weight_fast']
#~ result_dirs_names = ['std', 'dyn', 'robust', 'weight', 'weightfast']
result_dirs = ['results_clipped']
result_dirs_names = ['']

# per-organ settings: which filename token carries the case id, and label value
objects = ['bladder', 'rectum', 'prostate', 'prostate_T2MR']
object_case_index = [0, 0, 0, 1]
objects_values = ['1', '1', '1', '1']

for dir, dirName in zip(result_dirs, result_dirs_names):
    input_path = parent_dir + dir + '/'

    for object, value, case_index in zip(objects, objects_values, object_case_index):
        object_output_path = output_path+object+'/'
        output_prefix = object + "_" + dirName + "_"
        if not filenames.os.access(object_output_path, filenames.os.F_OK): #exists? No then create
            filenames.os.mkdir(object_output_path)

        #is manuals present, else dont bother
        manualList = filenames.getSortedFileList(manual_path+object, '*.nii.gz', True)
        if not manualList:
            print "Dataset doesn't have manuals. Skipping."
            continue

        #The ordering of the surfaces in the SSM are assumed to be alphabetical
        manualList, manCaseList = filenames.getSortedFileListAndCases(manual_path+object, case_index, '*.nii.gz')
        print manualList
        print manCaseList

        commonList = options.split() #create indexable list of above string
        print "Command Options: "
        print commonList

        #create job list to run
        jobsList = []
        prefix_dirs = []
        outNames = []
        for file, case in zip(manualList, manCaseList):
            #output filenames
            prefix_dir = object
            full_prefix_dir = input_path + prefix_dir+'/'
            print "Case", case, ":", file

            #check if result present
            objList, cases = filenames.getSortedFileListAndCases(full_prefix_dir, case_index, 'asm_'+object+'_*' + '.vtk')
            if not case in cases:
                print "Not result for", case,"present at", full_prefix_dir
                continue

            manual = manual_path+object+'/'+file
            index = cases.index(case)
            result = full_prefix_dir+objList[index]
            print "Result found:", result

            #Hausdorff out name
            output_name = object_output_path + output_prefix + str(case).zfill(3) + ".vtk"
            outNames.append(output_name)

            #case command options
            image_option = "--label " + manual + " "
            value_option = "--labelvalue " + str(value) + " "
            out_option = "-o " + output_name + " "
            case_option = "-c " + str(case).zfill(3) + " "
            prefix_option = "-p " + object_output_path + output_prefix + " "
            command_options = prefix_option + out_option + image_option + value_option + case_option
            commandList = command_options.split() #create indexable list of above string

            command = app + " " + result + " " + command_options + options
            print command
            jobsList.append(command)
            #~ break
        #~ print jobsList

        #For each sorted filename, compute the segmentation of the image
        start = time.time()
        #run the jobs over multiple threads
        batch.processJobsFromList(jobsList, totalThreads, True)
        end = time.time()
        elapsed = end - start
        print "Hausdorffs took " + str(elapsed) + " secs or " + str(elapsed/60) + " mins in total"
nilq/baby-python
python
#!/usr/bin/python
#
# ROS driver node for a fabric-based tactile sensing sleeve.  It precomputes
# the pose of every taxel (tactile element) on the forearm and wrist sections
# of the sleeve, serves those poses over ROS services, and streams raw ADC
# readings from an Arduino as RawTaxelArray messages.
import sys
import math, numpy as np

import roslib; roslib.load_manifest('hrl_fabric_based_tactile_sensor')
import rospy

from hrl_msgs.msg import FloatArray
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_fabric_based_tactile_sensor.adc_publisher_node as apn

from m3skin_ros.msg import RawTaxelArray
from geometry_msgs.msg import Transform

from m3skin_ros.srv import None_TransformArray, None_TransformArrayResponse
from m3skin_ros.srv import None_String


class Tactile_Sleeve():
    """Holds the precomputed taxel transform arrays for the forearm and
    wrist sections of the sleeve.

    The transforms are computed once in the constructor; the ``*_cb``
    methods are ROS service callbacks that simply return the cached data.
    """

    def __init__(self):
        # Transform array for the 12 forearm taxels.
        self.tar_forearm = None_TransformArrayResponse()
        self.setup_forearm_taxels_transforms()

        # Transform array for the 13 wrist taxels.
        self.tar_wrist = None_TransformArrayResponse()
        self.setup_wrist_taxels_transforms()

    def setup_forearm_taxels_transforms(self):
        """Lay out 3 rings of 4 taxels (12 total) on a 0.04 m radius
        cylinder along the forearm link and fill ``self.tar_forearm.data``
        in raw-ADC channel order."""
        n_circum = 4
        n_axis = 3
        self.link_name_forearm = '/wrist_LEFT'
        # Cylinder geometry: radius, spacing between rings, angular spacing
        # within a ring, and offsets of the first taxel.
        rad = 0.04
        dist_along_axis = 0.065
        angle_along_circum = 2*math.pi / n_circum
        offset_along_axis = 0.02
        offset_along_circum = math.radians(-45)
        n_taxels = n_circum * n_axis
        self.tar_forearm.data = [None for i in range(n_taxels)]
        # mapping the taxels to the raw ADC list.
        idx_list = [6,9,0,3,7,10,1,4,8,11,2,5]
        for i in range(n_axis):
            for j in range(n_circum):
                t = Transform()
                ang = j*angle_along_circum + offset_along_circum
                # Position of taxel j on ring i, on the cylinder surface.
                t.translation.x = rad * math.cos(ang)
                t.translation.y = rad * math.sin(ang)
                t.translation.z = offset_along_axis + i * dist_along_axis
                # Orientation: Rz(-ang)*Ry(-90deg) -- presumably points the
                # taxel frame's z-axis radially outward (TODO confirm).
                rot_mat = tr.Rz(-ang)*tr.Ry(math.radians(-90))
                quat = tr.matrix_to_quaternion(rot_mat)
                t.rotation.x = quat[0]
                t.rotation.y = quat[1]
                t.rotation.z = quat[2]
                t.rotation.w = quat[3]
                # Store at the ADC channel index for this taxel.
                self.tar_forearm.data[idx_list[i*n_circum+j]] = t

    def setup_wrist_taxels_transforms(self):
        """Fill ``self.tar_wrist.data`` with 13 taxels: one ring of 4 at
        radius 0.03 m, two rings of 4 at radius 0.02 m further along the
        link, plus a single end-cap taxel at index 12."""
        self.link_name_wrist = '/handmount_LEFT'
        n_circum = 4
        dist_along_axis = 0.065
        angle_along_circum = 2*math.pi / n_circum
        offset_along_circum = math.radians(-45)
        self.tar_wrist.data = [None for i in range(13)]

        # First ring: radius 0.03 m, single ring of 4 taxels.
        # mapping the taxels to the raw ADC list.
        idx_list = [6,9,2,5]
        n_axis = 1
        rad = 0.03
        offset_along_axis = -0.04
        for i in range(n_axis):
            for j in range(n_circum):
                t = Transform()
                ang = j*angle_along_circum + offset_along_circum
                t.translation.x = rad * math.cos(ang)
                t.translation.y = rad * math.sin(ang)
                t.translation.z = offset_along_axis + i * dist_along_axis
                rot_mat = tr.Rz(-ang)*tr.Ry(math.radians(-90))
                quat = tr.matrix_to_quaternion(rot_mat)
                t.rotation.x = quat[0]
                t.rotation.y = quat[1]
                t.rotation.z = quat[2]
                t.rotation.w = quat[3]
                self.tar_wrist.data[idx_list[i*n_circum+j]] = t

        # Second and third rings: radius 0.02 m, two rings of 4 taxels.
        # mapping the taxels to the raw ADC list.
        idx_list = [8,11,0,3,7,10,1,4]
        n_axis = 2
        rad = 0.02
        offset_along_axis = -0.17
        for i in range(n_axis):
            for j in range(n_circum):
                t = Transform()
                ang = j*angle_along_circum + offset_along_circum
                t.translation.x = rad * math.cos(ang)
                t.translation.y = rad * math.sin(ang)
                t.translation.z = offset_along_axis + i * dist_along_axis
                rot_mat = tr.Rz(-ang)*tr.Ry(math.radians(-90))
                quat = tr.matrix_to_quaternion(rot_mat)
                t.rotation.x = quat[0]
                t.rotation.y = quat[1]
                t.rotation.z = quat[2]
                t.rotation.w = quat[3]
                self.tar_wrist.data[idx_list[i*n_circum+j]] = t

        # End-cap taxel (index 12) on the link axis, flipped by Rx(180deg).
        t = Transform()
        t.translation.x = 0.
        t.translation.y = 0
        t.translation.z = -0.2
        rot_mat = tr.Rx(math.radians(180))
        quat = tr.matrix_to_quaternion(rot_mat)
        t.rotation.x = quat[0]
        t.rotation.y = quat[1]
        t.rotation.z = quat[2]
        t.rotation.w = quat[3]
        self.tar_wrist.data[12] = t

    def local_coord_frames_forearm_cb(self, req):
        # Service callback: cached forearm taxel transforms.
        return self.tar_forearm

    def local_coord_frames_wrist_cb(self, req):
        # Service callback: cached wrist taxel transforms.
        return self.tar_wrist

    def link_name_forearm_cb(self, req):
        # Service callback: TF link the forearm taxels are attached to.
        return self.link_name_forearm

    def link_name_wrist_cb(self, req):
        # Service callback: TF link the wrist taxels are attached to.
        return self.link_name_wrist


if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser()

    p.add_option('--wrist', action='store_true', dest='wrist',
                 help='node for the wrist taxels of the sleeve')
    p.add_option('--forearm', action='store_true', dest='forearm',
                 help='node for the forearm taxels of the sleeve')
    p.add_option('--serial_dev', action='store', dest='serial_dev_name',
                 type='string',
                 help='path to the arduino serial device')

    opt, args = p.parse_args()

    raw_data_forearm_pub = rospy.Publisher('taxels/raw_data', RawTaxelArray)

    ts = Tactile_Sleeve()

    # Exactly one sleeve section is served per node instance.
    if opt.forearm:
        rospy.Service('taxels/srv/local_coord_frames', None_TransformArray,
                      ts.local_coord_frames_forearm_cb)
        rospy.Service('taxels/srv/link_name', None_String,
                      ts.link_name_forearm_cb)
        n_taxels = 12
    elif opt.wrist:
        rospy.Service('taxels/srv/local_coord_frames', None_TransformArray,
                      ts.local_coord_frames_wrist_cb)
        rospy.Service('taxels/srv/link_name', None_String,
                      ts.link_name_wrist_cb)
        n_taxels = 13
    else:
        rospy.logerr('Specify either --forearm or --wrist')
        sys.exit()

    rospy.init_node('fabric_tactile_sleeve_driver_node')

    baudrate = 115200
    dev = apn.setup_serial(opt.serial_dev_name, baudrate)

    # Discard the first few serial lines so reading starts on a clean
    # message boundary.
    for i in range(10):
        dev.readline()

    rospy.loginfo('Started publishing data')

    rta = RawTaxelArray()
    while not rospy.is_shutdown():
        # 16 ADC channels are read; only the first n_taxels carry taxels.
        rta.val_z = apn.get_adc_data(dev, 16)[0:n_taxels]
        raw_data_forearm_pub.publish(rta)

    dev.close()
nilq/baby-python
python
# -*- coding: utf-8 -*-
import os

import h5py
import pathlib as p
import numpy as np

from .trs import Trs

__all__ = ['Ta']


class Ta(Trs):
    '''
    TA experimental class

    Child class of TRS (time-resolve spectroscopy)
    Handles Uberfast ps/fs and Fastlab TA files.
    '''

    def __init__(self, full_path=None, dir_save=None):
        """Create a TA object, loading data from *full_path* if given.

        Args:
            full_path: path to a .hdf5 (Fastlab) or .wtf (Uberfast) file,
                or None for an empty object.
            dir_save: passed to the Trs parent (save directory).
        """
        super().__init__(dir_save)
        self.info = 'TA experimental data'
        self.probe = []       # probe spectra, filled by fastlab_import
        self.reference = []   # reference spectra, filled by fastlab_import

        # case of providing path to data
        if full_path is not None:
            self.path = p.PurePath(full_path)
            self.dir_path = self.path.parent
            self.save_path = self.create_save_path()
            self.load_data()
        else:
            # empty TA object
            self.path = None
            self.dir_path = None
            self.save_path = None
        # Debug marker left by the author to confirm the right module
        # version is imported.
        print('correct version of analysis.')

    def reset_ta(self):
        """Reloading data after resetting the calculated attributes.

        Raises:
            RuntimeError: Cannot reset in the case of empty Ta instance
        """
        if self.path is None:
            raise RuntimeError('empty TA object, cannot reset values')
        else:
            print('resetting all the values')
            self.kin = None
            self.kin_rng = None
            self.spe = None
            self.spe_rng = None
            self.tmax_id = None
            self.tmin_id = None
            self.wlmax_id = None
            self.wlmin_id = None
            self.t0 = 0
            self.inc_sweeps = None
            self.figure = None
            self._fitParams = None
            self._fitData = None  # store the fitted data
            self.chirp = None
            self._chirp = None
            self.load_data()

    def load_data(self):
        '''
        Calls loading function based on file suffix.
        '''
        if self.path.suffix == '.hdf5':
            self.fastlab_import()
        elif self.path.suffix == '.wtf':
            self.uberfast_import()
        else:
            print('Unknown suffix')

    def fastlab_import(self):
        '''
        Importing .hdf5 files from Fastlab.
        '''
        print('loading fastlab TA data')
        # os.chdir(p.PurePath(self.dir_path))
        f = h5py.File(p.PurePath(self.path), 'r')
        avg = np.array(f['Average'])
        # Signal is stored in OD; convert to mOD.  First row/column hold
        # the wavelength and time axes.
        self.data, self.data_raw = avg[1:, 1:]*1000, avg[1:, 1:]*1000
        self.wl = avg[0, 1:]  # array loads transposed compared to Matlab
        self.wl_raw = self.wl
        self._t = avg[1:, 0]
        self.t_raw = self._t
        metaD = f['Average'].attrs['time zero']
        if metaD:  # check for empty list
            # Set wavelength units / not stored in HDF5 file
            self.wl_unit = 'nm'
            delay = f['/Average'].attrs['delay type']
            self.delay_type = str(delay)
            if 'Long' in str(delay):
                self.t_unit = 'ns'
                self.t_conversion = 1e-9
            elif 'UltraShort' in str(delay):
                self.t_unit = 'fs'
                self.t_conversion = 1e-15
            elif 'Short' in str(delay):
                self.t_unit = 'ps'
                self.t_conversion = 1e-12
            else:
                print('No delayType imported')
                print(str(delay))

            self.n_sweeps = len(f['Sweeps'].keys())
            self.inc_sweeps = [1]*self.n_sweeps
            self.n_shots = float(f['Average'].attrs['num shots'])
            # Wavelength calibration anchor points.
            self.px_low = float(f['Average'].attrs['calib pixel low'])
            self.wl_low = float(f['Average'].attrs['calib wave low'])
            self.px_high = float(f['Average'].attrs['calib pixel high'])
            self.wl_high = float(f['Average'].attrs['calib wave high'])

            # loading probe/reference spectra
            for i in list(f['Spectra']):
                if 'Error' in i:
                    self.error.append(np.array(f['Spectra'][i]))
                elif 'Probe' in i:
                    self.probe.append(np.array(f['Spectra'][i]))
                elif 'Reference' in i:
                    self.reference.append(np.array(f['Spectra'][i]))
                else:
                    print('Unknown specra to load..')
            self.ref_spe_init = self.reference[0]
            self.ref_spe_end = self.reference[-1]
            self.probe_spe_init = self.probe[0]
            self.probe_spe_end = self.probe[-1]

            self.sweeps = []
            for i in list(f['Sweeps']):
                self.sweeps.append(np.array(f['Sweeps'][i][1:, 1:] * 1000))

    def uberfast_import(self):
        """Importing .wtf files from Uberfast fs and ps setups.
        """
        data = np.loadtxt(self.path)
        wl_last = -1
        # BUGFIX: initialise before the conditional.  Previously this name
        # was only bound inside the `if`, so a file whose first timeslice
        # was <= 0.1 raised UnboundLocalError further down.
        ignore_first_spec = False
        if max(data[:, 1]) > 0.1:
            print('ignoring first timeslice when importing ')
            ignore_first_spec = True
            data = np.delete(data, 1, axis=1)
        if not data[256:, 0].any():  # all zeros
            print('IR part empty, ps data')
            wl_last = 256
        self.wl = data[1:wl_last, 0]
        self.data = data[1:wl_last, 1:].transpose()*1000  # OD -> mOD
        self._t = data[0, 1:]/1000
        self.t_unit = 'ps'
        self.t_conversion = 1e-12
        self.wl_unit = 'nm'

        # import sweeps
        try:
            sweep_files = [k for k in os.listdir(self.dir_path.joinpath('meas'))
                           if 'meas' in k]
        except FileNotFoundError:
            # BUGFIX: os.listdir raises FileNotFoundError (not NameError)
            # when the 'meas' directory is missing; catching the right
            # exception makes a missing directory a graceful skip instead
            # of a crash.
            print('No sweeps to load')
        else:
            self.n_sweeps = len(sweep_files)
            self.inc_sweeps = [1]*self.n_sweeps
            self.sweeps = (np.loadtxt(
                self.dir_path.joinpath('meas', k)
            )[1:, 1:].transpose()[:, :wl_last]*1000 for k in sweep_files)
            if ignore_first_spec:
                self.sweeps = [np.delete(sweep, 0, axis=0)
                               for sweep in self.sweeps]
nilq/baby-python
python
from . import card


def _parse_size(size_name):
    """Parse *size_name* into a card size.

    Returns (size, None) on success, or (None, error_string) when the
    name is not a valid size.
    """
    try:
        return card.size_from_str(size_name), None
    except TypeError:
        return None, "ERROR[INVALID_SIZE({!r})]".format(size_name)


def get_image_slug(value, size: str="small"):
    """Return the image slug for *value* rendered at *size*.

    Errors are reported in-band as "ERROR[...]" strings so template
    rendering never raises.
    """
    if not isinstance(value, card.Card):
        return "ERROR[NOT_A_CARD({!r})]".format(value)
    parsed, error = _parse_size(size)
    if error is not None:
        return error
    try:
        return card.image_slug(value, parsed)
    except Exception as e:
        return "ERROR[{!s}]".format(e)


def size_width(value):
    """Return the width of the named size as a string, or an ERROR[...] string."""
    parsed, error = _parse_size(value)
    if error is not None:
        return error
    return str(parsed.w)


def size_height(value):
    """Return the height of the named size as a string, or an ERROR[...] string."""
    parsed, error = _parse_size(value)
    if error is not None:
        return error
    return str(parsed.h)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""Pytest suite for autoradarr.

Covers DB access (real MongoDB via env-var credentials plus mongomock for
isolated cases), the IMDb/Radarr HTTP helpers (live, requests_mock and
pytest-mock), the filtering pipeline and the main() entry point.
"""
import os

import mongomock
import pymongo
import pytest
import requests

from autoradarr.autoradarr import (
    convert_imdb_in_radarr,
    filter_by_detail,
    filter_in_db,
    filter_in_radarr,
    filter_regular_result,
    get_db,
    get_imdb_data,
    get_radarr_data,
    get_tmdbid_by_imdbid,
    main,
    mark_filtred_in_db,
    necessary_fields_for_radarr,
    set_root_folders_by_genres,
)

# Real-database connection settings, taken from the environment.
db_host = os.environ.get('AUTORADARR_DB_HOST')
DB_NAME = 'autoradarr'
db_user = os.environ.get('AUTORADARR_DB_USERNAME')
db_password = os.environ.get('AUTORADARR_DB_PASSWORD')


@pytest.fixture()
def dbconnection():
    # Yields a live pymongo client; closed after the test.
    pymongo_client = pymongo.MongoClient(db_host,
                                         username=db_user,
                                         password=db_password,
                                         authSource=DB_NAME)
    yield pymongo_client
    pymongo_client.close()


def test_get_db_pass(dbconnection):
    '''
    Testing returned db object, insert and delete object
    '''
    assert get_db(db_host, DB_NAME, db_user, db_password) == dbconnection[DB_NAME]
    # create and remove object in db
    inserted_id = dbconnection[DB_NAME].test.insert_one({"test": "test"}).inserted_id
    assert dbconnection[DB_NAME].test.delete_one({'_id': inserted_id})


def test_get_db_fail():
    # Each bad credential/host must yield None rather than raise.
    assert get_db('incorrect dbname', DB_NAME, db_user, db_password) is None
    assert get_db(db_host, DB_NAME, 'bad_user', db_password) is None
    assert get_db(db_host, DB_NAME, db_user, 'bad_password') is None


# Case 1: only the 2020 film passes the rating (>= 6.5), rating-count
# (>= 5000) and year (<= 2021) thresholds.  Case 2: missing keys or
# below-threshold values filter everything out.
@pytest.mark.parametrize((('newfilms'), ('expected')), [
    (
        [
            {'year': '2021', 'imDbRating': '5.9', 'imDbRatingCount': '952'},
            {'year': '2020', 'imDbRating': '6.5', 'imDbRatingCount': '27165'},
            {'year': '2021', 'imDbRating': '7.3', 'imDbRatingCount': '4999'},
            {'year': '2021', 'imDbRating': '6.4', 'imDbRatingCount': '5000'}
        ],
        [
            {'year': '2020', 'imDbRating': '6.5', 'imDbRatingCount': '27165'}
        ]
    ),
    (
        [
            {'year': '2019', 'imDbRating': '6.5', 'imDbRatingCount': '5000'},
            {'year': '2021', 'imDbRating': '7.3', 'imDbRatingCount': '4999'},
            {'imDbRating': '7.3', 'imDbRatingCount': '5000'},
            {'year': '2021', 'imDbRatingCount': '5000'},
            {'year': '2021', 'imDbRating': '7.3'},
            {'year': '2021', 'imDbRating': '6.4', 'imDbRatingCount': '5000'}
        ],
        []
    )
])
def test_filter_regular_result(newfilms, expected):
    assert expected == filter_regular_result(newfilms, 'imDbRating',
                                             'imDbRatingCount', 'year', 2021)


# Films already present in the DB (by imdbId) must be dropped from the
# candidate list.
@pytest.mark.parametrize((('film_in_db'), ('newfilms'), ('expected')), [
    (
        [{'imdbId': 'tt7979580'}],          # film in db
        [
            {'id': 'tt7979580'},            # newfilms
            {'id': 'tt7979581'},
            {'id': 'tt79795801'}
        ],
        [
            {'id': 'tt7979581'},            # expected
            {'id': 'tt79795801'}
        ]
    ),
    (
        [
            {'imdbId': 'tt180'},            # film in db
            {'imdbId': 'tt8080'},
            {'imdbId': 'tt8'}
        ],
        [
            {'id': 'tt180'},                # newfilms
            {'id': 'tt8080'},
            {'id': 'tt8'}
        ],
        []                                  # expected
    )
])
def test_filter_in_db(newfilms, film_in_db, expected):
    db_client = mongomock.MongoClient()
    db = db_client.db
    collection = db.films
    collection.insert_many(film_in_db)
    # If persist in db
    assert filter_in_db(db, newfilms, 'id') == expected


def test_get_imdb_data_from_site():
    '''
    Test 'details' param from 'imdb-api.com'
    '''
    # Live call against the real IMDb API.
    r = get_imdb_data(requests.session(), 'details', 'tt7979580')
    assert r.json()['id'] == 'tt7979580'


def test_get_imdb_data_mock(requests_mock):
    '''
    Test 'popular' param from requests_mock
    '''
    url = 'https://imdb-api.com/ru/API/MostPopularMovies/' + os.environ.get('IMDB_APIKEY')
    requests_mock.get(url, text='tt7979580', status_code=200)
    assert get_imdb_data(requests.session(), 'popular').text == 'tt7979580'


def test_get_imdb_data_fail(requests_mock):
    # Non-2xx status must produce None.
    url = 'https://imdb-api.com/ru/API/MostPopularMovies/' + os.environ.get('IMDB_APIKEY')
    requests_mock.get(url, text='tt7979580', status_code=300)
    assert get_imdb_data(requests.session(), 'popular') is None


def test_get_radarr_data_get_movie(requests_mock):
    url = os.environ.get('RADARR_URL') + '/api/v3/movie?apiKey=' + \
        os.environ.get('RADARR_APIKEY')
    requests_mock.get(url, text='tt7979580', status_code=200)
    assert get_radarr_data(requests.session(), 'get_movie').text == 'tt7979580'


def test_get_radarr_data_add_movie(requests_mock):
    # 'add_movie' POSTs; 201 Created is the success status.
    url = os.environ.get('RADARR_URL') + '/api/v3/movie?apiKey=' + \
        os.environ.get('RADARR_APIKEY')
    requests_mock.post(url, text='tt7979580', status_code=201)
    assert get_radarr_data(requests.session(), 'add_movie',
                           api_json={'a': 'b'}).text == 'tt7979580'


def test_get_radarr_data_fail(requests_mock):
    url = os.environ.get('RADARR_URL') + '/api/v3/movie?apiKey=' + \
        os.environ.get('RADARR_APIKEY')
    requests_mock.get(url, text='tt7979580', status_code=300)
    assert get_radarr_data(requests.session(), 'get_movie') is None
    requests_mock.post(url, text='tt7979580', status_code=301)
    assert get_radarr_data(requests.session(), 'add_movie',
                           api_json={'a': 'b'}) is None


# Marking is skipped (False) when the film is already in the DB; otherwise
# the film is inserted with either persistInRadarr or filtred set.
@pytest.mark.parametrize((('film_in_db'), ('imdbid'), ('title'),
                          ('persist_in_radarr'), ('expected')), [
    (
        [
            {'imdbId': 'tt180'},            # film in db
            {'imdbId': 'tt8080'},
            {'imdbId': 'tt8'}
        ],
        'tt180', 'tt180 title', 0, False
    ),
    (
        [
            {'imdbId': 'tt180'},            # film in db
            {'imdbId': 'tt8080'},
            {'imdbId': 'tt8'}
        ],
        'tt170', 'tt170 title', 1, True
    ),
    (
        [
            {'imdbId': 'tt180'}             # film in db
        ],
        'tt170', 'tt170 title', 0, True
    )
])
def test_mark_filtred_in_db(film_in_db, imdbid, title, persist_in_radarr, expected):
    db_client = mongomock.MongoClient()
    db = db_client.db
    collection = db.films
    collection.insert_many(film_in_db)
    # If persist in db
    assert mark_filtred_in_db(db, imdbid, title, persist_in_radarr) == expected
    if expected:
        film = collection.find_one({'imdbId': imdbid})
        assert film['originalTitle'] == title
        assert film['added']
        if persist_in_radarr == 1:
            assert film['persistInRadarr'] == 1
        else:
            assert film['filtred'] == 1


def test_filter_in_radarr(mocker):
    mocker.patch('autoradarr.autoradarr.get_radarr_data', return_value=True)
    # imdbid_list in filter_in_radarr:
    mocker.patch('autoradarr.autoradarr.get_radarr_imdbid_list',
                 return_value=['tt180', 'tt190'])
    db_client = mongomock.MongoClient()
    db = db_client.db
    newfilms = [{'id': 'tt180', 'title': 'Title'},
                {'id': 'tt170', 'title': 'Title2'}]
    expected = [{'id': 'tt170', 'title': 'Title2'}]
    result = filter_in_radarr(requests.session(), db, newfilms, 'id', 'title')
    assert result == expected
    # Film already in Radarr must have been marked in the DB.
    film_in_db = db.films.find_one({'imdbId': 'tt180'})
    assert film_in_db['imdbId'] == 'tt180'
    assert film_in_db['originalTitle'] == 'Title'

    # Test empty return
    mocker.patch('autoradarr.autoradarr.get_radarr_imdbid_list',
                 return_value=['tt180', 'tt190', 'tt170'])
    assert filter_in_radarr(requests.session(), db, newfilms, 'id', 'title') == []


def test_filter_in_radarr_fail(mocker):
    # Radarr unreachable: the candidate list passes through unchanged.
    mocker.patch('autoradarr.autoradarr.get_radarr_data', return_value=None)
    # imdbid_list in filter_in_radarr:
    newfilms = [{'id': 'tt180', 'title': 'Title'},
                {'id': 'tt170', 'title': 'Title2'}]
    db_client = mongomock.MongoClient()
    db = db_client.db
    assert filter_in_radarr(requests.session(), db, newfilms, 'id', 'title') == newfilms


def test_set_root_folders_by_genres():
    # Animation genre routes to the animations root; folder names are
    # whitespace-normalised.
    radarr_root_animations = os.environ.get('RADARR_ROOT_ANIMATIONS')
    film = {'fullTitle': 'Normal Full Title (2021)'}
    genres = ['Action', 'Animation']
    expected = {'fullTitle': 'Normal Full Title (2021)',
                'rootFolderPath': radarr_root_animations,
                'folderName': radarr_root_animations + '/Normal Full Title (2021)'}
    assert set_root_folders_by_genres(film, genres) == expected

    film = {'fullTitle': '%Normal-Full\t\n\r\f\vTitle_ (2021)'}
    expected = {'fullTitle': '%Normal-Full\t\n\r\f\vTitle_ (2021)',
                'rootFolderPath': radarr_root_animations,
                'folderName': radarr_root_animations + '/Normal-Full-Title_ (2021)'}
    assert set_root_folders_by_genres(film, genres) == expected

    # Non-animation genres route to the default root.
    radarr_root_other = os.environ.get('RADARR_ROOT_OTHER')
    genres = ['Action', 'Crime']
    film = {'fullTitle': ' %/Normal-Full\t/Title_ (2021)_ '}
    expected = {'fullTitle': ' %/Normal-Full\t/Title_ (2021)_ ',
                'rootFolderPath': radarr_root_other,
                'folderName': radarr_root_other + '/Normal-Full-Title_ (2021)_'}
    assert set_root_folders_by_genres(film, genres) == expected


def test_set_root_folders_by_genres_fail():
    # A title that sanitises to nothing must raise.
    with pytest.raises(Exception, match='Directory name can\'t be empty'):
        set_root_folders_by_genres({'fullTitle': ' %^$&% Ё '}, ['Action'])


def test_filter_by_detail(requests_mock):
    # Films kept when rating >= 7 or genres include Adventure-type hits;
    # the dropped film must be marked as filtered in the DB.
    url1 = 'https://imdb-api.com/ru/API/Title/' + os.environ.get('IMDB_APIKEY') + '/tt7979580'
    requests_mock.get(url1, json={'genres': 'Action, Adventure'})
    url2 = 'https://imdb-api.com/ru/API/Title/' + os.environ.get('IMDB_APIKEY') + '/tt170'
    requests_mock.get(url2, json={'genres': 'Action, Drama'})
    url3 = 'https://imdb-api.com/ru/API/Title/' + os.environ.get('IMDB_APIKEY') + '/tt190'
    requests_mock.get(url3, json={'genres': 'Drama'})
    newfilms = [{'id': 'tt7979580', 'imDbRating': '6.9', 'title': 'Title1',
                 'fullTitle': '1'},
                {'id': 'tt170', 'imDbRating': '7', 'title': 'Title2',
                 'fullTitle': '2'},
                {'id': 'tt190', 'imDbRating': '6.9', 'title': 'Title3',
                 'fullTitle': '3'}]
    db_client = mongomock.MongoClient()
    db = db_client.db
    result = filter_by_detail(requests.session(), db, newfilms)
    assert len(result) == 2
    assert result[0]['id'] == 'tt7979580'
    assert result[1]['id'] == 'tt170'
    # mark_filtred_in_db
    assert db.films.find_one({'imdbId': 'tt190'})['imdbId'] == 'tt190'


def test_filter_by_detail_fail(mocker):
    # IMDb unreachable: nothing can be validated, so nothing passes.
    mocker.patch('autoradarr.autoradarr.get_imdb_data', return_value=None)
    # imdbid_list in filter_in_radarr:
    newfilms = [{'id': 'tt180', 'title': 'Title'},
                {'id': 'tt170', 'title': 'Title2'}]
    db_client = mongomock.MongoClient()
    db = db_client.db
    assert filter_by_detail(requests.session(), db, newfilms) == []


# IMDb field names map onto Radarr ones; year becomes an int.
@pytest.mark.parametrize((('newfilms'), ('expected')), [
    (
        [
            {'title': 'Title1', 'id': 'tt180', 'year': '2019',
             'folderName': '/root/folder', 'rootFolderPath': '/root'},
            {'title': 'Title2', 'id': 'tt8080', 'year': '2021',
             'folderName': '/root/folder2', 'rootFolderPath': '/root'}
        ],
        [
            {'originalTitle': 'Title1', 'imdbId': 'tt180', 'year': 2019,
             'folderName': '/root/folder', 'rootFolderPath': '/root'},
            {'originalTitle': 'Title2', 'imdbId': 'tt8080', 'year': 2021,
             'folderName': '/root/folder2', 'rootFolderPath': '/root'}
        ]
    ),
    (
        [
            {'title': 'Title 1', 'id': 'tt180', 'year': '2033',
             'folderName': '/root/folder-(3)', 'rootFolderPath': '/root'}
        ],
        [
            {'originalTitle': 'Title 1', 'imdbId': 'tt180', 'year': 2033,
             'folderName': '/root/folder-(3)', 'rootFolderPath': '/root'}
        ]
    )
])
def test_convert_imdb_in_radarr(newfilms, expected):
    assert convert_imdb_in_radarr(newfilms) == expected


def test_get_tmdbid_by_imdbid():
    # Live TMDB lookup.
    assert get_tmdbid_by_imdbid(requests.session(), 'tt7979580') == 501929


def test_get_tmdbid_by_imdbid_fail():
    # Unknown id resolves to 0.
    assert get_tmdbid_by_imdbid(requests.session(), 'tt70') == 0


def test_necessary_fields_for_radarr():
    film = {}
    film['folderName'] = '/folder'
    film['originalTitle'] = 'Title 1'
    film['imdbId'] = 'tt7979580'
    excepted = film
    excepted['path'] = film['folderName']
    excepted['title'] = film['originalTitle']
    excepted['qualityProfileId'] = int(os.environ.get('RADARR_DEFAULT_QUALITY'))
    excepted['tmdbId'] = 501929
    assert necessary_fields_for_radarr(requests.session(), film) == excepted


def test_main_pass(mocker):
    newfilms = [
        {'fullTitle': 'Mortal Kombat (2021)'},
        {'fullTitle': 'I Care a Lot (2020)'}
    ]
    mocker.patch('autoradarr.autoradarr.get_new_films', return_value=newfilms)
    mocker.patch('autoradarr.autoradarr.add_to_radarr', return_value=len(newfilms))
    assert main() == len(newfilms)


def test_main_db_fail(mocker):
    # DB unavailable: main aborts with None.
    mocker.patch('autoradarr.autoradarr.get_db', return_value=None)
    assert main() is None
nilq/baby-python
python
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from bigdl.nn.layer import Container, Layer
from bigdl.util.common import *

# NOTE(review): `sys`, `np`, `JavaValue`, `RDD`, `to_sample_rdd` and
# `callBigDlFunc` all come from the star import above (bigdl.util.common) --
# presumably re-exported there; confirm against the BigDL version in use.
if sys.version >= '3':
    # Python 3 removed `long` and `unicode`; alias them so code written for
    # Python 2 keeps working.
    long = int
    unicode = str


class ZooModelCreator(JavaValue):
    # Mixin that tells the BigDL JVM bridge which Scala-side factory method
    # instantiates the JVM counterpart of this Python class.
    def jvm_class_constructor(self):
        # Scala factory methods follow the "createZoo<PythonClassName>"
        # naming convention.
        name = "createZoo" + self.__class__.__name__
        print("creating: " + name)
        return name


class ZooModel(ZooModelCreator, Container):
    """
    The base class for models in Analytics Zoo.
    """
    def predict_classes(self, x, batch_size=32, zero_based_label=True):
        """
        Predict for classes. By default, label predictions start from 0.

        # Arguments
        x: Prediction data. A Numpy array or RDD of Sample.
        batch_size: Number of samples per batch. Default is 32.
        zero_based_label: Boolean. Whether result labels start from 0.
                          Default is True. If False, result labels start from 1.
        """
        if isinstance(x, np.ndarray):
            # Wrap the ndarray in a Sample RDD; labels are irrelevant for
            # prediction, so dummy zeros are used.
            data_rdd = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            data_rdd = x
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
        # Delegates the actual prediction to the JVM side.
        return callBigDlFunc(self.bigdl_type, "zooModelPredictClasses",
                             self.value,
                             data_rdd,
                             batch_size,
                             zero_based_label)

    def save_model(self, path, weight_path=None, over_write=False):
        """
        Save the model to the specified path.

        # Arguments
        path: The path to save the model. Local file system, HDFS and
              Amazon S3 are supported. HDFS path should be like
              'hdfs://[host]:[port]/xxx'.
              Amazon S3 path should be like 's3a://bucket/xxx'.
        weight_path: The path to save weights. Default is None.
        over_write: Whether to overwrite the file if it already exists.
                    Default is False.
        """
        callBigDlFunc(self.bigdl_type, "saveZooModel",
                      self.value, path, weight_path, over_write)

    def summary(self):
        """
        Print out the summary of the model.
        """
        callBigDlFunc(self.bigdl_type, "zooModelSummary", self.value)

    @staticmethod
    def _do_load(jmodel, bigdl_type="float"):
        # Wrap an existing JVM model object in a Python-side Layer and
        # rebind its JVM handle.
        model = Layer(jvalue=jmodel, bigdl_type=bigdl_type)
        model.value = jmodel
        return model
nilq/baby-python
python
import base64
import difflib
import threading
from pathlib import Path
from typing import Tuple
from urllib.parse import quote

from nornir.core.task import Optional, Result, Task

import requests

# Gitlab's file API is not safe for concurrent commits to the same branch,
# so all mutating requests are serialised through this lock.
LOCK = threading.Lock()


def _generate_diff(original: str, fromfile: str, tofile: str, content: str) -> str:
    """Return a unified diff between *original* and *content*."""
    diff = difflib.unified_diff(
        original.splitlines(), content.splitlines(), fromfile=fromfile, tofile=tofile
    )
    return "\n".join(diff)


def _remote_exists(
    task: Task,
    session: requests.Session,
    url: str,
    repository: str,
    filename: str,
    ref: str,
) -> Tuple[bool, str]:
    """Check whether *filename* exists in *repository* at *ref*.

    Returns (True, decoded content) or (False, "").
    """
    quoted_repository = quote(repository, safe="")
    quoted_filename = quote(filename, safe="")
    resp = session.get(
        f"{url}/api/v4/projects/{quoted_repository}/repository/files/{quoted_filename}?ref={ref}"
    )
    if resp.status_code == 200:
        # Gitlab returns file content base64-encoded.
        return (
            True,
            base64.decodebytes(resp.json()["content"].encode("ascii")).decode(),
        )
    return (False, "")


def _local_exists(task: Task, filename: str) -> Tuple[bool, str]:
    """Check whether the local file *filename* exists.

    Returns (True, content) or (False, "").
    """
    try:
        with open(Path(filename)) as f:
            content = f.read()
        return (True, content)
    except FileNotFoundError:
        return (False, "")


def _create(
    task: Task,
    session: requests.Session,
    url: str,
    repository: str,
    filename: str,
    content: str,
    branch: str,
    commit_message: str,
    dry_run: bool,
) -> str:
    """Create *filename* in the repository; returns the unified diff."""
    quoted_repository = quote(repository, safe="")
    quoted_filename = quote(filename, safe="")
    if dry_run:
        return _generate_diff("", "", filename, content)

    with LOCK:
        url = f"{url}/api/v4/projects/{quoted_repository}/repository/files/{quoted_filename}"
        data = {"branch": branch, "content": content, "commit_message": commit_message}
        resp = session.post(url, data=data)
        # BUGFIX: the f-string had no placeholders, so the error carried no
        # context; include the file name and HTTP status.
        if resp.status_code != 201:
            raise RuntimeError(
                f"Unable to create file '{filename}': {resp.status_code}!"
            )
    return _generate_diff("", "", filename, content)


def _update(
    task: Task,
    session: requests.Session,
    url: str,
    repository: str,
    filename: str,
    content: str,
    branch: str,
    commit_message: str,
    dry_run: bool,
) -> str:
    """Update *filename* in the repository; returns the unified diff."""
    quoted_repository = quote(repository, safe="")
    quoted_filename = quote(filename, safe="")
    exists, original = _remote_exists(task, session, url, repository, filename, branch)

    # BUGFIX (here and below): placeholder-less f-strings replaced with
    # messages that name the file and HTTP status.
    if not exists:
        raise RuntimeError(f"File '{filename}' does not exist!")

    if dry_run:
        return _generate_diff(original, filename, filename, content)

    # Only commit when the content actually changed.
    if original != content:
        with LOCK:
            url = f"{url}/api/v4/projects/{quoted_repository}/repository/files/{quoted_filename}"
            data = {
                "branch": branch,
                "content": content,
                "commit_message": commit_message,
            }
            resp = session.put(url=url, data=data)
            if resp.status_code != 200:
                raise RuntimeError(
                    f"Unable to update file '{filename}': {resp.status_code}"
                )
    return _generate_diff(original, filename, filename, content)


def _get(
    task: Task,
    session: requests.Session,
    url: str,
    repository: str,
    filename: str,
    destination: str,
    ref: str,
    dry_run: bool,
) -> str:
    """Fetch *filename* at *ref* into *destination*; returns the diff."""
    # if destination is not provided, use the filename as destination in
    # current directory
    if destination == "":
        destination = filename

    (_, local) = _local_exists(task, destination)
    (status, content) = _remote_exists(task, session, url, repository, filename, ref)

    if not status:
        raise RuntimeError(f"Unable to get file '{filename}' at ref '{ref}'")

    if not dry_run:
        # Only touch the local file when it differs from the remote.
        if local != content:
            with open(destination, "w") as f:
                f.write(content)

    return _generate_diff(local, destination, destination, content)


def gitlab(
    task: Task,
    url: str,
    token: str,
    repository: str,
    filename: str,
    content: str = "",
    action: str = "create",
    dry_run: Optional[bool] = None,
    branch: str = "master",
    destination: str = "",
    ref: str = "master",
    commit_message: str = "",
) -> Result:
    """
    Exposes some of the Gitlab API functionality for operations on files
    in a Gitlab repository.

    Example:

        nornir.run(files.gitlab,
                   action="create",
                   url="https://gitlab.localhost.com",
                   token="ABCD1234",
                   repository="test",
                   filename="config",
                   ref="master")

    Arguments:
        dry_run: Whether to apply changes or not
        url: Gitlab instance URL
        token: Personal access token
        repository: source/destination repository
        filename: source/destination file name
        content: content to write
        action: ``create``, ``update``, ``get``
        branch: destination branch
        destination: local destination filename (only used in get action)
        ref: branch, commit hash or tag (only used in get action)
        commit_message: commit message

    Returns:
        Result object with the following attributes set:
          * changed (``bool``):
          * diff (``str``): unified diff

    Raises:
        ValueError: if *action* is not one of the supported actions.
    """
    dry_run = dry_run if dry_run is not None else task.is_dry_run()

    session = requests.session()
    session.headers.update({"PRIVATE-TOKEN": token})

    if commit_message == "":
        commit_message = "File created with nornir"

    if action == "create":
        diff = _create(
            task,
            session,
            url,
            repository,
            filename,
            content,
            branch,
            commit_message,
            dry_run,
        )
    elif action == "update":
        diff = _update(
            task,
            session,
            url,
            repository,
            filename,
            content,
            branch,
            commit_message,
            dry_run,
        )
    elif action == "get":
        diff = _get(task, session, url, repository, filename, destination, ref, dry_run)
    else:
        # BUGFIX: an unknown action previously fell through and raised
        # NameError on the unbound `diff`; fail explicitly instead.
        raise ValueError(f"Unknown action: {action!r}")
    return Result(host=task.host, diff=diff, changed=bool(diff))
nilq/baby-python
python
# Imports
from PIL import Image


class RotateImage(object):
    '''
    Transform that rotates an image about its centre by a fixed angle.
    '''

    def __init__(self, degrees):
        '''
        Arguments:
        degrees: rotation angle in degrees (counter-clockwise).
        '''
        # Remember the angle; it is applied on every call.
        self.degrees = degrees

    def __call__(self, sample):
        '''
        Arguments:
        sample (PIL image)

        Returns:
        image (PIL image) rotated by the configured angle about its centre
        '''
        rotated = sample.rotate(self.degrees)
        return rotated
nilq/baby-python
python
""" @author:ACool(www.github.com/starFalll) 根据微博用户动态进行词云,词频分析,和时间分析 """ import jieba from wordcloud import WordCloud from sqlalchemy import create_engine, MetaData,Table, Column, Integer, String, ForeignKey,update,select import re from collections import Counter from pyecharts import Bar, Pie from weibo.Connect_mysql import Connect #去掉表情和一些不必要的符号 def format_content(content): content = content.replace(u'\xa0', u' ') content = re.sub(r'\[.*?\]','',content) content = content.replace('\n', ' ') return content #画出词云 def create_wordcloud(content,image='weibo.jpg',max_words=5000,max_font_size=50): cut_text = " ".join(content) cloud = WordCloud( # 设置字体,不指定就会出现乱码 font_path="HYQiHei-25J.ttf", # 允许最大词汇 max_words=max_words, # 设置背景色 # background_color='white', # 最大号字体 max_font_size=max_font_size ) word_cloud = cloud.generate(cut_text) word_cloud.to_file(image) # 分词并去除停用词 def word_segmentation(content, stop_words): # 使用 jieba 分词对文本进行分词处理 jieba.enable_parallel() seg_list = jieba.cut(content) seg_list = list(seg_list) # 去除停用词 user_dict = [' ', '哒'] filter_space = lambda w: w not in stop_words and w not in user_dict word_list = list(filter(filter_space, seg_list)) return word_list #将数据库中的微博动态转化为字符串 def get_time_str(uid): _,engine = Connect('../conf.yaml') # 连接数据库 conn = engine.connect() metadata = MetaData(engine) wb_data = Table('wb_data', metadata, autoload=True) s = select([wb_data]).where(wb_data.c.uid==uid) res = conn.execute(s) conn.close() str = '' time_lists = [] for row in res: str += row[2] + '\n' time_lists.append(row[3]) return time_lists,str # 词频统计 # 返回前 top_N 个值,如果不指定则返回所有值 def word_frequency(word_list, *top_N): if top_N: counter = Counter(word_list).most_common(top_N[0]) else: counter = Counter(word_list).most_common() return counter #画出词频图 def plot_chart(counter, chart_type='Bar'): items = [item[0] for item in counter] values = [item[1] for item in counter] if chart_type == 'Bar': chart = Bar('微博动态词频统计') chart.add('词频', items, values, is_more_utils=True) else: chart 
= Pie('微博动态词频统计') chart.add('词频', items, values, is_label_show=True, is_more_utils=True) chart.render('weibo_wordfrq.html') #画出微博发布时间的统计图 def plot_create_time(time_lists): recent_time = re.compile(r'\d{2}月\d{2}日',re.S) long_time = re.compile(r'(\d{4}-\d{2}-\d{2})',re.S) tmp_lists = []#保存**月**日格式的数据 tmp_nums = []#统计**月**日发帖数量 long_lists = []#保存20**-**-**格式的数据 long_nums = []#统计20**-**-**发帖数量 for t in time_lists: res = re.findall(recent_time, t) if(res):#res[0]为**月**日格式的数据 if(not tmp_lists or res[0]!= tmp_lists[-1]):#列表为空或者不与前一个日期重复 tmp_lists.append(res[0]) tmp_nums.append(1) else:#与前一个日期重复,计数加一 tmp_nums[-1]+=1 else:#res[0]20**-**-**格式的数据 res = re.findall(long_time,t) if(not long_lists or res[0]!=long_lists[-1]): long_lists.append(res[0]) long_nums.append(1) else: long_nums[-1]+=1 #将时间按照从远到进的顺序排列 tmp_lists.reverse() tmp_nums.reverse() long_lists.reverse() long_nums.reverse() time_list = long_lists + tmp_lists time_nums = long_nums + tmp_nums chart = Bar('用户微博动态发布时间') chart.add('动态数', time_list, time_nums, is_more_utils=True,datazoom_range=[10,40],is_datazoom_show=True) chart.render("weibo_dynamic.html") #可以指定需要分析的用户的uid(必须先存在conf.yaml里面,并且运行了一次sina_spider程序) def main(uid): time_lists,str=get_time_str(uid)#将数据库中的微博动态转化为字符串 plot_create_time(time_lists) with open('data/stop_words.txt') as f: stop_words = f.read().split('\n') str=format_content(str) word_list=word_segmentation(str,stop_words)#分词并去除停用词 create_wordcloud(word_list) #画出词云 counter = word_frequency(word_list, 10)# 返回前 top_N 个值,如果不指定则返回所有值 print(counter) plot_chart(counter)#会生成词频图保存在weibo_wordfrq.html中 if __name__=='__main__': conf, _ = Connect('../conf.yaml') uid = conf.get('uids') uid = list(uid.values())[0] main(uid)#指定需要分析的用户的uid(必须先存在conf.yaml里面,并且运行了一次sina_spider程序),默认为conf.yaml中的第一条uid
nilq/baby-python
python
import os

DESCRIPTION = "sets a variable for the current module"


def autocomplete(shell, line, text, state):
    """Readline completer for the ``set`` command.

    Once an option name has been typed, completes its *value* (booleans,
    file paths, or enum members); otherwise completes option names and
    aliases. Per the readline completer protocol, returns the ``state``-th
    matching candidate, or None when candidates are exhausted.
    """
    env = shell.plugins[shell.state]
    # todo, here we can provide some defaults for bools/enums? i.e. True/False
    parts = line.split()
    if len(parts) > 1:
        optionname = parts[1]
        if optionname in [x.name for x in env.options.options if not x.hidden]:
            option = [x for x in env.options.options if x.name == optionname][0]
            options = []
            if option.boolean:
                options = [x for x in ["true", "false"] if x.upper().startswith(text.upper())]
            if option.file:
                options = filepaths(text)
            if option.implant:
                pass  # implant values have no completion source
            if option.enum:
                options = [x for x in option.enum if x.upper().startswith(text.upper())]
            if options:
                # FIX: previously raised IndexError once candidates ran out;
                # the completer protocol expects None instead.
                try:
                    return options[state]
                except IndexError:
                    return None
    options = [x.name + " " for x in env.options.options
               if x.name.upper().startswith(text.upper()) and not x.hidden]
    # FIX: check `x.alias` for truthiness BEFORE calling .upper() on it —
    # the original order crashed with AttributeError on a None alias.
    options += [x.alias + " " for x in env.options.options
                if x.alias and x.alias.upper().startswith(text.upper()) and not x.hidden]
    try:
        return options[state]
    except IndexError:  # FIX: was a bare except; only "no more candidates" is expected
        return None


def filepaths(text):
    """Complete filesystem paths for the word under the cursor.

    Inspects the readline buffer to locate the space-separated chunk being
    completed, then lists matching entries in that chunk's directory.
    Directories get a trailing separator appended, files a trailing space.
    Returns None when the chunk already names an existing file.
    """
    import readline
    everything = readline.get_line_buffer()
    cursor_idx = readline.get_begidx()

    # Walk the chunks until we pass the cursor position; `fullpath` is then
    # the chunk the user is completing.
    fullpath = ""
    idx = 0
    for chunk in everything.split(" "):
        fullpath = chunk
        idx += len(chunk) + 1
        if idx > cursor_idx:
            break

    if os.path.isfile(fullpath):
        return None

    directory = os.path.dirname(fullpath) if "/" in fullpath else "."

    res = []
    for candidate in os.listdir(directory):
        if not candidate.startswith(text):
            continue

        # os.path.join instead of manual separator concatenation.
        if os.path.isdir(os.path.join(directory, candidate)):
            res.append(candidate + os.path.sep)
        else:
            res.append(candidate + " ")
    return res


def help(shell):
    """No dedicated help text for ``set``; DESCRIPTION covers it."""
    pass


def execute(shell, cmd):
    """Execute ``set <OPTION> [value]``.

    With a value, assigns it (validated by the module's option table);
    without one, re-assigns the current value, which effectively echoes it.
    Unknown options are reported via ``shell.print_error``.
    """
    env = shell.plugins[shell.state]
    splitted = cmd.split()
    if len(splitted) > 1:
        key = splitted[1].upper()
        value = env.options.get(key)
        if value is not None:  # FIX: identity comparison instead of `!= None`
            # if it's >2, we set the third argument (rest of the line verbatim)
            if len(splitted) > 2:
                value = cmd.split(None, 2)[2]
            if not env.options.set(key, value):
                shell.print_error("That value is invalid")
                return
            shell.print_good("%s => %s" % (key, value))
        else:
            shell.print_error("Option '%s' not found." % (key))
nilq/baby-python
python
import cv2 import pytesseract pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' img_path = "Resources/text.png" img = cv2.imread(img_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convert to RGB hImg , wImg , = img.shape [0] , img.shape [1] print("Enter 1 to read image as characters and 2 to read image as words:") ch = int(input("Enter your choice: ")) if ch ==1: ## Image to boxes. config = r'--oem 3 --psm 6 outputphase digits' boxes = pytesseract. image_to_boxes(img , config = config) for b in boxes.splitlines(): b = b.split(' ') x,y,w,h = int(b[1]), int(b[2]), int(b[3]), int(b[4]) cv2.rectangle(img, (x,hImg -y), (w,hImg -h), (0,255,0), 1) cv2.putText(img , b[0] , (x,hImg -h ), cv2.FONT_HERSHEY_SIMPLEX , 1 , (255,0,255) , 2) cv2.imshow("Text", img) cv2.waitKey(0) # hImg , wImg = int(img.shape[0]) , int(img.shape[1]) elif ch ==2: ##Image to words. # Detect only digits. config = r'--oem 3 --psm 6 outputphase digits' boxes = pytesseract. image_to_data(img, config=config) # print(boxes) for x,b in enumerate(boxes.splitlines()): if x !=0: b = b.split() if len(b) == 12: x,y,w,h = int(b[6]), int(b[7]), int(b[8]), int(b[9]) cv2.rectangle(img, (x,y), (w+x, h+y), (0,255,0), 1) cv2.putText(img , b[-1] , (x,y ), cv2.FONT_HERSHEY_SIMPLEX , 1 , (0,0,255) , 1) cv2.imshow("Text", img) cv2.waitKey(0) else: print("Invalid choice")
nilq/baby-python
python
# -*- coding: utf-8 -*- """ hdu_api._internal_utils ----------------------- """ import sys from hdu_api import _pyDes _ver = sys.version_info #: Python 2.x? is_py2 = (_ver[0] == 2) #: Python 3.x? is_py3 = (_ver[0] == 3) def encrypt(data, first_key, second_key, third_key): bts_data = extend_to_16bits(data) bts_first_key = extend_to_16bits(first_key) bts_second_key = extend_to_16bits(second_key) bts_third_key = extend_to_16bits(third_key) i = 0 bts_result = [] while i < len(bts_data): # 将data分成每64位一段,分段加密 bts_temp = bts_data[i:i + 8] j, k, z = 0, 0, 0 while j < len(bts_first_key): # 分别取出 first_key 的64位作为密钥 des_k = _pyDes.des(bts_first_key[j: j + 8], _pyDes.ECB) bts_temp = list(des_k.encrypt(bts_temp)) j += 8 while k < len(bts_second_key): des_k = _pyDes.des(bts_second_key[k:k + 8], _pyDes.ECB) bts_temp = list(des_k.encrypt(bts_temp)) k += 8 while z < len(bts_third_key): des_k = _pyDes.des(bts_third_key[z:z + 8], _pyDes.ECB) bts_temp = list(des_k.encrypt(bts_temp)) z += 8 bts_result.extend(bts_temp) i += 8 str_result = '' for each in bts_result: if is_py2: each = ord(each) # 分别加密data的各段,串联成字符串 str_result += '%02X' % each return str_result def extend_to_16bits(data): """ 将字符串的每个字符前插入 0,变成16位,并在后面补0,使其长度是64位整数倍 :param data: :return: """ bts = data.encode() c = 0 if is_py2: c = chr(c) filled_bts = [] for each in bts: # 每个字符前插入 0 filled_bts.extend([c, each]) # 长度扩展到8的倍数,若不是8的倍数,后面添加0,便于DES加密时分组 while len(filled_bts) % 8 != 0: filled_bts.append(c) return filled_bts
nilq/baby-python
python
import os
import numpy as np


class Reversi:
    """Minimal 8x8 Reversi (Othello) environment.

    The board is a flat action space 0-63; cell values are Blank (0),
    Black (1) and White (2).
    """

    def __init__(self):
        # parameters
        self.name = os.path.splitext(os.path.basename(__file__))[0]
        self.Blank = 0
        self.Black = 1
        self.White = 2
        self.screen_n_rows = 8
        self.screen_n_cols = 8
        # Every board position doubles as an action id (0-63).
        self.enable_actions = np.arange(self.screen_n_rows*self.screen_n_cols)

        # variables
        self.reset()

    def reset(self):
        """Reset the board to the standard four-stone opening position."""
        self.screen = np.zeros((self.screen_n_rows, self.screen_n_cols))
        self.set_cells(27, self.White)
        self.set_cells(28, self.Black)
        self.set_cells(35, self.Black)
        self.set_cells(36, self.White)

    def get_cells(self, i):
        """Return the stone value at flat position ``i``."""
        r = int(i / self.screen_n_cols)
        c = int(i - (r * self.screen_n_cols))
        return self.screen[r][c]

    def set_cells(self, i, value):
        """Set the stone value at flat position ``i``."""
        r = int(i / self.screen_n_cols)
        c = int(i - (r * self.screen_n_cols))
        self.screen[r][c] = value

    def print_screen(self):
        """Print the board: blanks show their action number, stones show ●/○."""
        i = 0
        for r in range(self.screen_n_rows):
            s1 = ''
            for c in range(self.screen_n_cols):
                s2 = ''
                if self.screen[r][c] == self.Blank:
                    s2 = '{0:2d}'.format(self.enable_actions[i])
                elif self.screen[r][c] == self.Black:
                    s2 = '● '
                elif self.screen[r][c] == self.White:
                    s2 = '○ '
                s1 = s1 + ' ' + s2
                i += 1
            print(s1)

    def put_piece(self, action, color, puton=True):
        """Place a stone of ``color`` (1 or 2) at ``action`` (0-63).

        Returns -1 if the cell is occupied, otherwise the number of
        opponent stones captured (0 means the move is illegal).  With
        ``puton=False`` the board is left untouched, so the method also
        serves as a legality check.
        """
        if self.get_cells(action) != self.Blank:
            return -1

        # The eight directions on the flattened board are the offsets
        # [-9, -8, -7, -1, 1, 7, 8, 9], generated from the combinations of
        # [-1, 0, 1] (columns) and [-8, 0, 8] (rows), skipping (0, 0).
        # fi/fj bound how far we may walk before hitting the board edge.
        t, x, y, l = 0, action % 8, action // 8, []
        for di, fi in zip([-1, 0, 1], [x, 7, 7 - x]):
            for dj, fj in zip([-8, 0, 8], [y, 7, 7 - y]):
                if not di == dj == 0:
                    b, j, k, m, n = [], 0, 0, [], 0
                    # a: ids of the cells along this direction
                    a = self.enable_actions[action + di + dj::di + dj][:min(fi, fj)]
                    # b: stone values on those cells
                    for i in a:
                        b.append(self.get_cells(i))
                    for i in b:
                        if i == 0:
                            # blank: nothing can be captured in this direction
                            break
                        elif i == color:
                            # own stone: every opponent stone in between is captured
                            n = k     # fix the capture count
                            l += m    # commit the cells to flip
                            break
                        else:
                            # opponent stone: remember it as a flip candidate
                            k += 1
                            m.insert(0, a[j])
                        j += 1
                    t += n
        if t == 0:
            return 0
        if puton:
            # Flip the captured stones ...
            for i in l:
                self.set_cells(i, color)
            # ... and place the new one.
            self.set_cells(action, color)
        return t

    def winner(self):
        """Return the winning colour, or 0 for a draw."""
        Black_score = self.get_score(self.Black)
        White_score = self.get_score(self.White)
        if Black_score == White_score:
            return 0               # draw
        elif Black_score > White_score:
            return self.Black      # Black wins
        elif Black_score < White_score:
            return self.White      # White wins

    def get_score(self, color):
        """Return the current number of stones of ``color`` on the board."""
        score = 0
        for i in self.enable_actions:
            if self.get_cells(i) == color:
                score += 1
        return score

    def get_enables(self, color):
        """Return the list of legal actions for ``color``."""
        result = []
        for action in self.enable_actions:
            if self.get_cells(action) == self.Blank:
                # Blank cell: playable if at least one stone would flip.
                if self.put_piece(action, color, False) > 0:
                    result.insert(0, action)
        return result

    def update(self, action, color):
        """Play ``action`` (0-63) for ``color`` if legal.

        Returns the number of captured stones (0 if the move was illegal
        and therefore not applied).
        """
        n = self.put_piece(action, color, False)  # stones this move would capture
        if n > 0:
            # The move is legal: apply it.
            self.put_piece(action, color)
        return n

    def isEnd(self):
        """Return True when neither player can move (or the board is full)."""
        e1 = self.get_enables(self.Black)
        e2 = self.get_enables(self.White)
        if len(e1) == 0 and len(e2) == 0:
            # Game over once both sides must pass.
            return True
        for action in self.enable_actions:
            if self.get_cells(action) == self.Blank:
                return False
        return True


if __name__ == "__main__":
    # game
    env = Reversi()
    print("------------- GAME START ---------------")
    while not env.isEnd():
        for i in range(1, 3):
            if i == env.Black:
                print("*** 先手ターン● ***")
            else:
                print("*** 後手ターン○ ***")

            env.print_screen()
            enables = env.get_enables(i)
            if len(enables) > 0:
                flg = False
                while not flg:
                    print("番号を入力してください")
                    print(enables)
                    inp = input('>>> ')
                    action_t = int(inp)
                    for j in enables:
                        if action_t == j:
                            flg = True
                            break
                # BUG FIX: the class defines `update`, not `execute_action`;
                # the original call raised AttributeError on every move.
                n = env.update(action_t, i)
            else:
                print("パス")

    print("*** ゲーム終了 ***")
    env.print_screen()
    # NOTE(review): a draw (winner() == 0) is reported as a win for White here.
    if env.winner() == env.Black:
        print("先手●の勝ち! スコアは、{:}/{:}です。".format(env.get_score(env.Black), len(env.enable_actions)))
    else:
        print("後手○の勝ち! スコアは、{:}/{:}です。".format(env.get_score(env.White), len(env.enable_actions)))
nilq/baby-python
python
__version__ = "v0.0.dev"
nilq/baby-python
python
# -*- coding: utf-8 -*- """Test different properties in FlowProposal""" from nessai.proposal import FlowProposal def test_poolsize(proposal): """Test poolsize property""" proposal._poolsize = 10 proposal._poolsize_scale = 2 assert FlowProposal.poolsize.__get__(proposal) == 20 def test_dims(proposal): """Test dims property""" proposal.names = ['x', 'y'] assert FlowProposal.dims.__get__(proposal) == 2 def test_rescaled_dims(proposal): """Test rescaled_dims property""" proposal.rescaled_names = ['x', 'y'] assert FlowProposal.rescaled_dims.__get__(proposal) == 2 def test_dtype(proposal): """Test dims property""" proposal.names = ['x', 'y'] proposal._x_dtype = None assert FlowProposal.x_dtype.__get__(proposal) == \ [('x', 'f8'), ('y', 'f8'), ('logP', 'f8'), ('logL', 'f8')] def test_prime_dtype(proposal): """Test dims property""" proposal.rescaled_names = ['x', 'y'] proposal._x_prime_dtype = None assert FlowProposal.x_prime_dtype.__get__(proposal) == \ [('x', 'f8'), ('y', 'f8'), ('logP', 'f8'), ('logL', 'f8')] def test_population_dtype(proposal): """Test dims property""" proposal.x_dtype = \ [('x', 'f8'), ('y', 'f8'), ('logP', 'f8'), ('logL', 'f8')] proposal.use_x_prime_prior = False assert FlowProposal.population_dtype.__get__(proposal) == \ [('x', 'f8'), ('y', 'f8'), ('logP', 'f8'), ('logL', 'f8')] def test_population_dtype_prime_prior(proposal): """Test dims property""" proposal.x_prime_dtype = \ [('x_p', 'f8'), ('y_p', 'f8'), ('logP', 'f8'), ('logL', 'f8')] proposal.use_x_prime_prior = True assert FlowProposal.population_dtype.__get__(proposal) == \ [('x_p', 'f8'), ('y_p', 'f8'), ('logP', 'f8'), ('logL', 'f8')]
nilq/baby-python
python
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.users.util import WEIRD_USER_IDS
from corehq.elastic import es_query, ES_URLS, stream_es_query, get_es
from corehq.pillows.mappings.user_mapping import USER_MAPPING, USER_INDEX
from couchforms.models import XFormInstance
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow, BulkPillow
from django.conf import settings


class UserPillow(AliasedElasticPillow):
    """
    Simple/Common Case properties Indexer

    Indexes all user documents into the hqusers Elasticsearch index.
    """

    document_class = CommCareUser   # while this index includes all users,
                                    # I assume we don't care about querying on properties specific to WebUsers
    couch_filter = "users/all_users"
    es_host = settings.ELASTICSEARCH_HOST
    es_port = settings.ELASTICSEARCH_PORT
    es_timeout = 60
    es_index_prefix = "hqusers"
    es_alias = "hqusers"
    es_type = "user"
    # Analyzer: whitespace tokenization + lowercasing for all string fields.
    es_meta = {
        "settings": {
            "analysis": {
                "analyzer": {
                    "default": {
                        "type": "custom",
                        "tokenizer": "whitespace",
                        "filter": ["lowercase"]
                    },
                }
            }
        }
    }
    es_index = USER_INDEX
    default_mapping = USER_MAPPING

    @memoized
    def calc_meta(self):
        #todo: actually do this correctly
        """
        override of the meta calculator since we're separating out all the types,
        so we just do a hash of the "prototype" instead to determined md5
        """
        return self.calc_mapping_hash({"es_meta": self.es_meta,
                                       "mapping": self.default_mapping})

    def get_mapping_from_type(self, doc_dict):
        """
        Define mapping uniquely to the user_type document.
        See below on why date_detection is False

        NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
        HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
        INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
        """
        #the meta here is defined for when the case index + type is created for the FIRST time
        #subsequent data added to it will be added automatically, but date_detection is necessary
        # to be false to prevent indexes from not being created due to the way we store dates
        #all are strings EXCEPT the core case properties which we need to explicitly define below.
        #that way date sort and ranges will work with canonical date formats for queries.
        return {
            self.get_type_string(doc_dict): self.default_mapping
        }

    def get_type_string(self, doc_dict):
        # All user docs share a single ES type.
        return self.es_type


class GroupToUserPillow(BulkPillow):
    """Propagates group membership changes onto the member users' ES docs."""

    couch_filter = "groups/all_groups"
    document_class = CommCareUser

    def __init__(self, **kwargs):
        super(GroupToUserPillow, self).__init__(**kwargs)
        self.couch_db = Group.get_db()

    def change_trigger(self, changes_dict):
        """On a group change, add the group's id/name to each member user's
        __group_ids / __group_names fields in the user index (if missing)."""
        es = get_es()
        user_ids = changes_dict["doc"].get("users", [])
        q = {"filter": {"and": [{"terms": {"_id": user_ids}}]}}
        for user_source in stream_es_query(es_url=ES_URLS["users"], q=q,
                                           fields=["__group_ids", "__group_names"]):
            group_ids = set(user_source.get('fields', {}).get("__group_ids", []))
            group_names = set(user_source.get('fields', {}).get("__group_names", []))
            if changes_dict["doc"]["name"] not in group_names or changes_dict["doc"]["_id"] not in group_ids:
                group_ids.add(changes_dict["doc"]["_id"])
                group_names.add(changes_dict["doc"]["name"])
                doc = {"__group_ids": list(group_ids), "__group_names": list(group_names)}
                # Partial update of the user doc with the merged group fields.
                es.post("%s/user/%s/_update" % (USER_INDEX, user_source["_id"]),
                        data={"doc": doc})

    def change_transport(self, doc_dict):
        # All work happens in change_trigger; nothing to transport.
        pass

    def send_bulk(self, payload):
        pass


class UnknownUsersPillow(BulkPillow):
    """
    This pillow adds users from xform submissions that come in to the User Index if they don't exist in HQ
    """
    document_class = XFormInstance
    couch_filter = "couchforms/xforms"
    include_docs_when_preindexing = False

    def __init__(self, **kwargs):
        super(UnknownUsersPillow, self).__init__(**kwargs)
        self.couch_db = XFormInstance.get_db()
        self.user_db = CouchUser.get_db()
        self.es = get_es()

    def get_fields_from_emitted_dict(self, emitted_dict):
        # Shape used when the change comes from a couch view emit:
        # key = [?, domain], value = {user_id, username}, id = xform id.
        domain = emitted_dict['key'][1]
        user_id = emitted_dict['value'].get('user_id')
        username = emitted_dict['value'].get('username')
        xform_id = emitted_dict['id']
        return user_id, username, domain, xform_id

    def get_fields_from_doc(self, doc):
        # Shape used when the change carries the full XFormInstance doc.
        form_meta = doc.get('form', {}).get('meta', {})
        domain = doc.get('domain')
        user_id = form_meta.get('userID')
        username = form_meta.get('username')
        xform_id = doc.get('_id')
        return user_id, username, domain, xform_id

    @memoized
    def _user_exists(self, user_id):
        return self.user_db.doc_exist(user_id)

    def change_trigger(self, changes_dict):
        """Index a stub user doc for a form submitter unknown to HQ."""
        if 'key' in changes_dict:
            user_id, username, domain, xform_id = self.get_fields_from_emitted_dict(changes_dict)
        else:
            doc = changes_dict['doc'] if 'doc' in changes_dict else self.couch_db.open_doc(changes_dict['id'])
            user_id, username, domain, xform_id = self.get_fields_from_doc(doc)

        if user_id in WEIRD_USER_IDS:
            # Known-bogus submitter ids are treated as anonymous.
            user_id = None
        es_path = USER_INDEX + "/user/"

        # Only index if the user is unknown to both couch and ES.
        if (user_id and not self._user_exists(user_id)
                and not self.es.head(es_path + user_id)):
            doc_type = "AdminUser" if username == "admin" else "UnknownUser"
            doc = {
                "_id": user_id,
                "domain": domain,
                "username": username,
                "first_form_found_in": xform_id,
                "doc_type": doc_type,
            }
            if domain:
                doc["domain_membership"] = {"domain": domain}
            self.es.put(es_path + user_id, data=doc)

    def change_transport(self, doc_dict):
        pass

    def send_bulk(self, payload):
        pass


def add_demo_user_to_user_index():
    """Seed the user index with the static demo_user document."""
    es = get_es()
    es_path = USER_INDEX + "/user/demo_user"
    es.put(es_path, data={"_id": "demo_user", "username": "demo_user", "doc_type": "DemoUser"})
nilq/baby-python
python
"""Contains classes to store the result of a genetic algorithm run. Additionally, the classes in this module allow for figure generation. """ from abc import ABC import copy import enum import math import random from typing import Dict, List, Union from os import listdir, mkdir from matplotlib import pyplot as plt from matplotlib.colors import LogNorm import matplotlib.lines as mlines import numpy as np import pandas as pd import ga_configs import mod_protocols as protocols import mod_trace as trace import mod_kernik as kernik ############################################# from scipy.integrate import ode, solve_ivp import os, sys, time import numpy as np import matplotlib.pyplot as plt import pickle import bisect sys.path.append('../') sys.path.append('../Protocols') import protocol_lib import simulator_scipy import simulator_myokit import myokit from Models.br1977 import BR1977 from Models.ord2011 import ORD2011 import mod_trace def get_model_response_JK( model, protocol, prestep=None): model.cell.mode = 1 simulator = simulator_scipy.Simulator(model) if prestep == None: print("There is no pre-step simulation.") elif prestep == 5000: y0 = [-8.00000003e+01, 6.94549002e+00, 6.94553614e+00, 1.44766826e+02, 1.44766919e+02, 5.46283800e-05, 5.38550879e-05, 1.25377970e+00, 1.25388392e+00, 1.63694063e-02, 3.83078124e-01, 3.83078124e-01, 3.83078124e-01, 1.83137288e-01, 3.83078124e-01, 8.60298196e-04, 2.65750243e-01, 1.36775744e-01, 1.71654793e-03, 9.98192733e-01, 9.98192733e-01, 8.74934836e-04, 9.98192733e-01, 9.98192733e-01, 1.55207580e-08, 9.99999920e-01, 9.99999921e-01, 9.99999920e-01, 9.99999920e-01, 9.99999920e-01, 4.72523502e-04, 9.99999920e-01, 9.99999920e-01, 2.60425715e-05, 2.54957029e-05, 4.27866636e-04, 4.72094402e-04, 9.98307893e-01, 6.06464770e-07, 7.58083578e-07, 2.45432407e-04] simulator.model.y0 = y0 else: simulator.pre_simulate( pre_step=prestep, protocol='constant') solution = simulator.simulate( [0, protocol.get_voltage_change_endpoints()[-1]], 
method='BDF', max_step=1, atol=1e-06, rtol=1e-6) command_voltages = [protocol.get_voltage_at_time(t) for t in solution.t] tr = trace.Trace(protocol, cell_params=None, t=solution.t, y=command_voltages, # simulator.model.V, command_voltages=command_voltages, current_response_info=simulator.model.current_response_info, default_unit=None) # print(solution) return tr def get_model_response_with_myokit( simulator, protocol, prestep=None): model, p, s = myokit.load( "../mmt-model-files/ohara-cipa-v1-2017_VC.mmt" ) simulator = simulator_myokit.Simulator(model, protocol, max_step=1.0, abs_tol=1e-8, rel_tol=1e-8, vhold=-80) # 1e-12, 1e-14 # 1e-08, 1e-10 # max_step=1, atol=1E-2, rtol=1E-4 # defalt: abs_tol=1e-06, rel_tol=0.0001 # simulator.reset_simulation_with_new_protocol( protocol ) simulator.simulation.set_constant('cell.mode', 1) if prestep == None: print("There is no pre-step simulation.") elif prestep == 15000: y0 = [-8.69999996e+01, 6.94732336e+00, 6.94736848e+00, 1.44992431e+02, 1.44992434e+02, 5.48328391e-05, 5.40431668e-05, 1.25617506e+00, 1.25618638e+00, 8.12231733e-03, 6.62326077e-01, 6.62326077e-01, 6.62326077e-01, 4.14582271e-01, 6.62326077e-01, 2.27721811e-04, 4.79645030e-01, 2.87189165e-01, 1.07103663e-03, 9.99468797e-01, 9.99468797e-01, 5.45740810e-04, 9.99468797e-01, 9.99468797e-01, 2.96634937e-09, 9.99999988e-01, 9.99999988e-01, 9.99999988e-01, 9.99999988e-01, 9.99999988e-01, 4.78979614e-04, 9.99999988e-01, 9.99999988e-01, 9.28750206e-06, 9.23466020e-06, 1.96054631e-04, 2.15667189e-04, 9.97012407e-01, 1.27419629e-07, 1.59274616e-07, 2.47073549e-04] simulator.set_initial_values(y0) else: simulator.pre_simulate(pre_step=prestep, sim_type=1) d = simulator.simulate(protocol.get_voltage_change_endpoints()[-1], log_times=None, extra_log=['ina.INa', 'inal.INaL', 'ito.Ito', 'ical.ICaL', 'ikr.IKr', 'iks.IKs', 'ik1.IK1']) times = d['engine.time'] command_voltages = [protocol.get_voltage_at_time(t) for t in times] tr = trace.Trace(protocol, cell_params=None, t=times, 
y=command_voltages, # simulator.model.V, command_voltages=command_voltages, current_response_info=simulator.current_response_info, default_unit=None) return tr
nilq/baby-python
python
from flask import Flask from flask import request from flask import jsonify from flask import send_from_directory from flask import Response from flask import abort from werkzeug import secure_filename from setup import * app = Flask(__name__) #app = Flask(__name__, static_url_path='') app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH app.config['UPLOAD_FOLDER' ] = UPLOAD_FOLDER app.config['RNG_ID' ] = RNG_ID app.config['DB_NAME' ] = DB_NAME
nilq/baby-python
python
import gizeh from .base_form import BaseForm from .base_picture import BasePicture @BasePicture.register_subclass('circle') class Circle(BaseForm): def draw(self, ind): circle = gizeh.circle( r=self.radius[ind], xy=self.center, fill=self.color[ind], **self.kwargs ) circle.draw(self.surface)
nilq/baby-python
python
from datetime import datetime from gtts import gTTS def speech_1(text, sender): msg = 'Da: {}. Oggetto: {}'.format(sender, text) tts = gTTS(text=msg, lang='it') now = datetime.now() title = sender.replace(' ', '_') + now.strftime('_%d-%m-%y_%H-%M-%S') + '.mp3' print(title) tts.save(title) print('_________________________________________________________________') if __name__ == '__main__': msg = ''' So dove abiti. Ti ho visto l'altra sera con il cane. Se provi a spaventare Pietro ancora una volta, ti metto una puntina su per il culo. ''' speech_1(msg, 'Pit')
nilq/baby-python
python
import random

import numpy as np
import torch


class min_max_node_tracker:
    """Tracks the min/max node value seen so far, shared across a search tree,
    so node values can be normalized to [0, 1] (MuZero-style)."""

    def __init__(self):
        self.max = float('-inf')
        self.min = float('inf')

    def normalized(self, node_Q):
        """
        Normalize the value to [0, 1]

        Parameters
        ----------
        node_Q : float
            the node score form any node

        Returns
        -------
        float
            normalized score to [0, 1]
        """
        if self.min != self.max:
            # TODO : this shouldn't have to be called again. Find out why it is not called the first time.
            self.update(node_Q)
            return (node_Q - self.min) / (self.max - self.min)
        # No spread yet (min == max): return the raw value unchanged.
        return node_Q

    def update(self, node_q):
        """
        Update the min-max tracker

        Parameters
        ----------
        node_q : float
            the node value
        """
        self.max = max(self.max, node_q)
        self.min = min(self.min, node_q)

    def __str__(self):
        return "min : {}, max : {}".format(self.min, self.max)

    def __repr__(self):
        return self.__str__()


class node:
    """A node in the Monte-Carlo search tree.

    Note: `parrent` (sic) is the established attribute/parameter name used
    throughout; kept for interface compatibility.
    """

    def __init__(self, parrent, node_id=None, hidden_state=None, prior=0):
        assert type(parrent) in [node, type(None)], type(parrent)
        assert hidden_state is None or torch.is_tensor(
            hidden_state), "{} {}".format(type(hidden_state), hidden_state)
        assert node_id is None or type(node_id) == int

        self.children = {}
        self.node_id = node_id
        self.parrent = parrent
        # Register this node on the parent under its action id.
        if self.parrent is not None and self.node_id not in self.parrent.children:
            self.parrent.children[self.node_id] = self
        # The min/max tracker is shared with the whole tree (root owns it).
        self.min_max_node_tracker = min_max_node_tracker(
        ) if parrent is None else parrent.min_max_node_tracker

        self._value = 0
        self.value_sum = 0        # accumulated backed-up value
        self.explored_count = 0   # visit count N(s, a)
        self.wins_count = 0
        self.outcome = 0

        self.reward = 0           # immediate reward predicted for this transition
        self.policy = None        # raw policy output for this state
        self.prior = prior        # prior probability P(s, a)
        self.value_of_model = 0
        self.cumulative_discounted_reward = 0
        self.has_init = False
        self.hidden_state = hidden_state
        self.environment_state = None

        self.depth = 0 if parrent is None else (parrent.depth + 1)
        self.max_depth = self.depth
        self.available_children_paths = None  # lazily-built list of unexpanded actions
        self.score_metric = self.upper_confidence_boundary
        self.ucb_score_parts = [ ]  # last UCB computation, kept for debugging
        self.random_id = str(np.random.rand())

    def add_exploration_noise(self):
        """
        Add exploration noise as described in the paper in Appendix C
        """
        dirichlet_alpha = 0.03
        root_exploration_fraction = 0.25
        child_actions = list(self.children.values())
        noise = np.random.dirichlet([dirichlet_alpha] * len(child_actions))
        # Mix each child's prior with its Dirichlet noise sample.
        for child_action, noise in zip(child_actions, noise):
            child_action.prior = child_action.prior * \
                (1 - root_exploration_fraction) + \
                noise * root_exploration_fraction

    def disable_illegal_actions(self, legal_actions):
        """
        Removes illegal actions

        Parameters
        ----------
        legal_actions : list
            list of legal actions
        """
        if not legal_actions is None:
            # we just delete the illegal actions from the node
            for action in list(self.children.keys()):
                if action not in legal_actions:
                    del self.children[action]

    def search_value_exploration_exploration(self):
        """
        Nodes seelection algorithm
        As described in section "Exploration and exploitation" from https://en.wikipedia.org/wiki/Monte_Carlo_tree_search

        Returns
        -------
        float
            the node score
        """
        # NOTE(review): uses log2 where the classic UCT formula uses ln — confirm.
        parrent_explored = np.log2(self.parrent.explored_count) / \
            self.explored_count if self.parrent.explored_count != 1 and self.explored_count != 0 else 0
        child_explored = self.wins_count / \
            self.explored_count if self.explored_count > 0 else 0
        c = np.sqrt(2)
        return child_explored + c * np.sqrt(parrent_explored)

    def upper_confidence_boundary(self):
        """
        The upper confidene boundary as described in the appendix B of the paper.

        Returns
        -------
        float
            the upper confidence boundary
        """
        if self.parrent is None:
            # The root has no action leading into it, hence no UCB.
            return 0
        self.c1 = 1.25
        self.c2 = 19652
        self.q_s_a = self.q
        self.p_s_a = self.prior
        # Total visits over all sibling actions.
        all_actions_sum = np.sum([
            i.explored_count for i in self.parrent.children.values()
        ])
        second_part_numerator_1 = np.sqrt(all_actions_sum)
        second_part_denominator_1 = (1 + self.explored_count)
        second_part_numerator_2 = (all_actions_sum + self.c2 + 1)
        second_part_denominator_2 = self.c2
        second_part = second_part_numerator_1 / second_part_denominator_1 * \
            (self.c1 + np.log(second_part_numerator_2 / second_part_denominator_2))
        value = self.q_s_a + self.p_s_a * second_part
        assert type(value) in [float, int, np.float64], "bad type {}, {}".format(
            type(value), value)
        # Kept for debugging: the intermediate terms of the last computation.
        self.ucb_score_parts = [
            self.q_s_a,
            self.p_s_a,
            all_actions_sum,
            second_part_numerator_1,
            second_part_denominator_1,
            second_part_numerator_2,
            second_part_denominator_2,
            second_part
        ]
        assert not np.isnan(value), "ucb score is nan {}".format(
            self.ucb_score_parts)
        return value

    @property
    def q(self):
        """
        Calculated the node value
        As described in appendix B

        Returns
        -------
        float
            node value score
        """
        reward = self.reward.item() if torch.is_tensor(self.reward) else self.reward
        node_value = self.node_value()
        value = self.min_max_node_tracker.normalized(
            node_value
        )
        assert type(reward) in [int, float]
        assert type(value) in [int, float]
        assert type(node_value) in [int, float]
        assert not np.isnan(reward), "reward is nan"
        assert not np.isnan(node_value), "node_value is nan"
        assert not np.isnan(value), "value is nan {}, {}".format(
            value, self.min_max_node_tracker)
        return reward + value

    @property
    def N(self):
        """
        Calculate the node visit count

        Returns
        -------
        int
            node visit count
        """
        return self.parrent.explored_count + 1 if self.parrent else 0

    @property
    def value(self):
        """
        Return the value of the node

        Returns
        -------
        float
            value of node (predicted by model)
        """
        return self.value_sum

    @value.setter
    def value(self, value):
        """
        Set the value

        Parameters
        ----------
        value : float
            the value of the node
        """
        self.value_sum = value.item() if torch.is_tensor(value) else value
        # Keep the shared normalizer in sync with the new value.
        self.min_max_node_tracker.update(self.node_value())

    def node_value(self):
        """
        The value of the node based on exploration

        Returns
        -------
        float
            value divided by exploration count
        """
        if self.explored_count == 0:
            return 0
        return self.value_sum / self.explored_count

    def on_node_creation(self, hidden_state, reward, policy, value):
        """
        When a node is created this callback will be used

        Parameters
        ----------
        hidden_state : torch.tensor
            the hidden state from the model
        reward : float
            the reward from the environment
        """
        self.reward = reward
        self.hidden_state = hidden_state
        self.policy = policy
        self.value_of_model = value
        self.value = value
        self.has_init = True

        # Drop a leading batch dimension if present, then softmax-normalize
        # this node's action logit into its prior.
        policy = policy[0] if len(policy.shape) > 1 else policy
        policy_sum = torch.sum(policy)
        # NOTE(review): divides exp(logit) by the sum of raw logits rather
        # than the sum of exponentials — confirm this is intended.
        self.prior = (torch.exp(policy[self.node_id]) / policy_sum).item()

    def get_a_children_node(self, children_count):
        """
        Returns a unexplored child node

        Parameters
        ----------
        children_count : int
            the count of available children

        Returns
        -------
        node
            the new child node
        """
        if self.available_children_paths is None:
            self.available_children_paths = list(
                filter(lambda x: x not in self.children, list(range(children_count))))
        if len(self.available_children_paths) == 0:
            return None
        # Pick an unexpanded action uniformly at random.
        picked_node = self.available_children_paths[random.randint(
            0, len(self.available_children_paths) - 1)]
        self.available_children_paths.remove(picked_node)
        return self.create_node(picked_node)

    def create_node(self, node_id):
        """
        Create a specific child node

        Parameters
        ----------
        node_id : int
            the action / node-id

        Returns
        -------
        node
            the new node
        """
        self.children[node_id] = node(self, node_id=node_id)
        return self.children[node_id]

    def get_children_with_id(self, node_id):
        """
        Get node if it is a existing child node else none

        Parameters
        ----------
        node_id : int
            the node id

        Returns
        -------
        node
            the existing child node, or None
        """
        return self.children.get(node_id, None)

    def create_children_if_not_exist(self, node_id):
        """
        Create node if it does not exist as child

        Parameters
        ----------
        node_id : int
            the node id

        Returns
        -------
        node
            the newly created node
        """
        # NOTE(review): the local `node` shadows the class name inside this method.
        node = self.get_children_with_id(node_id)
        if node is None:
            return self.create_node(node_id)
        return node

    def get_best_action(self):
        """
        Get the best available action based on children node score

        Returns
        -------
        int
            action
        """
        return max(self.children.items(), key=lambda x: x[1].search_value_exploration_exploration())[1].node_id

    def __str__(self):
        return "id : {}, value: {}, depth: {}".format(self.node_id, self.value_sum, self.depth)

    def __repr__(self):
        return self.__str__()
nilq/baby-python
python
""" We are given head, the head node of a linked list containing unique integer values. We are also given the list G, a subset of the values in the linked list. Return the number of connected components in G, where two values are connected if they appear consecutively in the linked list. Example 1: Input: head: 0->1->2->3 G = [0, 1, 3] Output: 2 Explanation: 0 and 1 are connected, so [0, 1] and [3] are the two connected components. Example 2: Input: head: 0->1->2->3->4 G = [0, 3, 1, 4] Output: 2 Explanation: 0 and 1 are connected, 3 and 4 are connected, so [0, 1] and [3, 4] are the two connected components. Note: If N is the length of the linked list given by head, 1 <= N <= 10000. The value of each node in the linked list will be in the range [0, N - 1]. 1 <= G.length <= 10000. G is a subset of all values in the linked list. """ # Definition for singly-linked list. class ListNode: def __init__(self, x): self.val = x self.next = None class Solution: def numComponents(self, head, G): """ :type head: ListNode :type G: List[int] :rtype: int """ G = set(G) cc = 0 last = False node = head while node: if node.val in G: if not last: cc += 1 last = True else: last = False node = node.next return cc sol = Solution().numComponents nodes = [ListNode(i) for i in range(5)] for i in range(4): nodes[i].next = nodes[i + 1] head = nodes[0] # 0->1->2->3->4 print(sol(head, [0, 1, 3])) print(sol(head, [0, 3, 1, 4]))
nilq/baby-python
python
# -*- coding: utf-8 -*- import unittest from clu.phontools.struct import * from .utils import phrase1 """ Test `clu.phontools.struct.Phrase` behaviors """ class PhraseTests(unittest.TestCase): phrase1: Phrase = phrase1 def test_equality(self): """Comparisions of pairs of `clu.phontools.struct.Phrase` should be sensitive to the order of `clu.phontools.struct.Phrase.words`.""" phrase: Phrase = PhraseTests.phrase1 # the order of words matters in a phrase phrase2 = Phrase(words=phrase1.words[-1::]) self.assertNotEqual(phrase1, phrase2) def test_coarse_stress(self): """A `clu.phontools.struct.Phrase` should have a coarse_stress property and mask_stress method.""" phrase: Phrase = PhraseTests.phrase1 # syllable structure in terms of stress (weak or strong) # should return ['WS', 'S', 'S', 'S'] self.assertEqual(phrase.coarse_stress, ["WS", "S", "S", "S"]) def test_mask_syllables(self): """A `clu.phontools.struct.Phrase.mask_syllables` should mask strong (S) and weak (W) stress.""" phrase: Phrase = PhraseTests.phrase1 # num. syllables for each word represented using a mask. # should return ['XX', 'X', 'X', 'X'] self.assertEqual(phrase.mask_syllables(mask="X"), ["XX", "X", "X", "X"])
nilq/baby-python
python
from django.db import models
from django.utils.translation import gettext as _
from django.conf import settings
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from datetime import date
from django.urls import reverse_lazy

from app.models import TimeStampMixin


class TypeOfService(TimeStampMixin):
    """Model to set the type of service."""

    name = models.CharField(verbose_name=_("Name"), max_length=128, blank=False, null=False)
    active = models.BooleanField(verbose_name=_("Active"), default=True)
    color = models.CharField(_("Color"), default="#ffffff", max_length=7)

    def __str__(self) -> str:
        return "{}".format(self.name)


class WebService(TimeStampMixin):
    """
    Model to represent a web service, like build a One Page, a web system,
    an application for mobile with backend.
    """

    client = models.ForeignKey("client.Client", on_delete=models.CASCADE, verbose_name=_("Client"))
    type_of_service = models.ForeignKey("service.TypeOfService", on_delete=models.SET_NULL, null=True, verbose_name=_("Type of Service"))
    domain = models.ForeignKey("service.Domain", on_delete=models.SET_NULL, null=True, verbose_name=_("Domain"))
    # The contract is ManyToManyField because the same service can have
    # multiple contracts over time, this occurs for example when a contract
    # has expired and then a new one is contracted.
    contract = models.ManyToManyField("service.Contract", blank=True, verbose_name=_("Contract"))
    date = models.DateField(
        verbose_name=_("Date"),
        default=timezone.now,
        help_text=_("This date is used for statistics, build charts. "),
    )

    def __str__(self) -> str:
        return "{} - {}".format(self.client, self.type_of_service)

    def get_absolute_url(self):
        return reverse_lazy(f'{self._meta.app_label}:{self._meta.model_name}:details')

    @property
    def contracts(self):
        """Comma-separated display string of all related contracts."""
        return ', '.join(c.__str__() for c in self.contract.all())

    @staticmethod
    def get_exclude_fields():
        """
        Fields of the current model that are marked to be excluded
        from visualization.
        """
        return []

    def get_add_fields(self):
        """
        Custom fields to be added for visualization.
        Need to be a dict with {'name': content}
        """
        return {}

    def get_dict_data(self):
        """
        This method automatically gathers all the fields in the current
        model and returns them as a dictionary, used mainly to build a
        layout.
        """
        exclude = self.get_exclude_fields()
        data = {
            field.verbose_name: getattr(self, field.name)
            for field in self._meta.fields
            if field.name not in exclude
        }
        data.update(self.get_add_fields())
        return data


class Domain(TimeStampMixin):
    """Model to identify a domain (network domain)."""

    name = models.CharField(verbose_name=_("Name"), max_length=128)
    link = models.TextField(verbose_name=_("Link"))
    # fix: corrected user-facing typo "buyed" -> "bought"
    acquisition_date = models.DateField(verbose_name=_("Acquisition Date"), help_text=_("Date that the domain was bought."))
    active = models.BooleanField(_("Active"), default=True)
    # The contract is ManyToManyField because the same domain can have
    # multiple contracts over time, this occurs for example when a contract
    # has expired and then a new one is contracted.
    contract = models.ManyToManyField("service.Contract", blank=True, verbose_name=_("Contract"))

    def __str__(self) -> str:
        return "{}".format(self.name)

    def get_absolute_url(self):
        return reverse_lazy(f'{self._meta.app_label}:{self._meta.model_name}:details')

    @staticmethod
    def get_exclude_fields():
        """
        Fields of the current model that are marked to be excluded
        from visualization.
        """
        return []

    def get_add_fields(self):
        """
        Custom fields to be added for visualization.
        Need to be a dict with {'name': content}
        """
        return {}

    def get_dict_data(self):
        """
        This method automatically gathers all the fields in the current
        model and returns them as a dictionary, used mainly to build a
        layout.
        """
        exclude = self.get_exclude_fields()
        data = {
            field.verbose_name: getattr(self, field.name)
            for field in self._meta.fields
            if field.name not in exclude
        }
        data.update(self.get_add_fields())
        return data


class Contract(TimeStampMixin):
    """
    Model to identify a contract, this will be used in most of services.
    This model will never exist alone, it is the complement of some service.

    A contract has a value, the start date that was signed/agreed upon, an
    expiration in months (because most contracts have a deadline to expire
    and it is usually in months). The end date can be generated using start
    date and expiration (months).
    """

    name = models.CharField(verbose_name=_("Name"), max_length=32, help_text=_("A friendly name to easy remember."))
    value = models.DecimalField(verbose_name=_("Value"), max_digits=settings.DEFAULT_MAX_DIGITS, decimal_places=settings.DEFAULT_DECIMAL_PLACES)
    start_date = models.DateField(verbose_name=_("Start Date"), help_text=_("Date that the contract was signed/agreed."))
    end_date = models.DateField(verbose_name=_("End Date"), null=True, blank=True)
    expiration = models.IntegerField(verbose_name=_("Expiration"), default=12, help_text=_("Expiration of the contract in months."))
    description = models.TextField(verbose_name=_("Description"), blank=True)
    attachment = models.FileField(verbose_name=_("Attachment"), upload_to="contracts", blank=True, null=True)

    def save(self, *args, **kwargs):
        # end_date is always derived from start_date + expiration.
        self.end_date = self.expiration_date()
        super().save(*args, **kwargs)

    def __str__(self) -> str:
        return self.name

    def expiration_date(self):
        """Return the expiration date using a relativedelta."""
        return self.start_date + relativedelta(months=self.expiration)

    def months_passed(self):
        """Return the number of whole months from start_date until today.

        Fix: the original called relativedelta(self.start_date, date.today()),
        which yields negative components for past start dates, and read only
        .months, silently dropping whole years.
        """
        delta = relativedelta(date.today(), self.start_date)
        return delta.years * 12 + delta.months

    def is_expired(self):
        """
        Check if the contract is expired.
        (start_date + relativedelta(months=expiration)) < today
        """
        return self.expiration_date() < date.today()

    def get_reference(self):
        """
        Return "Domain" if this contract is linked to a domain,
        "Web Service" if linked to a web service, "None" otherwise.
        """
        # exists() lets the database short-circuit instead of counting rows.
        if self.domain_set.exists():
            return _("Domain")
        if self.webservice_set.exists():
            return _("Web Service")
        return _("None")

    def get_absolute_url(self):
        return reverse_lazy(f'{self._meta.app_label}:{self._meta.model_name}:details')

    @staticmethod
    def get_exclude_fields():
        """
        Fields of the current model that are marked to be excluded
        from visualization.
        """
        return []

    def get_add_fields(self):
        """
        Custom fields to be added for visualization.
        Need to be a dict with {'name': content}
        """
        return {}

    def get_dict_data(self):
        """
        This method automatically gathers all the fields in the current
        model and returns them as a dictionary, used mainly to build a
        layout.
        """
        exclude = self.get_exclude_fields()
        data = {
            field.verbose_name: getattr(self, field.name)
            for field in self._meta.fields
            if field.name not in exclude
        }
        data.update(self.get_add_fields())
        return data
nilq/baby-python
python
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-13 10:48 from __future__ import unicode_literals import annoying.fields import django.core.validators from django.db import migrations, models import django.db.models.deletion import django_fsm import silver.models.documents.base def move_documents_to_billing_document(apps, schema_editor): OldInvoiceModel = apps.get_model("silver", "Invoice") OldProformaModel = apps.get_model("silver", "Proforma") BillingDocumentBase = apps.get_model("silver", "BillingDocumentBase") db_alias = schema_editor.connection.alias fields_to_move = [ "series", "number", "archived_customer", "archived_provider", "due_date", "issue_date", "paid_date", "cancel_date", "sales_tax_percent", "sales_tax_name", "currency", "transaction_currency", "transaction_xe_rate", "transaction_xe_date", "state", "_total", "_total_in_transaction_currency", "customer", "pdf", "provider", ] for old_proforma in OldProformaModel.objects.using(db_alias).filter(invoice=None): new_proforma = BillingDocumentBase(kind="proforma") for field in fields_to_move: setattr(new_proforma, field, getattr(old_proforma, field)) new_proforma.save(using=db_alias) for transaction in old_proforma.old_proforma_transactions.all(): transaction.proforma = new_proforma transaction.save() for entry in old_proforma.old_proforma_entries.all(): entry.proforma = new_proforma entry.save() for log in old_proforma.old_proforma_logs.all(): log.proforma = new_proforma log.save() for old_invoice in OldInvoiceModel.objects.using(db_alias).all(): new_invoice = BillingDocumentBase(kind="invoice") for field in fields_to_move: setattr(new_invoice, field, getattr(old_invoice, field)) new_invoice.save(using=db_alias) if old_invoice.proforma: new_proforma = BillingDocumentBase( kind="proforma", related_document=new_invoice ) for field in fields_to_move: setattr(new_proforma, field, getattr(old_invoice.proforma, field)) new_proforma.save(using=db_alias) new_invoice.related_document = new_proforma 
new_invoice.save(using=db_alias) else: new_proforma = None for transaction in old_invoice.old_invoice_transactions.all(): transaction.invoice = new_invoice transaction.proforma = new_proforma transaction.save() for entry in old_invoice.old_invoice_entries.all(): entry.invoice = new_invoice entry.proforma = new_proforma entry.save() for log in old_invoice.old_invoice_logs.all(): log.invoice = new_invoice log.proforma = new_proforma log.save() class Migration(migrations.Migration): dependencies = [ ("silver", "0042_compute_totals_in_document_view"), ] operations = [ migrations.CreateModel( name="BillingDocumentBase", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "kind", models.CharField( db_index=True, max_length=8, verbose_name=silver.models.documents.base.get_billing_documents_kinds, ), ), ( "series", models.CharField( blank=True, db_index=True, max_length=20, null=True ), ), ("number", models.IntegerField(blank=True, db_index=True, null=True)), ( "archived_customer", models.JSONField(blank=True, default=dict, null=True), ), ( "archived_provider", models.JSONField(blank=True, default=dict, null=True), ), ("due_date", models.DateField(blank=True, null=True)), ("issue_date", models.DateField(blank=True, db_index=True, null=True)), ("paid_date", models.DateField(blank=True, null=True)), ("cancel_date", models.DateField(blank=True, null=True)), ( "sales_tax_percent", models.DecimalField( blank=True, decimal_places=2, max_digits=4, null=True, validators=[django.core.validators.MinValueValidator(0.0)], ), ), ( "sales_tax_name", models.CharField(blank=True, max_length=64, null=True), ), ( "currency", models.CharField( choices=[ ("AED", "AED (UAE Dirham)"), ("AFN", "AFN (Afghani)"), ("ALL", "ALL (Lek)"), ("AMD", "AMD (Armenian Dram)"), ("ANG", "ANG (Netherlands Antillean Guilder)"), ("AOA", "AOA (Kwanza)"), ("ARS", "ARS (Argentine Peso)"), ("AUD", "AUD (Australian Dollar)"), ("AWG", "AWG (Aruban 
Florin)"), ("AZN", "AZN (Azerbaijanian Manat)"), ("BAM", "BAM (Convertible Mark)"), ("BBD", "BBD (Barbados Dollar)"), ("BDT", "BDT (Taka)"), ("BGN", "BGN (Bulgarian Lev)"), ("BHD", "BHD (Bahraini Dinar)"), ("BIF", "BIF (Burundi Franc)"), ("BMD", "BMD (Bermudian Dollar)"), ("BND", "BND (Brunei Dollar)"), ("BOB", "BOB (Boliviano)"), ("BRL", "BRL (Brazilian Real)"), ("BSD", "BSD (Bahamian Dollar)"), ("BTN", "BTN (Ngultrum)"), ("BWP", "BWP (Pula)"), ("BYN", "BYN (Belarusian Ruble)"), ("BZD", "BZD (Belize Dollar)"), ("CAD", "CAD (Canadian Dollar)"), ("CDF", "CDF (Congolese Franc)"), ("CHF", "CHF (Swiss Franc)"), ("CLP", "CLP (Chilean Peso)"), ("CNY", "CNY (Yuan Renminbi)"), ("COP", "COP (Colombian Peso)"), ("CRC", "CRC (Costa Rican Colon)"), ("CUC", "CUC (Peso Convertible)"), ("CUP", "CUP (Cuban Peso)"), ("CVE", "CVE (Cabo Verde Escudo)"), ("CZK", "CZK (Czech Koruna)"), ("DJF", "DJF (Djibouti Franc)"), ("DKK", "DKK (Danish Krone)"), ("DOP", "DOP (Dominican Peso)"), ("DZD", "DZD (Algerian Dinar)"), ("EGP", "EGP (Egyptian Pound)"), ("ERN", "ERN (Nakfa)"), ("ETB", "ETB (Ethiopian Birr)"), ("EUR", "EUR (Euro)"), ("FJD", "FJD (Fiji Dollar)"), ("FKP", "FKP (Falkland Islands Pound)"), ("GBP", "GBP (Pound Sterling)"), ("GEL", "GEL (Lari)"), ("GHS", "GHS (Ghana Cedi)"), ("GIP", "GIP (Gibraltar Pound)"), ("GMD", "GMD (Dalasi)"), ("GNF", "GNF (Guinea Franc)"), ("GTQ", "GTQ (Quetzal)"), ("GYD", "GYD (Guyana Dollar)"), ("HKD", "HKD (Hong Kong Dollar)"), ("HNL", "HNL (Lempira)"), ("HRK", "HRK (Kuna)"), ("HTG", "HTG (Gourde)"), ("HUF", "HUF (Forint)"), ("IDR", "IDR (Rupiah)"), ("ILS", "ILS (New Israeli Sheqel)"), ("INR", "INR (Indian Rupee)"), ("IQD", "IQD (Iraqi Dinar)"), ("IRR", "IRR (Iranian Rial)"), ("ISK", "ISK (Iceland Krona)"), ("JMD", "JMD (Jamaican Dollar)"), ("JOD", "JOD (Jordanian Dinar)"), ("JPY", "JPY (Yen)"), ("KES", "KES (Kenyan Shilling)"), ("KGS", "KGS (Som)"), ("KHR", "KHR (Riel)"), ("KMF", "KMF (Comoro Franc)"), ("KPW", "KPW (North Korean Won)"), ("KRW", "KRW 
(Won)"), ("KWD", "KWD (Kuwaiti Dinar)"), ("KYD", "KYD (Cayman Islands Dollar)"), ("KZT", "KZT (Tenge)"), ("LAK", "LAK (Kip)"), ("LBP", "LBP (Lebanese Pound)"), ("LKR", "LKR (Sri Lanka Rupee)"), ("LRD", "LRD (Liberian Dollar)"), ("LSL", "LSL (Loti)"), ("LYD", "LYD (Libyan Dinar)"), ("MAD", "MAD (Moroccan Dirham)"), ("MDL", "MDL (Moldovan Leu)"), ("MGA", "MGA (Malagasy Ariary)"), ("MKD", "MKD (Denar)"), ("MMK", "MMK (Kyat)"), ("MNT", "MNT (Tugrik)"), ("MOP", "MOP (Pataca)"), ("MRO", "MRO (Ouguiya)"), ("MUR", "MUR (Mauritius Rupee)"), ("MVR", "MVR (Rufiyaa)"), ("MWK", "MWK (Malawi Kwacha)"), ("MXN", "MXN (Mexican Peso)"), ("MYR", "MYR (Malaysian Ringgit)"), ("MZN", "MZN (Mozambique Metical)"), ("NAD", "NAD (Namibia Dollar)"), ("NGN", "NGN (Naira)"), ("NIO", "NIO (Cordoba Oro)"), ("NOK", "NOK (Norwegian Krone)"), ("NPR", "NPR (Nepalese Rupee)"), ("NZD", "NZD (New Zealand Dollar)"), ("OMR", "OMR (Rial Omani)"), ("PAB", "PAB (Balboa)"), ("PEN", "PEN (Sol)"), ("PGK", "PGK (Kina)"), ("PHP", "PHP (Philippine Peso)"), ("PKR", "PKR (Pakistan Rupee)"), ("PLN", "PLN (Zloty)"), ("PYG", "PYG (Guarani)"), ("QAR", "QAR (Qatari Rial)"), ("RON", "RON (Romanian Leu)"), ("RSD", "RSD (Serbian Dinar)"), ("RUB", "RUB (Russian Ruble)"), ("RWF", "RWF (Rwanda Franc)"), ("SAR", "SAR (Saudi Riyal)"), ("SBD", "SBD (Solomon Islands Dollar)"), ("SCR", "SCR (Seychelles Rupee)"), ("SDG", "SDG (Sudanese Pound)"), ("SEK", "SEK (Swedish Krona)"), ("SGD", "SGD (Singapore Dollar)"), ("SHP", "SHP (Saint Helena Pound)"), ("SLL", "SLL (Leone)"), ("SOS", "SOS (Somali Shilling)"), ("SRD", "SRD (Surinam Dollar)"), ("SSP", "SSP (South Sudanese Pound)"), ("STD", "STD (Dobra)"), ("SVC", "SVC (El Salvador Colon)"), ("SYP", "SYP (Syrian Pound)"), ("SZL", "SZL (Lilangeni)"), ("THB", "THB (Baht)"), ("TJS", "TJS (Somoni)"), ("TMT", "TMT (Turkmenistan New Manat)"), ("TND", "TND (Tunisian Dinar)"), ("TOP", "TOP (Pa\u2019anga)"), ("TRY", "TRY (Turkish Lira)"), ("TTD", "TTD (Trinidad and Tobago Dollar)"), ("TWD", "TWD 
(New Taiwan Dollar)"), ("TZS", "TZS (Tanzanian Shilling)"), ("UAH", "UAH (Hryvnia)"), ("UGX", "UGX (Uganda Shilling)"), ("USD", "USD (US Dollar)"), ("UYU", "UYU (Peso Uruguayo)"), ("UZS", "UZS (Uzbekistan Sum)"), ("VEF", "VEF (Bol\xedvar)"), ("VND", "VND (Dong)"), ("VUV", "VUV (Vatu)"), ("WST", "WST (Tala)"), ("XAF", "XAF (CFA Franc BEAC)"), ("XAG", "XAG (Silver)"), ("XAU", "XAU (Gold)"), ( "XBA", "XBA (Bond Markets Unit European Composite Unit (EURCO))", ), ( "XBB", "XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))", ), ( "XBC", "XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))", ), ( "XBD", "XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))", ), ("XCD", "XCD (East Caribbean Dollar)"), ("XDR", "XDR (SDR (Special Drawing Right))"), ("XOF", "XOF (CFA Franc BCEAO)"), ("XPD", "XPD (Palladium)"), ("XPF", "XPF (CFP Franc)"), ("XPT", "XPT (Platinum)"), ("XSU", "XSU (Sucre)"), ( "XTS", "XTS (Codes specifically reserved for testing purposes)", ), ("XUA", "XUA (ADB Unit of Account)"), ( "XXX", "XXX (The codes assigned for transactions where no currency is involved)", ), ("YER", "YER (Yemeni Rial)"), ("ZAR", "ZAR (Rand)"), ("ZMW", "ZMW (Zambian Kwacha)"), ("ZWL", "ZWL (Zimbabwe Dollar)"), ], default=b"USD", help_text=b"The currency used for billing.", max_length=4, ), ), ( "transaction_currency", models.CharField( choices=[ ("AED", "AED (UAE Dirham)"), ("AFN", "AFN (Afghani)"), ("ALL", "ALL (Lek)"), ("AMD", "AMD (Armenian Dram)"), ("ANG", "ANG (Netherlands Antillean Guilder)"), ("AOA", "AOA (Kwanza)"), ("ARS", "ARS (Argentine Peso)"), ("AUD", "AUD (Australian Dollar)"), ("AWG", "AWG (Aruban Florin)"), ("AZN", "AZN (Azerbaijanian Manat)"), ("BAM", "BAM (Convertible Mark)"), ("BBD", "BBD (Barbados Dollar)"), ("BDT", "BDT (Taka)"), ("BGN", "BGN (Bulgarian Lev)"), ("BHD", "BHD (Bahraini Dinar)"), ("BIF", "BIF (Burundi Franc)"), ("BMD", "BMD (Bermudian Dollar)"), ("BND", "BND (Brunei Dollar)"), ("BOB", "BOB (Boliviano)"), ("BRL", "BRL 
(Brazilian Real)"), ("BSD", "BSD (Bahamian Dollar)"), ("BTN", "BTN (Ngultrum)"), ("BWP", "BWP (Pula)"), ("BYN", "BYN (Belarusian Ruble)"), ("BZD", "BZD (Belize Dollar)"), ("CAD", "CAD (Canadian Dollar)"), ("CDF", "CDF (Congolese Franc)"), ("CHF", "CHF (Swiss Franc)"), ("CLP", "CLP (Chilean Peso)"), ("CNY", "CNY (Yuan Renminbi)"), ("COP", "COP (Colombian Peso)"), ("CRC", "CRC (Costa Rican Colon)"), ("CUC", "CUC (Peso Convertible)"), ("CUP", "CUP (Cuban Peso)"), ("CVE", "CVE (Cabo Verde Escudo)"), ("CZK", "CZK (Czech Koruna)"), ("DJF", "DJF (Djibouti Franc)"), ("DKK", "DKK (Danish Krone)"), ("DOP", "DOP (Dominican Peso)"), ("DZD", "DZD (Algerian Dinar)"), ("EGP", "EGP (Egyptian Pound)"), ("ERN", "ERN (Nakfa)"), ("ETB", "ETB (Ethiopian Birr)"), ("EUR", "EUR (Euro)"), ("FJD", "FJD (Fiji Dollar)"), ("FKP", "FKP (Falkland Islands Pound)"), ("GBP", "GBP (Pound Sterling)"), ("GEL", "GEL (Lari)"), ("GHS", "GHS (Ghana Cedi)"), ("GIP", "GIP (Gibraltar Pound)"), ("GMD", "GMD (Dalasi)"), ("GNF", "GNF (Guinea Franc)"), ("GTQ", "GTQ (Quetzal)"), ("GYD", "GYD (Guyana Dollar)"), ("HKD", "HKD (Hong Kong Dollar)"), ("HNL", "HNL (Lempira)"), ("HRK", "HRK (Kuna)"), ("HTG", "HTG (Gourde)"), ("HUF", "HUF (Forint)"), ("IDR", "IDR (Rupiah)"), ("ILS", "ILS (New Israeli Sheqel)"), ("INR", "INR (Indian Rupee)"), ("IQD", "IQD (Iraqi Dinar)"), ("IRR", "IRR (Iranian Rial)"), ("ISK", "ISK (Iceland Krona)"), ("JMD", "JMD (Jamaican Dollar)"), ("JOD", "JOD (Jordanian Dinar)"), ("JPY", "JPY (Yen)"), ("KES", "KES (Kenyan Shilling)"), ("KGS", "KGS (Som)"), ("KHR", "KHR (Riel)"), ("KMF", "KMF (Comoro Franc)"), ("KPW", "KPW (North Korean Won)"), ("KRW", "KRW (Won)"), ("KWD", "KWD (Kuwaiti Dinar)"), ("KYD", "KYD (Cayman Islands Dollar)"), ("KZT", "KZT (Tenge)"), ("LAK", "LAK (Kip)"), ("LBP", "LBP (Lebanese Pound)"), ("LKR", "LKR (Sri Lanka Rupee)"), ("LRD", "LRD (Liberian Dollar)"), ("LSL", "LSL (Loti)"), ("LYD", "LYD (Libyan Dinar)"), ("MAD", "MAD (Moroccan Dirham)"), ("MDL", "MDL (Moldovan Leu)"), 
("MGA", "MGA (Malagasy Ariary)"), ("MKD", "MKD (Denar)"), ("MMK", "MMK (Kyat)"), ("MNT", "MNT (Tugrik)"), ("MOP", "MOP (Pataca)"), ("MRO", "MRO (Ouguiya)"), ("MUR", "MUR (Mauritius Rupee)"), ("MVR", "MVR (Rufiyaa)"), ("MWK", "MWK (Malawi Kwacha)"), ("MXN", "MXN (Mexican Peso)"), ("MYR", "MYR (Malaysian Ringgit)"), ("MZN", "MZN (Mozambique Metical)"), ("NAD", "NAD (Namibia Dollar)"), ("NGN", "NGN (Naira)"), ("NIO", "NIO (Cordoba Oro)"), ("NOK", "NOK (Norwegian Krone)"), ("NPR", "NPR (Nepalese Rupee)"), ("NZD", "NZD (New Zealand Dollar)"), ("OMR", "OMR (Rial Omani)"), ("PAB", "PAB (Balboa)"), ("PEN", "PEN (Sol)"), ("PGK", "PGK (Kina)"), ("PHP", "PHP (Philippine Peso)"), ("PKR", "PKR (Pakistan Rupee)"), ("PLN", "PLN (Zloty)"), ("PYG", "PYG (Guarani)"), ("QAR", "QAR (Qatari Rial)"), ("RON", "RON (Romanian Leu)"), ("RSD", "RSD (Serbian Dinar)"), ("RUB", "RUB (Russian Ruble)"), ("RWF", "RWF (Rwanda Franc)"), ("SAR", "SAR (Saudi Riyal)"), ("SBD", "SBD (Solomon Islands Dollar)"), ("SCR", "SCR (Seychelles Rupee)"), ("SDG", "SDG (Sudanese Pound)"), ("SEK", "SEK (Swedish Krona)"), ("SGD", "SGD (Singapore Dollar)"), ("SHP", "SHP (Saint Helena Pound)"), ("SLL", "SLL (Leone)"), ("SOS", "SOS (Somali Shilling)"), ("SRD", "SRD (Surinam Dollar)"), ("SSP", "SSP (South Sudanese Pound)"), ("STD", "STD (Dobra)"), ("SVC", "SVC (El Salvador Colon)"), ("SYP", "SYP (Syrian Pound)"), ("SZL", "SZL (Lilangeni)"), ("THB", "THB (Baht)"), ("TJS", "TJS (Somoni)"), ("TMT", "TMT (Turkmenistan New Manat)"), ("TND", "TND (Tunisian Dinar)"), ("TOP", "TOP (Pa\u2019anga)"), ("TRY", "TRY (Turkish Lira)"), ("TTD", "TTD (Trinidad and Tobago Dollar)"), ("TWD", "TWD (New Taiwan Dollar)"), ("TZS", "TZS (Tanzanian Shilling)"), ("UAH", "UAH (Hryvnia)"), ("UGX", "UGX (Uganda Shilling)"), ("USD", "USD (US Dollar)"), ("UYU", "UYU (Peso Uruguayo)"), ("UZS", "UZS (Uzbekistan Sum)"), ("VEF", "VEF (Bol\xedvar)"), ("VND", "VND (Dong)"), ("VUV", "VUV (Vatu)"), ("WST", "WST (Tala)"), ("XAF", "XAF (CFA Franc BEAC)"), 
("XAG", "XAG (Silver)"), ("XAU", "XAU (Gold)"), ( "XBA", "XBA (Bond Markets Unit European Composite Unit (EURCO))", ), ( "XBB", "XBB (Bond Markets Unit European Monetary Unit (E.M.U.-6))", ), ( "XBC", "XBC (Bond Markets Unit European Unit of Account 9 (E.U.A.-9))", ), ( "XBD", "XBD (Bond Markets Unit European Unit of Account 17 (E.U.A.-17))", ), ("XCD", "XCD (East Caribbean Dollar)"), ("XDR", "XDR (SDR (Special Drawing Right))"), ("XOF", "XOF (CFA Franc BCEAO)"), ("XPD", "XPD (Palladium)"), ("XPF", "XPF (CFP Franc)"), ("XPT", "XPT (Platinum)"), ("XSU", "XSU (Sucre)"), ( "XTS", "XTS (Codes specifically reserved for testing purposes)", ), ("XUA", "XUA (ADB Unit of Account)"), ( "XXX", "XXX (The codes assigned for transactions where no currency is involved)", ), ("YER", "YER (Yemeni Rial)"), ("ZAR", "ZAR (Rand)"), ("ZMW", "ZMW (Zambian Kwacha)"), ("ZWL", "ZWL (Zimbabwe Dollar)"), ], help_text=b"The currency used when making a transaction.", max_length=4, ), ), ( "transaction_xe_rate", models.DecimalField( blank=True, decimal_places=4, help_text=b"Currency exchange rate from document currency to transaction_currency.", max_digits=16, null=True, ), ), ( "transaction_xe_date", models.DateField( blank=True, help_text=b"Date of the transaction exchange rate.", null=True, ), ), ( "state", django_fsm.FSMField( choices=[ (b"draft", "Draft"), (b"issued", "Issued"), (b"paid", "Paid"), (b"canceled", "Canceled"), ], default=b"draft", help_text=b"The state the invoice is in.", max_length=10, verbose_name=b"State", ), ), ( "_total", models.DecimalField( blank=True, decimal_places=2, max_digits=19, null=True ), ), ( "_total_in_transaction_currency", models.DecimalField( blank=True, decimal_places=2, max_digits=19, null=True ), ), ( "customer", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="silver.Customer", ), ), ( "pdf", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="silver.PDF", ), ), ( "provider", models.ForeignKey( 
on_delete=django.db.models.deletion.CASCADE, to="silver.Provider", ), ), ( "related_document", models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="reverse_related_document", to="silver.BillingDocumentBase", ), ), ], options={ "ordering": ("-issue_date", "series", "-number"), }, ), migrations.RenameField( model_name="billinglog", old_name="invoice", new_name="invoice_old" ), migrations.RenameField( model_name="billinglog", old_name="proforma", new_name="proforma_old" ), migrations.AlterField( model_name="billinglog", name="invoice_old", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="old_invoice_logs", to="silver.Invoice", ), ), migrations.AlterField( model_name="billinglog", name="proforma_old", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="old_proforma_logs", to="silver.Proforma", ), ), migrations.AddField( model_name="billinglog", name="invoice", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="invoice_billing_logs", to="silver.BillingDocumentBase", ), ), migrations.AddField( model_name="billinglog", name="proforma", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="proforma_billing_logs", to="silver.BillingDocumentBase", ), ), migrations.RenameField( model_name="documententry", old_name="invoice", new_name="invoice_old" ), migrations.RenameField( model_name="documententry", old_name="proforma", new_name="proforma_old" ), migrations.AlterField( model_name="documententry", name="invoice_old", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="old_invoice_entries", to="silver.Invoice", ), ), migrations.AlterField( model_name="documententry", name="proforma_old", field=models.ForeignKey( blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, related_name="old_proforma_entries", to="silver.Proforma", ), ), migrations.AddField( model_name="documententry", name="invoice", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="invoice_entries", to="silver.BillingDocumentBase", ), ), migrations.AddField( model_name="documententry", name="proforma", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="proforma_entries", to="silver.BillingDocumentBase", ), ), migrations.RenameField( model_name="transaction", old_name="invoice", new_name="invoice_old" ), migrations.RenameField( model_name="transaction", old_name="proforma", new_name="proforma_old" ), migrations.AlterField( model_name="transaction", name="invoice_old", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="old_invoice_transactions", to="silver.Invoice", ), ), migrations.AlterField( model_name="transaction", name="proforma_old", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="old_proforma_transactions", to="silver.Proforma", ), ), migrations.AddField( model_name="transaction", name="invoice", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="invoice_transactions", to="silver.BillingDocumentBase", ), ), migrations.AddField( model_name="transaction", name="proforma", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="proforma_transactions", to="silver.BillingDocumentBase", ), ), migrations.RunPython( move_documents_to_billing_document, migrations.RunPython.noop ), migrations.RemoveField( model_name="billinglog", name="invoice_old", ), migrations.RemoveField( model_name="billinglog", name="proforma_old", ), migrations.RemoveField( model_name="documententry", name="invoice_old", ), 
migrations.RemoveField( model_name="documententry", name="proforma_old", ), migrations.RemoveField( model_name="transaction", name="invoice_old", ), migrations.RemoveField( model_name="transaction", name="proforma_old", ), migrations.AlterUniqueTogether( name="invoice", unique_together=set([]), ), migrations.RemoveField( model_name="invoice", name="customer", ), migrations.RemoveField( model_name="invoice", name="pdf", ), migrations.RemoveField( model_name="invoice", name="proforma", ), migrations.RemoveField( model_name="invoice", name="provider", ), migrations.AlterUniqueTogether( name="proforma", unique_together=set([]), ), migrations.RemoveField( model_name="proforma", name="customer", ), migrations.RemoveField( model_name="proforma", name="invoice", ), migrations.RemoveField( model_name="proforma", name="pdf", ), migrations.RemoveField( model_name="proforma", name="provider", ), migrations.DeleteModel( name="Invoice", ), migrations.DeleteModel( name="Proforma", ), migrations.AlterUniqueTogether( name="billingdocumentbase", unique_together=set([("kind", "provider", "series", "number")]), ), migrations.CreateModel( name="Invoice", fields=[], options={ "proxy": True, }, bases=("silver.billingdocumentbase",), ), migrations.CreateModel( name="Proforma", fields=[], options={ "proxy": True, }, bases=("silver.billingdocumentbase",), ), migrations.RunSQL(sql="DROP VIEW IF EXISTS silver_document;", reverse_sql=""), ]
nilq/baby-python
python
from Factory import customer

# set data
if __name__ == "__main__":
    cust = customer.Customer()
    # NOTE(review): get_data() appears to populate cust.data as a side
    # effect -- its return value was bound to an unused local in the
    # original, so the binding was dropped; confirm the side effect
    # against Factory.customer before relying on it.
    cust.get_data()
    sorted_data = cust.sort_by_revenue(cust.data)

    print('\n\n')
    print("In order of annual revenue the accounts low to high are:")
    cust.print_data(sorted_data)
    print('\n')
    print("In order of SLA the accounts are: ")
    cust.print_data_sla()
    print('\n\n\n')
nilq/baby-python
python
#!/usr/bin/python3


class Transform:
    """Spatial orientation parameters of an entity."""

    def __init__(self, position, rotation=0, scale=1, layer=0):
        """
        Create a transform.

        :param position: the position
        :param rotation: the rotation in degrees
        :param scale: the scale (1 is 100%)
        :param layer: the layer for rendering
        """
        (self.position,
         self.rotation,
         self.scale,
         self.layer) = position, rotation, scale, layer
nilq/baby-python
python
#!/usr/bin/python3

# First choice pack and unpack into sqlite
# Paul H Alfille 2021

# Wrap firstchoice-specific code into an sqlite3 one.

# fix: signal and argparse were used below but never imported, which
# raised NameError at runtime.
try:
    import sys
    import signal
    import argparse
except:
    print("Please install the sys, signal and argparse modules")
    print("\tthey should be part of the standard python3 distribution")
    raise

import first
import sqltable
import common


def SqlField(field):
    """Convert a First Choice field name into a legal SQL identifier."""
    return field.replace(' ', '_')


def PrintField(field):
    """Inverse of SqlField: restore spaces for display."""
    return field.replace('_', ' ')


class SQL_FOL_handler(first.FOL_handler):
    def __init__(self, FOLfile, FOLout='OUTPUT.FOL', sqlfile=None, **kwargs):
        # Read in the FOL file (dbase) into an sql database.
        # sqlfile -- None for memory.
        # Alternatively use the connection to use an already opened database file.
        super().__init__(FOLfile, FOLout, **kwargs)
        # Create new table
        self.Fields()
        self.SQLtable = sqltable.SQL_table(sqlfile, self.fields)
        # Put all FOL data into SQL table
        self.SQLtable.AllDataPut(self.data)

    def Fields(self):
        # Cache the sanitized field names from the form definition.
        self.fields = [SqlField(f['field']) for f in self.form['fields']]
        # print(self.fields)

    def Write(self):
        # Pull (possibly modified) rows back out of SQL before writing the FOL file.
        self.data = self.SQLtable.AllDataGet()
        super().Write()


def CommandLineArgs(cl):
    first.CommandLineArgs(cl)
    cl.add_argument("-s", "--sql", help="Show SQL statements", action="count")


if __name__ == '__main__':  # command line
    """
    First Choice FOL_handler
    File *.fol
    """

    def signal_handler(signal_received, frame):
        # Keyboard-interrupt handler: exit cleanly on Ctrl-C.
        # signal.signal( signal.SIGINT, signal.SIG_IGN )
        sys.exit(0)

    def CommandLineInterp():
        first.CommandLineInterp()

    def CommandLine():
        """Setup argparser object to process the command line"""
        cl = argparse.ArgumentParser(
            description="SQL access to a PFS:First Choice v3 database file (.FOL). 2021 by Paul H Alfille")
        CommandLineArgs(cl)
        cl.add_argument("In", help="Existing database file (type .FOL)", type=argparse.FileType('rb'))
        return cl.parse_args()

    common.args = CommandLine()  # Get args from command line
    CommandLineInterp()
    # fix: register the locally defined handler; the original passed
    # first.signal_handler, but the handler is defined in this module.
    signal.signal(signal.SIGINT, signal_handler)

    # Start program
    # Read in database (FOL file already open from command line)
    try:
        # fix: the original referenced the undefined bare name `args`;
        # the parsed namespace is stored in common.args.
        dbase_class = SQL_FOL_handler(common.args.In, common.args.Out)
    except common.User_Error as error:
        print("Error parsing database file: {}".format(error))
        dbase_class = None

    # Changes could happen here,
    # If nothing else, this is a test of parsing

    # Write out file to new database
    if dbase_class is not None:
        dbase_class.Write()
    sys.exit(None)

else:  # module

    def OpenDatabase(databasename):
        return SQL_FOL_handler(databasename)

    def Fields(dbase_class):
        return dbase_class.fields

    def SaveDatabase(dbase_class, newdatabase):
        if dbase_class is not None:
            dbase_class.Write()
nilq/baby-python
python
from io import BytesIO
import math

from wand.image import Image as WandImageBase
from wand.color import Color as WandColor
import aiohttp
import discord


class Color(WandColor):
    """
    A little subclass of wand.color.Color

    Adds functionality for ascii art: maps the color's brightness to a
    printable character via the ascii_character property.
    """

    def __init__(self, *args, **kwargs):
        # Keys are brightness buckets (sum of r+g+b, scaled x100, in steps of
        # 25); values are progressively "denser" glyphs for darker output.
        self.ascii_characters = {
            300: "@",
            275: "#",
            250: ";",
            225: "+",
            200: "=",
            175: ":",
            150: "-",
            125: "\"",
            100: ",",
            75: "'",
            50: ".",
            25: " ",
            0: " "
        }
        super().__init__(*args, **kwargs)

    @property
    def ascii_character(self):
        """Return the ascii-art character for this color's brightness."""
        # wand exposes red/green/blue as floats; sum x100 lands in [0, 300],
        # which is then rounded up to the nearest 25 to index the table.
        value = self.red + self.green + self.blue
        value *= 100
        return self.ascii_characters[int(math.ceil(value / 25.) * 25)]


class Image(WandImageBase):
    """
    A little custom version of wand.image.WandImage.

    Adds functionality such as...

    from_link(link)
        - For creating an image from a link using aiohttp.
    from_bytes_io(BytesIO)
        - For creating an image from a bytes io object.
          Not very useful but saves some lines of code.
    to_bytes_io()
        - For saving an image to a BytesIO object.
    to_discord_file()
        - For saving an image to a discord.File object.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @classmethod
    async def from_link(cls, link: str = None):
        """Download `link` and return it as an Image (blank 0x0 image when link is None)."""
        if link is None:
            return cls().blank(width=0, height=0)

        # BUG FIX: str.strip returns a new string; the original discarded the
        # result, so Discord-style <url> wrappers were never removed.
        link = link.strip("<>")

        # Start a client session and get the link. Read the link to response variable.
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as response:
                response = await response.read()

        # Convert the response to a byte object
        byte_response = BytesIO(response)
        byte_response.seek(0)

        # Start an image object with the bytes.
        image = cls(file=byte_response)

        return image

    @classmethod
    async def from_bytes_io(cls, bytes_io: BytesIO):
        """Build an Image from a BytesIO object (rewinds the buffer first)."""
        bytes_io.seek(0)

        # Start an image object with the bytes.
        image = cls(file=bytes_io)

        return image

    def to_bytes_io(self):
        """Save this image into a fresh BytesIO, rewound and ready to read."""
        bytes_io = BytesIO()

        # save self to the bytes io and seek to the beginning
        self.save(file=bytes_io)
        bytes_io.seek(0)

        return bytes_io

    def to_discord_file(self, filename: str):
        """Save this image as a discord.File with the given filename."""
        bytes_io = self.to_bytes_io()
        file = discord.File(bytes_io, filename=filename)
        return file
nilq/baby-python
python
# Check if One Array can be Nested in Another
# Create a function that returns True if the first list can be nested inside the second.


def can_nest(list1, list2):
    """Return True if list1 "nests" strictly inside list2.

    That is, every element of list1 lies strictly between the minimum and
    maximum of list2. Generalized from the original, which indexed
    sortedlist2[1] and was therefore only correct for 2-element list2;
    min()/max() also avoid the O(n log n) sorts.
    """
    return min(list2) < min(list1) and max(list1) < max(list2)


print(can_nest([3, 1], [4, 0]))  # True
print(can_nest([9, 9, 8], [8, 9]))
print(can_nest([1, 2, 3, 4], [0, 6]))
nilq/baby-python
python
import cpg_scpi
from time import sleep


def main():
    """Exercise each Circuit Playground sensor/query 10 times over the SCPI link."""
    cpg = cpg_scpi.CircuitPlayground()
    if cpg.is_open:
        repeat(what=cpg.buttonAny, count=10, delaySeconds=1)
        repeat(what=cpg.buttonLeft, count=10, delaySeconds=1)
        repeat(what=cpg.buttonRight, count=10, delaySeconds=1)
        repeat(what=cpg.switch, count=10, delaySeconds=1)
        repeat(what=cpg.temp, count=10, delaySeconds=1)
        repeat(what=cpg.acc, count=10, delaySeconds=1)
        repeat(what=cpg.light, count=10, delaySeconds=1)
        repeat(what=cpg.sound, count=10, delaySeconds=1)
        repeat(what=cpg.capSense, count=10, delaySeconds=1)
        repeat(what=cpg.capTap, count=10, delaySeconds=1)
        repeat(what=cpg.uptime, count=10, delaySeconds=1)
        cpg.close()
    print()
    print(f'Closed connection to CPG. {cpg.is_open=}')


def repeat(what, count, delaySeconds=0):
    """Call `what` `count` times, printing each result, sleeping between calls.

    :param what: zero-argument callable to invoke (a CPG query method)
    :param count: number of invocations
    :param delaySeconds: pause between invocations; 0 disables the sleep
    """
    print(f'Repeating {count} times: {what}')
    for i in range(count):
        print(what())
        if delaySeconds > 0:
            sleep(delaySeconds)


# Guard the entry point so importing this module does not open the serial
# connection as a side effect (original called main() unconditionally).
if __name__ == '__main__':
    main()
nilq/baby-python
python
import random

# Global registry mapping every Node to a bool: True when that node currently
# acts as a cluster head. Shared mutable module state -- all Nodes read/write it.
ch = {}


class Node(object):
    """One node in a weight-based clustering simulation.

    Appears to implement a highest-weight cluster-head election (DCA/DMAC-style)
    over an explicit neighbour graph -- TODO confirm the intended algorithm.
    Each node either becomes a cluster head (tracked in the global `ch` dict,
    members listed in self.cluster) or joins the heaviest neighbouring head.
    """

    def __init__(self, val):
        #self.ID = id(self)
        #self.weight = random.randint(1, 10)
        self.ID = val
        # Weight equals the ID here (the random alternative is commented out),
        # so higher-ID nodes win elections.
        self.weight = self.ID
        self.cluster = []          # members, only meaningful when this node is a head
        self.clusterHead = None    # the head this node currently follows (may be self)
        self.neighbours = []       # adjacent Nodes; maintained via newLink/linkFail
        ch[self] = False
        # Run the election immediately on construction.
        self.start()

    def start(self):
        """(Re-)run the local election: join the heaviest neighbouring head, else become one."""
        big = self.largestNeighbour()
        if big and big.weight > self.weight:
            # A heavier cluster head is adjacent: announce joining it.
            self.callJoin(big)
            self.clusterHead = big
        else:
            # No heavier head nearby: declare self a cluster head.
            self.callCh()
            ch[self] = True
            self.clusterHead = self
            if self not in self.cluster:
                self.cluster.append(self)

    def show(self):
        """Print this node's state; heads also list their members' IDs."""
        print("ID = %s | Weight = %s | clusterhead = %s" % (self.ID, self.weight, self.clusterHead.ID))
        if self.clusterHead == self:
            print(" ".join([str(mem.ID) for mem in self.cluster]))

    def largestNeighbour(self):
        """Return the heaviest neighbour that is currently a cluster head, or None."""
        # NOTE: `max` shadows the builtin; harmless locally but worth renaming.
        max = 0
        biggest = None
        for neighbour in self.neighbours:
            if neighbour.weight > max and ch[neighbour]:
                max = neighbour.weight
                biggest = neighbour
        return biggest

    def callJoin(self, big):
        """Broadcast to all neighbours that this node is joining head `big`."""
        for neighbour in self.neighbours:
            neighbour.getJoin(self, big)

    def getJoin(self, u, z):
        """Handle neighbour `u` announcing it joins head `z`."""
        if ch[self]:
            if z == self:
                # u joined us: record membership.
                self.cluster.append(u)
            elif u in self.cluster:
                # u defected to another head: drop it.
                self.cluster.remove(u)
        elif self.clusterHead == u:
            # Our own head stopped being one (it joined z): re-elect.
            self.start()

    def callCh(self):
        """Broadcast to all neighbours that this node declares itself a head."""
        for neighbour in self.neighbours:
            neighbour.getCh(self)

    def getCh(self, u):
        """Handle neighbour `u` declaring itself a head: defect to it if heavier."""
        if not self.clusterHead or u.weight > self.clusterHead.weight:
            self.callJoin(u)
            ch[self] = False

    def newLink(self, u):
        """Topology change: a new edge to node `u` appeared."""
        self.neighbours.append(u)
        if ch[u]:
            if self.clusterHead and u.weight > self.clusterHead.weight:
                # New neighbour is a heavier head: switch allegiance.
                self.callJoin(u)
                self.clusterHead = u
                ch[self] = False
                self.cluster.clear()
            elif self.clusterHead and self.clusterHead.weight > u.weight:
                # Our head outweighs the new head u: make u reconsider.
                u.start()

    def linkFail(self, u):
        """Topology change: the edge to node `u` disappeared."""
        self.neighbours.remove(u)
        if ch[self] and u in self.cluster:
            self.cluster.remove(u)
        elif self.clusterHead == u:
            # Lost contact with our head: re-elect.
            self.start()
nilq/baby-python
python
import torch.nn as nn from ..builder import BACKBONES from .base_backbone import BaseBackbone import numbers import collections import logging import functools import torch from torch import nn from torch.nn import functional as F from mmcls.models.backbones.transformer import Transformer checkpoint_kwparams = None # checkpoint_kwparams = json.load(open('checkpoint.json')) def resize(input, size=None, scale_factor=None, mode='nearest', align_corners=None): return F.interpolate(input, size, scale_factor, mode, align_corners) class InvertedResidualChannels(nn.Module): """MobiletNetV2 building block.""" def __init__(self, inp, oup, stride, channels, kernel_sizes, expand, active_fn=None, batch_norm_kwargs=None): super(InvertedResidualChannels, self).__init__() # assert stride in [1, 2] assert len(channels) == len(kernel_sizes) self.input_dim = inp self.output_dim = oup self.expand = expand self.stride = stride self.kernel_sizes = kernel_sizes self.channels = channels self.use_res_connect = self.stride == 1 and inp == oup self.batch_norm_kwargs = batch_norm_kwargs self.active_fn = active_fn self.ops, self.pw_bn = self._build(channels, kernel_sizes, expand) if not self.use_res_connect: # TODO(Mingyu): Add this residual # assert (self.input_dim % min(self.input_dim, self.output_dim) == 0 # and self.output_dim % min(self.input_dim, self.output_dim) == 0) group = [x for x in range(1, self.input_dim + 1) if self.input_dim % x == 0 and self.output_dim % x == 0][-1] self.residual = nn.Conv2d(self.input_dim, self.output_dim, kernel_size=1, stride=self.stride, padding=0, groups=group, bias=False) def _build(self, hidden_dims, kernel_sizes, expand): _batch_norm_kwargs = self.batch_norm_kwargs \ if self.batch_norm_kwargs is not None else {} narrow_start = 0 ops = nn.ModuleList() for k, hidden_dim in zip(kernel_sizes, hidden_dims): layers = [] if expand: # pw layers.append( ConvBNReLU(self.input_dim, hidden_dim, kernel_size=1, batch_norm_kwargs=_batch_norm_kwargs, 
active_fn=self.active_fn)) else: if hidden_dim != self.input_dim: raise RuntimeError('uncomment this for search_first model') logging.warning( 'uncomment this for previous trained search_first model') # layers.append(Narrow(1, narrow_start, hidden_dim)) narrow_start += hidden_dim layers.extend([ # dw ConvBNReLU(hidden_dim, hidden_dim, kernel_size=k, stride=self.stride, groups=hidden_dim, batch_norm_kwargs=_batch_norm_kwargs, active_fn=self.active_fn), # pw-linear nn.Conv2d(hidden_dim, self.output_dim, 1, 1, 0, bias=False), # nn.BatchNorm2d(oup, **batch_norm_kwargs), ]) ops.append(nn.Sequential(*layers)) pw_bn = None if len(ops) != 0: pw_bn = nn.BatchNorm2d(self.output_dim, **_batch_norm_kwargs) if not expand and narrow_start != self.input_dim: raise ValueError('Part of input are not used') return ops, pw_bn def forward(self, x): # logging.warning( # 'The whole block is pruned') if len(self.ops) == 0: if not self.use_res_connect: return self.residual(x) else: return x tmp = sum([op(x) for op in self.ops]) tmp = self.pw_bn(tmp) if self.use_res_connect: return x + tmp else: return self.residual(x) + tmp return tmp def __repr__(self): return ('{}({}, {}, channels={}, kernel_sizes={}, expand={},' ' stride={})').format(self._get_name(), self.input_dim, self.output_dim, self.channels, self.kernel_sizes, self.expand, self.stride) class InvertedResidual(InvertedResidualChannels): def __init__(self, inp, oup, stride, expand_ratio, kernel_sizes, active_fn=None, batch_norm_kwargs=None, **kwargs): def _expand_ratio_to_hiddens(expand_ratio): if isinstance(expand_ratio, list): assert len(expand_ratio) == len(kernel_sizes) expand = True elif isinstance(expand_ratio, numbers.Number): expand = expand_ratio != 1 expand_ratio = [expand_ratio for _ in kernel_sizes] else: raise ValueError( 'Unknown expand_ratio type: {}'.format(expand_ratio)) hidden_dims = [int(round(inp * e)) for e in expand_ratio] return hidden_dims, expand hidden_dims, expand = _expand_ratio_to_hiddens(expand_ratio) 
if checkpoint_kwparams: assert oup == checkpoint_kwparams[0][0] print('loading: {} -> {}, {} -> {}'.format( hidden_dims, checkpoint_kwparams[0][4], kernel_sizes, checkpoint_kwparams[0][3])) hidden_dims = checkpoint_kwparams[0][4] kernel_sizes = checkpoint_kwparams[0][3] checkpoint_kwparams.pop(0) super(InvertedResidual, self).__init__(inp, oup, stride, hidden_dims, kernel_sizes, expand, active_fn=active_fn, batch_norm_kwargs=batch_norm_kwargs) self.expand_ratio = expand_ratio class Identity(nn.Module): """Module proxy for null op.""" def forward(self, x): return x def get_active_fn(name): """Select activation function.""" active_fn = { 'nn.ReLU6': functools.partial(nn.ReLU6, inplace=True), 'nn.ReLU': functools.partial(nn.ReLU, inplace=True), }[name] return active_fn def _make_divisible(v, divisor, min_value=None): """Make channels divisible to divisor. This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_v < 0.9 * v: new_v += divisor return new_v def conv3x3(in_planes, out_planes, stride=1, dilation=1): """3x3 convolution with padding.""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False) class ConvBNReLU(nn.Sequential): """Convolution-BatchNormalization-ActivateFn.""" def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, active_fn=None, batch_norm_kwargs=None, dilation=1, padding=None, **kwargs): if batch_norm_kwargs is None: batch_norm_kwargs = {} if not padding: padding = (kernel_size - 1) // 2 super(ConvBNReLU, self).__init__( nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False), nn.BatchNorm2d(out_planes, **batch_norm_kwargs), active_fn() if active_fn is not None else Identity()) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, kernel_size=3, active_fn=None, batch_norm_kwargs=None, expand_ratio=None, kernel_sizes=None ): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = active_fn() self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = None self.stride = stride if self.stride != 1 or inplanes != planes: self.downsample = nn.Sequential( nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes), ) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out def get_block_wrapper(block_str): """Wrapper for MobileNetV2 block. 
Use `expand_ratio` instead of manually specified channels number.""" if block_str == 'InvertedResidualChannels': return InvertedResidual elif block_str == 'ConvBNReLU': return ConvBNReLU elif block_str == 'BasicBlock': return BasicBlock else: raise ValueError('Unknown type of blocks.') class ParallelModule(nn.Module): def __init__(self, num_branches=2, block=get_block_wrapper('InvertedResidualChannels'), num_blocks=[2, 2], num_channels=[32, 32], expand_ratio=6, kernel_sizes=[3, 5, 7], batch_norm_kwargs=None, active_fn=get_active_fn('nn.ReLU6')): super(ParallelModule, self).__init__() self.num_branches = num_branches self.active_fn = active_fn self.batch_norm_kwargs = batch_norm_kwargs self.expand_ratio = expand_ratio self.kernel_sizes = kernel_sizes self._check_branches( num_branches, num_blocks, num_channels) self.branches = self._make_branches( num_branches, block, num_blocks, num_channels) def _check_branches(self, num_branches, num_blocks, num_channels): if num_branches != len(num_blocks): error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( num_branches, len(num_blocks)) logging.error(error_msg) raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( num_branches, len(num_channels)) logging.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels): layers = [] for i in range(0, num_blocks[branch_index]): layers.append( block( num_channels[branch_index], num_channels[branch_index], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=1, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn)) return nn.Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return nn.ModuleList(branches) def forward(self, x): for i in range(self.num_branches): 
x[i] = self.branches[i](x[i]) return x class FuseModule(nn.Module): ''' Consistent with HRNET: 1. self.use_hr_format, eg: fuse 3 branches, and then add 4th branch from 3rd branch. (default fuse 4 branches) 2. use_hr_format, if the channels are the same and stride==1, use None rather than fuse. (default, always fuse) and use convbnrelu, and kernel_size=1 when upsample. also control the relu here (last layer no relu) 3. self.in_channels_large_stride, use 16->16->64 instead of 16->32->64 for large stride. (default, True) 4. The only difference in self.use_hr_format when adding a branch: is we use add 4th branch from 3rd branch, add 5th branch from 4rd branch hrnet use add 4th branch from 3rd branch, add 5th branch from 3rd branch (2 conv layers) actually only affect 1->2 stage can be hard coded: self.use_hr_format = self.use_hr_format and not(out_branches == 2 and in_branches == 1) 5. hrnet have a fuse layer at the end, we remove it ''' def __init__(self, in_branches=1, out_branches=2, block=get_block_wrapper('InvertedResidualChannels'), in_channels=[16], out_channels=[16, 32], expand_ratio=6, kernel_sizes=[3, 5, 7], batch_norm_kwargs=None, active_fn=get_active_fn('nn.ReLU6'), use_hr_format=False, only_fuse_neighbor=True, directly_downsample=True): super(FuseModule, self).__init__() self.out_branches = out_branches self.in_branches = in_branches self.active_fn = active_fn self.batch_norm_kwargs = batch_norm_kwargs self.expand_ratio = expand_ratio self.kernel_sizes = kernel_sizes self.only_fuse_neighbor = only_fuse_neighbor self.in_channels_large_stride = True # see 3. if only_fuse_neighbor: self.use_hr_format = out_branches > in_branches # w/o self, are two different flags. (see 1.) else: self.use_hr_format = out_branches > in_branches and \ not (out_branches == 2 and in_branches == 1) # see 4. self.relu = functools.partial(nn.ReLU, inplace=False) if use_hr_format: block = ConvBNReLU # See 2. 
block = ConvBNReLU fuse_layers = [] for i in range(out_branches if not self.use_hr_format else in_branches): fuse_layer = [] for j in range(in_branches): if only_fuse_neighbor: if j < i - 1 or j > i + 1: fuse_layer.append(None) continue if j > i: fuse_layer.append(nn.Sequential( block( in_channels[j], out_channels[i], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=1, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not use_hr_format else None, kernel_size=1 # for hr format ), nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) elif j == i: if use_hr_format and in_channels[j] == out_channels[i]: fuse_layer.append(None) else: fuse_layer.append( block( in_channels[j], out_channels[i], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=1, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not use_hr_format else None, kernel_size=3 # for hr format )) else: downsamples = [] if directly_downsample: downsamples.append( block( in_channels[j], out_channels[i], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2 ** (i - j), batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not use_hr_format else None, kernel_size=3 # for hr format )) else: for k in range(i - j): if self.in_channels_large_stride: if k == i - j - 1: downsamples.append( block( in_channels[j], out_channels[i], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not use_hr_format else None, kernel_size=3 # for hr format )) else: downsamples.append( block( in_channels[j], in_channels[j], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu, kernel_size=3 # for hr format )) else: if k == 0: downsamples.append( block( in_channels[j], out_channels[j + 1], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, 
batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not (use_hr_format and i == j + 1) else None, kernel_size=3 # for hr format )) elif k == i - j - 1: downsamples.append( block( out_channels[j + k], out_channels[i], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu if not use_hr_format else None, kernel_size=3 # for hr format )) else: downsamples.append( block( out_channels[j + k], out_channels[j + k + 1], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu, kernel_size=3 # for hr format )) fuse_layer.append(nn.Sequential(*downsamples)) fuse_layers.append(nn.ModuleList(fuse_layer)) if self.use_hr_format: for branch in range(in_branches, out_branches): fuse_layers.append(nn.ModuleList([block( out_channels[branch - 1], out_channels[branch], expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.relu, kernel_size=3 # for hr format )])) self.fuse_layers = nn.ModuleList(fuse_layers) def forward(self, x): x_fuse = [] if not self.only_fuse_neighbor: for i in range(len(self.fuse_layers) if not self.use_hr_format else self.in_branches): y = self.fuse_layers[i][0](x[0]) if self.fuse_layers[i][0] else x[0] # hr_format, None for j in range(1, self.in_branches): if self.fuse_layers[i][j]: y = y + self.fuse_layers[i][j](x[j]) else: # hr_format, None y = y + x[j] x_fuse.append(self.relu(y)) # TODO(Mingyu): Use ReLU? 
if self.use_hr_format: for branch in range(self.in_branches, self.out_branches): x_fuse.append(self.fuse_layers[branch][0](x_fuse[branch - 1])) else: for i in range(len(self.fuse_layers) if not self.use_hr_format else self.in_branches): flag = 1 for j in range(i-1, i+2): if 0 <= j < self.in_branches: if flag: y = self.fuse_layers[i][j](x[j]) if self.fuse_layers[i][j] else x[j] # hr_format, None flag = 0 else: if self.fuse_layers[i][j]: y = y + self.fuse_layers[i][j](x[j]) else: # hr_format, None y = y + x[j] x_fuse.append(self.relu()(y)) # TODO(Mingyu): Use ReLU? if self.use_hr_format: for branch in range(self.in_branches, self.out_branches): x_fuse.append(self.fuse_layers[branch][0](x_fuse[branch - 1])) return x_fuse @BACKBONES.register_module() class HighResolutionNet(nn.Module): def __init__(self, num_classes=1000, input_size=224, input_stride=4, input_channel=[16, 16], last_channel=1024, head_channels=None, bn_momentum=0.1, bn_epsilon=1e-5, dropout_ratio=0.2, active_fn='nn.ReLU6', block='InvertedResidualChannels', width_mult=1.0, round_nearest=8, expand_ratio=4, kernel_sizes=[3, 5, 7], inverted_residual_setting=None, task='segmentation', align_corners=False, start_with_atomcell=False, fcn_head_for_seg=False, **kwargs): super(HighResolutionNet, self).__init__() batch_norm_kwargs = { 'momentum': bn_momentum, 'eps': bn_epsilon } self.avg_pool_size = input_size // 32 self.input_stride = input_stride self.input_channel = [_make_divisible(item * width_mult, round_nearest) for item in input_channel] self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) self.batch_norm_kwargs = batch_norm_kwargs self.active_fn = get_active_fn(active_fn) self.kernel_sizes = kernel_sizes self.expand_ratio = expand_ratio self.task = task self.align_corners = align_corners self.block = get_block_wrapper(block) self.inverted_residual_setting = inverted_residual_setting downsamples = [] if self.input_stride > 1: downsamples.append(ConvBNReLU( 3, 
input_channel[0], kernel_size=3, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn)) if self.input_stride > 2: if start_with_atomcell: downsamples.append(InvertedResidual(input_channel[0], input_channel[0], 1, 1, [3], self.active_fn, self.batch_norm_kwargs)) downsamples.append(ConvBNReLU( input_channel[0], input_channel[1], kernel_size=3, stride=2, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn)) self.downsamples = nn.Sequential(*downsamples) features = [] for index in range(len(inverted_residual_setting)): in_branches = 1 if index == 0 else inverted_residual_setting[index - 1][0] in_channels = [input_channel[1]] if index == 0 else inverted_residual_setting[index - 1][-1] features.append( FuseModule( in_branches=in_branches, out_branches=inverted_residual_setting[index][0], in_channels=in_channels, out_channels=inverted_residual_setting[index][-1], block=self.block, expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn) ) features.append( ParallelModule( num_branches=inverted_residual_setting[index][0], num_blocks=inverted_residual_setting[index][1], num_channels=inverted_residual_setting[index][2], block=self.block, expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn) ) if fcn_head_for_seg: self.transform = ConvBNReLU( sum(inverted_residual_setting[-1][-1]), last_channel, kernel_size=1, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn ) else: self.transform = self.block( sum(inverted_residual_setting[-1][-1]), last_channel, expand_ratio=self.expand_ratio, kernel_sizes=self.kernel_sizes, stride=1, batch_norm_kwargs=self.batch_norm_kwargs, active_fn=self.active_fn, ) # self.classifier = nn.Conv2d(last_channel, # num_classes, # kernel_size=1) self.features = nn.Sequential(*features) self.init_weights() def _transform_inputs(self, inputs): 
"""Transform inputs for decoder. Args: inputs (list[Tensor]): List of multi-level img features. Returns: Tensor: The transformed inputs """ upsampled_inputs = [ resize( input=x, size=inputs[-1].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs ] inputs = torch.cat(upsampled_inputs, dim=1) inputs = self.transform(inputs) return inputs def init_weights(self, pretrained=None): logging.info('=> init weights from normal distribution') for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): x = self.downsamples(x) x = self.features([x]) x = self._transform_inputs(x) # x = self.classifier(x) return x
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 13:33:54 2019

Count each vowel in a word typed by the user and print the totals.

@author: Luis Cobian
"""

word = input("Una palabra: ").lower()

# One pass per vowel; print("A: ", n) emits "A:  n" (two print arguments
# separated by a space), matching the original hand-written output exactly.
for vowel in "aeiou":
    print(vowel.upper() + ": ", word.count(vowel))
nilq/baby-python
python
import pytest import urlpath from cortex.utils.databases import mongo_db
nilq/baby-python
python
from __future__ import unicode_literals import cv2 import numpy as np import mediapipe as mp from tensorflow.keras.models import load_model import cv2 mp_hands = mp.solutions.hands # Hands model mp_drawing = mp.solutions.drawing_utils # Drawing utilities def mediapipe_detection_hands(image, model): # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # for mobile camera image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) # for web camera image.flags.writeable = False # Image is no longer writeable results = model.process(image) # Make prediction image.flags.writeable = True # Image is now writeable image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # COLOR COVERSION RGB 2 BGR return image, results def draw_styled_landmarks(image, results): # Draw left hand connections if results.multi_hand_landmarks and results.multi_handedness: for index in range(len(results.multi_hand_landmarks)) : classification = results.multi_handedness[index].classification if classification[0].label == 'Right': mp_drawing.draw_landmarks(image, results.multi_hand_landmarks[index], mp_hands.HAND_CONNECTIONS, mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=4), mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=2) ) # Draw right hand connections else : mp_drawing.draw_landmarks(image, results.multi_hand_landmarks[index], mp_hands.HAND_CONNECTIONS, mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=4), mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2) ) # get left hand and right hand landmarks if there. 
def extract_keypoints(results): lh = np.zeros(21*3) rh = np.zeros(21*3) for index in range(len(results.multi_hand_landmarks)) : classification = results.multi_handedness[index].classification if classification[0].label == 'Right': rh = np.array([[res.x, res.y, res.z] for res in results.multi_hand_landmarks[index].landmark]).flatten() else : lh = np.array([[res.x, res.y, res.z] for res in results.multi_hand_landmarks[index].landmark]).flatten() return np.concatenate([lh, rh]) def prob_viz(res, action, input_frame): output_frame = input_frame.copy() cv2.rectangle(output_frame, (0,60), (int(res[1]*100), 90), (245,117,16), -1) cv2.putText(output_frame, action, (0, 85), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA) return output_frame def main(): modelname = input("model name : " ) # name of the model # Load actions array from CSV file actions = np.genfromtxt(modelname+'.txt',dtype=None, delimiter=',',encoding='UTF-8') print(actions) model = load_model(modelname) #loading model no_frames = int(input("number of frames per sequence for prediction: " )) # Cam source that you use (normally 0) no_cam = int(input("you cam source number (try 0 or 1 or 2): " )) threshold = float(input("accuracy threshold: " )) stability_coff = int(input("get highest prediction in last ... : ")) # 1. New detection variables sequence = [] sentence = [] predictions = [] cap = cv2.VideoCapture(no_cam) # Set mediapipe model with mp_hands.Hands(max_num_hands=2,min_detection_confidence=0.7,min_tracking_confidence=0.5) as hands : while cap.isOpened(): # Read feed success, frame = cap.read() if not success: print("Ignoring empty camera frame.") # If loading a video, use 'break' instead of 'continue'. continue # Make detections image, results = mediapipe_detection_hands(frame, hands) # Draw landmarks draw_styled_landmarks(image, results) # 2. 
Prediction logic if results.multi_hand_landmarks and results.multi_handedness: keypoints = extract_keypoints(results) # ignore frames with no hands if not np.array_equal(keypoints , np.zeros(126)): sequence.append(keypoints) # sequence = sequence[-30:] # do predictions when enough frames are aquired if len(sequence) == no_frames: res = model.predict(np.expand_dims(sequence, axis=0))[0] if np.amax(res) > threshold: predictions.append(np.argmax(res)) sequence = [] #empty sequence to collect new frames #3 Viz logic if len(predictions)>= int(stability_coff) and np.unique(predictions[-stability_coff:])[0]==np.argmax(res): predictions = predictions[int(-stability_coff):] if len(sentence) > 0: if actions[np.argmax(res)] != sentence[-1]: sentence.append(actions[np.argmax(res)]) print(sentence[-1]) else: sentence.append(actions[np.argmax(res)]) print(sentence[-1]) if len(sentence) > 5: sentence = sentence[-5:] # Viz probabilities if len(sentence) > 0: image = prob_viz((np.argmax(res),np.amax(res)), actions[np.argmax(res)], image) cv2.rectangle(image, (0,0), (640, 40), (245, 117, 16), -1) cv2.putText(image, ' '.join(sentence), (3,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA) # Show to screen cv2.imshow('OpenCV Feed', image) # Break gracefully if cv2.waitKey(10) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
nilq/baby-python
python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import os import unittest from paddlenlp.datasets import load_dataset import sys sys.path.append(os.pardir) from common_test import CpuCommonTest import util import unittest def get_examples(mode='train'): """ dataset[0][0] examples """ examples = { 'train': ({'text_a': '原告李某1。委托代理人蒋水光,湘阴县南湖法律服务所法律工作者。被告夏某1。\n\n原告李某1诉称,2015年3月6日,被告夏某1因欠缺资金,向丰辉借款70000元。因丰辉又欠他70000元,' '2015年3月23日,他向丰辉追收欠款时,丰辉将被告夏某1所欠其70000元债权转让予他,被告夏某1同意转让并向他出具欠条一张。后被告夏某1经他多次催要,至今尚未归还本金及利息。为维护他的合法权益,' '特起诉至法院,请求法院依法判决:1、被告夏某1偿还其本金70000元及利息;2、由被告夏某1承担本案诉讼费。被告夏某1未提出答辩,亦未提交任何证据,本院视为其放弃答辩、举证、质证的权利,由此造' '成对其不利的法律后果由其自行承担。经审理查明,原告李某1与被告夏某1经人介绍相识。被告夏某1因资金周转困难,向丰辉借款70000元。丰辉因资金周转困难向原告李某1借款70000元。2015年3月23日,' '三方在原告李某1家里达成一致意见,由被告夏某1向原告李某1归还借款70000元,归还时间为2016年3月23日之前,同时被告夏某1向原告李某1出具欠条一张,内容为:“今欠到李某1人币柒万元整。(¥70000元)' '欠款归还之日李某1将丰辉打给我7万元收条一并归还。证明:凭此条兑换丰辉收条李某12015年3月23日夏某1归还时间一年之内430624195801035630”。后原告李某1多次催要未果,遂诉至法院。以上事实有原告' '当庭陈述、欠条及庭审笔录等在卷证实,足以认定。\n', 'text_b': '原告:牛某1,男,1972年11月10日出生,汉族,无业,住山西省太原市。委托诉讼代理人:李晓星,山西新国泰律师事务所律师。委托诉讼' '代理人:崔飞杰,山西新国泰律师事务所律师。被告:山西智伟基业房地产开发有限公司,住山西省太原市小店区通达西街29号7-13号房,统一社会信用代码×××。法定代表人:李欣,总经理。被告:冯某1,男,' '1970年6月29日出生,汉族,住山西省太原市。被告:康某1,女,1973年7月26日出生,汉族,住山西省太原市。以上被告共同委托诉讼代理人:李建业,男,1955年8月30日出生,汉族,山西智伟基业房地产开' '发有限公司职工,住山西省太原市。\n\n原告牛某1向本院提出诉讼请求:1.请求法院判令三被告立即共同连带归还原告借款本金3000000元,并按照年利率24%的标准支付原告自2013年6月10日起至三被告实际' 
'还清全部欠款之日的利息,该利息暂计至2017年11月9日为3230000元;2.请求法院判令三被告承担本案全部诉讼费用。事实和理由:2011年11月2日,原告与被告冯某1、被告康某1签订了《借款协议书》,约定' '原告出借给被告冯某1、被告康某1人民币300万元,借款期限为12个月,自2011年11月2日至2012年10月31日。双方约定按每月3%计算利息,被告按季度向原告支付利息。上述合同签订后,原告依约向被告支付了' '全部款项,但被告一直未能按时支付利息,借款期限届满后也未能归还本金。2014年2月10日,被告山西智伟基业房地产开发有限公司向原告出具《承诺书》,明确了其向原告借款的事实,并承诺于2014年3月、6月' '向原告支付利息,于2014年11月2日前向原告还清全部本息。该承诺书出具后,原告与被告冯某1、被告康某1于2014年3月5日签订了《借款补充协议书》,约定将前述借款延期至2014年11月2日。但借款期限届满后' '三被告仍未依约还款,经原告多次催要无果,故诉至法院,请求法院依法支持原告的诉讼请求。被告山西智伟基业房地产开发有限公司、冯某1、康某1承认原告牛某1提出的全部诉讼请求。\n', 'text_c': '原告:' '王某1,女,1988年6月3日出生,汉族,无固定职业,住哈尔滨市道里区。被告:路某1,男,1987年9月9日出生,汉族,无固定职业,(户籍地)住哈尔滨市南岗区,现住哈尔滨市道里区。\n\n原告王某1向本院提' '出诉讼请求:1.判令路某1给付王某1借款本金7.7万元,利息从借贷日开始计算到实际给付之日止;2.由路某1承担本案诉讼费用。事实和理由:王某1与路某1经业务关系相识,路某1因经营需要于2017年1月24日向' '王某1借款5万元,约定月利息2%,2017年3月17日向王某1借款27000元。路某1承诺2017年5月1日前偿还两笔借款,还款期限届满后,王某1多次找到路某1追索借款未果,故诉至法院。被告路某1未出庭,未答辩。' '原告为证实其诉讼请求成立向本院提交了两份证据,1.2017年1月24日,路某1出具的借条一份;证明路某1向王某1第一次借款5万元的事实,利息约定月利率2%返给王某1,还款期限为借款之日起至2017年5月1日止。' '2.借条一份;证明被告2017年3月17日向路某1借款27000元,月利息2%,2017年5月1还清,这一条是后补的。根据当事人的陈述和经审查确认的证据,本院认定事实如下:2017年1月14日,路某1向王某1借款50000元,' '并出具借条一份,约定:借款日期为2017年1月24日至2017年5月1日;借款利息为月利息2%。后路某1又向王某1借款,2017年5月17日,路某1向王某1出具借条一份,约定:借款金额27000元,借款日期为2017年3月17日' '至2017年5月1日,借款利息为月利息2%。王某1多次催讨未果,诉至法院。\n', 'label': 1}), } return examples[mode] class TestCail2019_scm(CpuCommonTest): """ clue tnews case """ def setUp(self): """ check input params & datasets all flies """ self.config['path_or_read_func'] = 'cail2019_scm' self.config['splits'] = ['train', 'dev','test'] def test_train_set(self): """ check train.json length, label,text """ expected_ds_num = 3 expected_len = 5102 expected_train= get_examples('train') ds = load_dataset(**self.config) self.check_output_equal(len(ds), expected_ds_num) self.check_output_equal(len(ds[0]), expected_len) self.check_output_equal(expected_train['text_a'], ds[0][0]['text_a']) self.check_output_equal(int(expected_train['label']), ds[0][0]['label']) class TestCail2019NoSplitDataFiles(CpuCommonTest): """ check no splits """ 
def setUp(self): self.config['path_or_read_func'] = 'cail2019_scm' @util.assert_raises def test_no_split_datafiles(self): load_dataset(**self.config) if __name__ == "__main__": unittest.main()
nilq/baby-python
python
from typing import Any, Dict, Optional

from .ml_model import MLModel
from .modeler import Modeler


class MLModeler(Modeler):
    """
    Base class for H1st ML Modelers. Has capabilities that are specific to MLModels.
    """

    def train_model(self, prepared_data: Dict[str, Any]) -> MLModel:
        """
        Implement logic of training model.

        Subclasses override this; the base implementation is a no-op.

        :param prepared_data: prepared data from ``prep`` method
        """

    def train_base_model(self, prepared_data: Dict[str, Any]) -> Any:
        """
        Implement logic of training the base/native model.

        Subclasses override this; the base implementation is a no-op.

        :param prepared_data: prepared data
        """

    def build_model(self, data: Optional[Dict[str, Any]] = None) -> MLModel:
        """
        Implement logic to create the corresponding MLModel, including both
        training and evaluation.

        :param data: prepared training data; when falsy (``None`` or empty),
            ``self.load_data()`` is called to obtain it.
        :return: a trained ``MLModel`` instance with ``base_model``, ``stats``
            and ``metrics`` populated.
        :raises ValueError: if ``self.model_class`` is not set.
        """
        # An empty dict is treated the same as None (original behavior kept).
        if not data:
            data = self.load_data()
        base_model = self.train_base_model(data)

        if self.model_class is None:
            raise ValueError('Model class not provided')

        ml_model = self.model_class()
        ml_model.base_model = base_model

        # Pass stats to the model; copied so the modeler's own stats stay
        # independent of later mutations on the model.
        if self.stats is not None:
            ml_model.stats = self.stats.copy()
        # Compute metrics and pass to the model.
        ml_model.metrics = self.evaluate_model(data, ml_model)
        return ml_model
nilq/baby-python
python
# PLEASE NOTE
# ===========
#
# The code in this module is a slightly modified version of the code from
# the Chemistry Toolkit Rosetta Wiki.
# http://ctr.wikia.com/wiki/Calculate_TPSA
#
# The algorithm follows the approach of Ertl et al., which is to sum partial
# surface contributions based on fragments defined in a SMARTS string.
# Ertl, Rohde, and Selzer (J. Med. Chem., 43:3714-3717, 2000)
# The SMARTS string is from TJ O'Donnell's CHORD chemistry extension for
# PostgreSQL.

# Core module imports
import collections
import logging
import os

# Third-party module imports
import indigo as indigo_module


def tpsa_count_matches(indigo, subsearch, mol_obj):
    """Return the number of substructure matches of *subsearch* in *mol_obj*.

    Helper function for tpsa().
    """
    matcher = indigo.substructureMatcher(mol_obj)
    return matcher.countMatches(subsearch)


def tpsa(smiles):
    """Compute the topological polar surface area of a molecule specified as
    a SMILES string.

    :return: the TPSA as a float, or ``False`` if the computation failed
        (errors are logged, not raised — original contract kept).
    """
    return_value = False

    # Variables to store the pattern definitions
    Pattern = collections.namedtuple("Pattern", ["value", "subsearch"])
    patterns = []

    try:
        # Initialise the Indigo library
        indigo = indigo_module.Indigo()

        # Build the path to the tpsa data file, relative to this file.
        fn = os.path.join(os.path.dirname(__file__), 'data/tpsa.tab')

        # BUG FIX: close the data file deterministically (the original leaked
        # the handle from a bare open()).  The first line is a header.
        with open(fn) as data_file:
            lines = data_file.readlines()[1:]

        for line in lines:
            # Extract the fields: contribution value, SMARTS pattern, comment.
            value, smarts, comment = line.split("\t")
            subsearch = indigo.loadSmarts(smarts)
            # Store for later use
            patterns.append(Pattern(float(value), subsearch))

        # Load the molecule
        mol = indigo.loadMolecule(smiles)

        # Molecules MUST be dearomatized for this TPSA calculation to work
        # correctly.
        mol.dearomatize()

        return_value = sum(
            tpsa_count_matches(indigo, pattern.subsearch, mol) * pattern.value
            for pattern in patterns
        )
    # BUG FIX: IndigoException was referenced unqualified, which itself raised
    # NameError whenever the handler was entered — it lives on the indigo module.
    except indigo_module.IndigoException as e:
        logging.error("Indigo exception: %s" % (e))
    except Exception as e:
        logging.error("Exception: %s" % (e))

    # BUG FIX: the original `finally: return return_value` also swallowed
    # exceptions raised inside the handlers and even SystemExit/KeyboardInterrupt
    # raised during the try body; a plain return preserves the success/failure
    # contract without that hazard.
    return return_value
nilq/baby-python
python
from io import BytesIO
from os import path

from PIL import Image

# Each entry in the list contains the information necessary to render the final
# image with each of the layers resized and cropped accordingly. Some of this
# information is also required by the JavaScript on the page.
#
# Per-template keys:
#   frame  - (w, h) of the final output image (the device frame artwork)
#   screen - (w, h) of the device screen area inside the frame
#   offset - (x, y) of the screen's top-left corner within the frame
#   panel  - height in pixels of the on-screen panel strip
TEMPLATES = {
    'bq-aquaris': {
        'title': 'BQ Aquaris E4.5 Ubuntu Edition',
        'frame': (671, 1305),
        'screen': (540, 960),
        'offset': (65, 145),
        'panel': 37,
    },
    'meizu-mx3': {
        'title': 'Meizu MX3',
        'frame': (1346, 2313),
        'screen': (1080, 1800),
        'offset': (131, 213),
        'panel': 73,
    },
}


def blit_source_image(output, template, image, panel):
    """
    Blit the source image to the output image, scaling and cropping as needed.

    ``output`` is mutated in place via ``paste``.  ``image`` is anything
    ``PIL.Image.open`` accepts (path or file-like).  When ``panel`` is truthy,
    the screenshot is shortened and shifted down to leave room for the panel.
    """
    img = Image.open(image)
    screen = TEMPLATES[template]['screen']
    # Scale factor that makes the screenshot exactly as wide as the screen.
    factor = float(screen[0]) / float(img.size[0])
    dimensions = [int(i * factor) for i in img.size]
    if panel:
        dimensions[1] -= TEMPLATES[template]['panel']
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS); confirm the pinned Pillow version before upgrading.
    img = img.resize(dimensions, Image.ANTIALIAS)
    # Clip to the screen rectangle in case the scaled image is taller.
    img = img.crop([0, 0] + [min(*i) for i in zip(dimensions, screen)])
    offset = list(TEMPLATES[template]['offset'])
    if panel:
        offset[1] += TEMPLATES[template]['panel']
    output.paste(img, tuple(offset))


def blit_template_image(output, template, filename):
    """
    Blit the specified file from the template to the output image.

    Returns a NEW composited image (does not mutate ``output``); the layer is
    loaded from img/<template>/<filename> next to this module.
    """
    img = Image.open(path.join(path.dirname(__file__), 'img', template, filename))
    return Image.alpha_composite(output, img)


def generate_device_art(template, image, panel, glossy):
    """
    Combine the layers for the template into a final image.

    Layer order: screenshot, optional panel strip, device frame, optional
    gloss overlay.  Returns the rendered image as PNG bytes.
    """
    output = Image.new('RGBA', TEMPLATES[template]['frame'])
    blit_source_image(output, template, image, panel)
    if panel:
        output = blit_template_image(output, template, 'panel.png')
    output = blit_template_image(output, template, 'frame.png')
    if glossy:
        output = blit_template_image(output, template, 'gloss.png')
    response = BytesIO()
    output.save(response, format='PNG')
    return response.getvalue()
nilq/baby-python
python
import json
from urllib.parse import parse_qsl, urlencode

import falcon
import requests
from requests_oauthlib import OAuth1Session

from sikre import settings
from sikre.models.users import User
from sikre.resources.auth import utils
from sikre.utils.logs import logger


class LinkedinAuth(object):
    """Falcon resource for LinkedIn OAuth2 sign-in.

    Only POST is meaningful; every other verb answers 405 (OPTIONS answers 200
    for CORS preflight).
    """

    def on_post(self, req, res):
        # NOTE(review): this handler body is unported Flask code pasted into a
        # Falcon resource.  `app`, `request`, `jsonify`, `db` and
        # `create_token` are all undefined in this module, so calling this
        # endpoint raises NameError at runtime.  It needs to be rewritten in
        # terms of `req`/`res` (and the imported `utils`/`logger`) before use.
        @app.route('/auth/linkedin', methods=['POST'])
        def linkedin():
            access_token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
            people_api_url = 'https://api.linkedin.com/v1/people/~:(id,first-name,last-name,email-address)'
            payload = dict(client_id=request.json['clientId'],
                           redirect_uri=request.json['redirectUri'],
                           client_secret=app.config['LINKEDIN_SECRET'],
                           code=request.json['code'],
                           grant_type='authorization_code')

            # Step 1. Exchange authorization code for access token.
            r = requests.post(access_token_url, data=payload)
            access_token = json.loads(r.text)
            params = dict(oauth2_access_token=access_token['access_token'],
                          format='json')

            # Step 2. Retrieve information about the current user.
            r = requests.get(people_api_url, params=params)
            profile = json.loads(r.text)

            # Existing user: just issue a token; otherwise create the account
            # first, then issue a token.
            user = User.query.filter_by(linkedin=profile['id']).first()
            if user:
                token = create_token(user)
                return jsonify(token=token)
            u = User(linkedin=profile['id'],
                     display_name=profile['firstName'] + ' ' + profile['lastName'])
            db.session.add(u)
            db.session.commit()
            token = create_token(u)
            return jsonify(token=token)

    def on_options(self, req, res):
        """Acknowledge the OPTIONS method.
        """
        res.status = falcon.HTTP_200

    def on_get(self, req, res):
        # GET is not supported on this endpoint.
        raise falcon.HTTPError(falcon.HTTP_405,
                               title="Client error",
                               description=req.method + " method not allowed.",
                               href=settings.__docs__)

    def on_put(self, req, res):
        # PUT is not supported on this endpoint.
        raise falcon.HTTPError(falcon.HTTP_405,
                               title="Client error",
                               description=req.method + " method not allowed.",
                               href=settings.__docs__)

    def on_update(self, req, res):
        # NOTE(review): UPDATE is not a standard HTTP method; Falcon will
        # never route here — presumably PATCH was intended.
        raise falcon.HTTPError(falcon.HTTP_405,
                               title="Client error",
                               description=req.method + " method not allowed.",
                               href=settings.__docs__)

    def on_delete(self, req, res):
        # DELETE is not supported on this endpoint.
        raise falcon.HTTPError(falcon.HTTP_405,
                               title="Client error",
                               description=req.method + " method not allowed.",
                               href=settings.__docs__)
nilq/baby-python
python
#!/usr/bin/env python
# coding: utf-8

"""
mq modules defines Message Queue clients and some tools.
"""
from .rabbit import RabbitMQConnection
from .kafka import KafkaConnection
from .consts import MQTypes


class MQClientFactory():
    """Factory for Message Queue connections, producers and consumers."""

    def __init__(self, mq_type):
        """
        :param mq_type: Message queue type from ``MQTypes``.
        :raises RuntimeError: if ``mq_type`` is not a supported type.
        """
        self._mq_type = mq_type
        if mq_type not in MQTypes.values:
            raise RuntimeError('Unsupported MQ type "%s"' % mq_type)

    @staticmethod
    def create_connection(mq_type, conf):
        """
        A factory for MQ Connection.

        :param mq_type: Message queue type from MQTypes
        :param conf: The configuration dict
        :return: a connection object tagged with its ``mq_type``
        :raises RuntimeError: for unsupported MQ types.
        """
        if mq_type == MQTypes.RabbitMQ:
            conn = RabbitMQConnection(conf)
        elif mq_type == MQTypes.Kafka:
            conn = KafkaConnection(conf)
        else:
            raise RuntimeError('Unsupported MQ type "%s"' % mq_type)

        # Tag the connection with its type so callers can introspect it.
        conn.mq_type = mq_type
        return conn

    @staticmethod
    def create_producer(mq_type, conf):
        """
        Create a MQ producer instance.

        :param mq_type: Message queue type from MQTypes
        :param conf: The configuration dict
        :return: a producer created from a fresh connection
        """
        conn = MQClientFactory.create_connection(mq_type, conf)
        producer = conn.create_producer()
        return producer

    @staticmethod
    def create_consumer(mq_type, conf):
        """
        Create a MQ consumer instance.

        :param mq_type: Message queue type from MQTypes
        :param conf: The configuration dict
        :return: a consumer created from a fresh connection
        """
        conn = MQClientFactory.create_connection(mq_type, conf)
        # BUG FIX: the result was previously bound to a variable named
        # ``producer`` — misleading in a consumer factory.
        consumer = conn.create_consumer()
        return consumer
nilq/baby-python
python
"""PyICP-SLAM driver: ICP scan-to-scan odometry plus Scan Context loop
detection with pose-graph optimization over a KITTI velodyne sequence,
recording the evolving trajectory as a video.
"""
import os
import sys
import csv
import copy
import time
import random
import argparse

import numpy as np
np.set_printoptions(precision=4)

from matplotlib.animation import FFMpegWriter

from tqdm import tqdm

# from minisam import * # how to install minisam: https://minisam.readthedocs.io/install.html

# NOTE(review): star imports — PoseGraphManager, ScanContextManager,
# PoseGraphResultSaver, yawdeg2se3 and (presumably) plt come from these.
from slam_utils.ScanContextManager import *
from slam_utils.PoseGraphManager import *
from slam_utils.UtilsMisc import *
import slam_utils.UtilsPointcloud as Ptutils
import slam_utils.ICP as ICP

# params
parser = argparse.ArgumentParser(description='PyICP SLAM arguments')
parser.add_argument('--num_icp_points', type=int, default=10000) # 5000 is enough for real time
parser.add_argument('--num_rings', type=int, default=20) # same as the original paper
parser.add_argument('--num_sectors', type=int, default=60) # same as the original paper
parser.add_argument('--num_candidates', type=int, default=10) # must be int
parser.add_argument('--try_gap_loop_detection', type=int, default=10) # same as the original paper
parser.add_argument('--loop_threshold', type=float, default=0.11) # 0.11 is usually safe (for avoiding false loop closure)
parser.add_argument('--data_base_dir', type=str, default='/your/path/.../data_odometry_velodyne/dataset/sequences')
parser.add_argument('--sequence_idx', type=str, default='00')
parser.add_argument('--save_gap', type=int, default=300)
args = parser.parse_args()

# dataset: enumerate the .bin scans of the chosen KITTI sequence
sequence_dir = os.path.join(args.data_base_dir, args.sequence_idx, 'velodyne')
sequence_manager = Ptutils.KittiScanDirManager(sequence_dir)
scan_paths = sequence_manager.scan_fullpaths
num_frames = len(scan_paths)

# Pose Graph Manager (for back-end optimization) initialization
PGM = PoseGraphManager()
PGM.addPriorFactor()

# Result saver
save_dir = "result/" + args.sequence_idx
if not os.path.exists(save_dir): os.makedirs(save_dir)
ResultSaver = PoseGraphResultSaver(init_pose=PGM.curr_se3,
                                   save_gap=args.save_gap,
                                   num_frames=num_frames,
                                   seq_idx=args.sequence_idx,
                                   save_dir=save_dir)

# Scan Context Manager (for loop detection) initialization
SCM = ScanContextManager(shape=[args.num_rings, args.num_sectors],
                         num_candidates=args.num_candidates,
                         threshold=args.loop_threshold)

# for save the results as a video
fig_idx = 1
fig = plt.figure(fig_idx)
writer = FFMpegWriter(fps=15)
video_name = args.sequence_idx + "_" + str(args.num_icp_points) + ".mp4"
num_frames_to_skip_to_show = 5
num_frames_to_save = np.floor(num_frames / num_frames_to_skip_to_show)

with writer.saving(fig, video_name, num_frames_to_save): # this video saving part is optional
    # @@@ MAIN @@@: data stream
    for for_idx, scan_path in tqdm(enumerate(scan_paths), total=num_frames, mininterval=5.0):
        # get current information: raw scan + downsample for ICP speed
        curr_scan_pts = Ptutils.readScan(scan_path)
        curr_scan_down_pts = Ptutils.random_sampling(curr_scan_pts, num_points=args.num_icp_points)

        # save current node
        PGM.curr_node_idx = for_idx # make start with 0
        SCM.addNode(node_idx=PGM.curr_node_idx, ptcloud=curr_scan_down_pts)
        if (PGM.curr_node_idx == 0):
            # first frame: nothing to align against yet
            PGM.prev_node_idx = PGM.curr_node_idx
            prev_scan_pts = copy.deepcopy(curr_scan_pts)
            icp_initial = np.eye(4)
            continue

        # calc odometry: odom_transform is the 4x4 relative SE(3) transform
        prev_scan_down_pts = Ptutils.random_sampling(prev_scan_pts, num_points=args.num_icp_points)
        odom_transform, _, iteration_num = ICP.icp(curr_scan_down_pts, prev_scan_down_pts, init_pose=icp_initial, max_iterations=50,tolerance=0.0001)
        print("帧间位姿")
        print(odom_transform)
        # NOTE(review): "s%" in the format below looks like a typo for "%s";
        # print() receives it as a plain literal either way.
        print("迭代次数:s%",iteration_num)

        # update the current (moved) pose
        PGM.curr_se3 = np.matmul(PGM.curr_se3, odom_transform)
        icp_initial = odom_transform # assumption: constant velocity model (for better next ICP converges)

        # add the odometry factor to the graph
        PGM.addOdometryFactor(odom_transform)

        # renewal the prev information
        PGM.prev_node_idx = PGM.curr_node_idx
        prev_scan_pts = copy.deepcopy(curr_scan_pts)

        # loop detection and optimize the graph
        if (PGM.curr_node_idx > 1 and PGM.curr_node_idx % args.try_gap_loop_detection == 0):
            # 1/ loop detection
            loop_idx, loop_dist, yaw_diff_deg = SCM.detectLoop()
            # NOTE(review): `loop_idx == None` should be `loop_idx is None`.
            if (loop_idx == None): # NOT FOUND
                pass
            else:
                print("Loop event detected: ", PGM.curr_node_idx, loop_idx, loop_dist)
                # 2-1/ add the loop factor (ICP seeded with the SC yaw estimate)
                loop_scan_down_pts = SCM.getPtcloud(loop_idx)
                loop_transform, _, _ = ICP.icp(curr_scan_down_pts, loop_scan_down_pts, init_pose=yawdeg2se3(yaw_diff_deg), max_iterations=20)
                PGM.addLoopFactor(loop_transform, loop_idx)

                # 2-2/ graph optimization
                PGM.optimizePoseGraph()

                # 2-3/ save optimized poses
                ResultSaver.saveOptimizedPoseGraphResult(PGM.curr_node_idx, PGM.graph_optimized)

        # save the ICP odometry pose result (no loop closure)
        ResultSaver.saveUnoptimizedPoseGraphResult(PGM.curr_se3, PGM.curr_node_idx)
        if (for_idx % num_frames_to_skip_to_show == 0):
            ResultSaver.vizCurrentTrajectory(fig_idx=fig_idx)
            writer.grab_frame()
nilq/baby-python
python
""" ========================== Non blocking stream reader ========================== """ import time from typing import Optional, TypeVar, Union from threading import Thread from queue import Queue, Empty Stdout = TypeVar('Stdout') Seconds = TypeVar('Seconds') ######################################################################## class NonBlockingStreamReader: """Artificial `timeout` for blocking process.""" # ---------------------------------------------------------------------- def __init__(self, stream: Stdout): """""" self.stream_stdout = stream self.queue_messages = Queue() self.kepp_alive = True def _populateQueue(stream, queue): """Collect lines from 'stream' and put them in 'quque'.""" while self.kepp_alive: line = stream.readline() if line: queue.put(line) time.sleep(0.1) self.thread_collector = Thread(target=_populateQueue, args=(self.stream_stdout, self.queue_messages)) self.thread_collector.daemon = True self.thread_collector.start() # start collecting lines from the stream # ---------------------------------------------------------------------- def readline(self, timeout: Optional[Seconds] = 0.1) -> Union[str, None]: """Read lines from queue object.""" try: return self.queue_messages.get(block=timeout is not None, timeout=timeout) except Empty: return None # ---------------------------------------------------------------------- def stop(self) -> None: """Stop the readline.""" self.kepp_alive = False
nilq/baby-python
python
from aws_ssm_copy.copy import main
nilq/baby-python
python
import torch import torch.nn as nn import numpy as np import torch.nn.functional as F class Conv2DBatchNorm(nn.Module): def __init__( self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, is_batchnorm=True, ): super(Conv2DBatchNorm, self).__init__() conv_mod = nn.Conv2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=dilation, ) if is_batchnorm: self.cb_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters))) else: self.cb_unit = nn.Sequential(conv_mod) def forward(self, inputs): outputs = self.cb_unit(inputs) return outputs class Conv2DGroupNorm(nn.Module): def __init__( self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16 ): super(Conv2DGroupNorm, self).__init__() conv_mod = nn.Conv2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=dilation, ) self.cg_unit = nn.Sequential(conv_mod, nn.GroupNorm(n_groups, int(n_filters))) def forward(self, inputs): outputs = self.cg_unit(inputs) return outputs class Deconv2DBatchNorm(nn.Module): def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): super(Deconv2DBatchNorm, self).__init__() self.dcb_unit = nn.Sequential( nn.ConvTranspose2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, ), nn.BatchNorm2d(int(n_filters)), ) def forward(self, inputs): outputs = self.dcb_unit(inputs) return outputs class Conv2DBatchNormRelu(nn.Module): def __init__( self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, is_batchnorm=True, ): super(Conv2DBatchNormRelu, self).__init__() conv_mod = nn.Conv2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=dilation, ) if is_batchnorm: self.cbr_unit = nn.Sequential( conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True) ) else: self.cbr_unit = 
nn.Sequential(conv_mod, nn.ReLU(inplace=True)) def forward(self, inputs): outputs = self.cbr_unit(inputs) return outputs class Conv2DGroupNormRelu(nn.Module): def __init__( self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16 ): super(Conv2DGroupNormRelu, self).__init__() conv_mod = nn.Conv2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=dilation, ) self.cgr_unit = nn.Sequential( conv_mod, nn.GroupNorm(n_groups, int(n_filters)), nn.ReLU(inplace=True) ) def forward(self, inputs): outputs = self.cgr_unit(inputs) return outputs class Deconv2DBatchNormRelu(nn.Module): def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): super(Deconv2DBatchNormRelu, self).__init__() self.dcbr_unit = nn.Sequential( nn.ConvTranspose2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, ), nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True), ) def forward(self, inputs): outputs = self.dcbr_unit(inputs) return outputs class SegnetDown2(nn.Module): def __init__(self, in_size, out_size): super(SegnetDown2, self).__init__() self.conv1 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1) self.conv2 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1) self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True) def forward(self, inputs): outputs = self.conv1(inputs) outputs = self.conv2(outputs) unpooled_shape = outputs.size() outputs, indices = self.maxpool_with_argmax(outputs) return outputs, indices, unpooled_shape class SegnetDown3(nn.Module): def __init__(self, in_size, out_size): super(SegnetDown3, self).__init__() self.conv1 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1) self.conv2 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1) self.conv3 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1) self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True) def forward(self, inputs): outputs = self.conv1(inputs) 
outputs = self.conv2(outputs) outputs = self.conv3(outputs) unpooled_shape = outputs.size() outputs, indices = self.maxpool_with_argmax(outputs) return outputs, indices, unpooled_shape class SegnetUp2(nn.Module): def __init__(self, in_size, out_size): super(SegnetUp2, self).__init__() self.unpool = nn.MaxUnpool2d(2, 2) self.conv1 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1) self.conv2 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1) def forward(self, inputs, indices, output_shape): outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape) outputs = self.conv1(outputs) outputs = self.conv2(outputs) return outputs class SegnetUp3(nn.Module): def __init__(self, in_size, out_size): super(SegnetUp3, self).__init__() self.unpool = nn.MaxUnpool2d(2, 2) self.conv1 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1) self.conv2 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1) self.conv3 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1) def forward(self, inputs, indices, output_shape): outputs = self.unpool(input=inputs, indices=indices, output_size=output_shape) outputs = self.conv1(outputs) outputs = self.conv2(outputs) outputs = self.conv3(outputs) return outputs class ResidualBlock(nn.Module): expansion = 1 def __init__(self, in_channels, n_filters, stride=1, downsample=None): super(ResidualBlock, self).__init__() self.convbnrelu1 = Conv2DBatchNormRelu(in_channels, n_filters, 3, stride, 1, bias=False) self.convbn2 = Conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False) self.downsample = downsample self.stride = stride self.relu = nn.ReLU(inplace=True) def forward(self, x): residual = x out = self.convbnrelu1(x) out = self.convbn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResidualBottleneck(nn.Module): expansion = 4 def __init__(self, in_channels, n_filters, stride=1, downsample=None): super(ResidualBottleneck, self).__init__() self.convbn1 = 
nn.Conv2DBatchNorm(in_channels, n_filters, k_size=1, bias=False) self.convbn2 = nn.Conv2DBatchNorm( n_filters, n_filters, k_size=3, padding=1, stride=stride, bias=False ) self.convbn3 = nn.Conv2DBatchNorm(n_filters, n_filters * 4, k_size=1, bias=False) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.convbn1(x) out = self.convbn2(out) out = self.convbn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class FRRU(nn.Module): """ Full Resolution Residual Unit for FRRN """ def __init__(self, prev_channels, out_channels, scale, group_norm=False, n_groups=None): super(FRRU, self).__init__() self.scale = scale self.prev_channels = prev_channels self.out_channels = out_channels self.group_norm = group_norm self.n_groups = n_groups if self.group_norm: conv_unit = Conv2DGroupNormRelu self.conv1 = conv_unit( prev_channels + 32, out_channels, k_size=3, stride=1, padding=1, bias=False, n_groups=self.n_groups, ) self.conv2 = conv_unit( out_channels, out_channels, k_size=3, stride=1, padding=1, bias=False, n_groups=self.n_groups, ) else: conv_unit = Conv2DBatchNormRelu self.conv1 = conv_unit( prev_channels + 32, out_channels, k_size=3, stride=1, padding=1, bias=False ) self.conv2 = conv_unit( out_channels, out_channels, k_size=3, stride=1, padding=1, bias=False ) self.conv_res = nn.Conv2d(out_channels, 32, kernel_size=1, stride=1, padding=0) def forward(self, y, z): x = torch.cat([y, nn.MaxPool2d(self.scale, self.scale)(z)], dim=1) y_prime = self.conv1(x) y_prime = self.conv2(y_prime) x = self.conv_res(y_prime) upsample_size = torch.Size([_s * self.scale for _s in y_prime.shape[-2:]]) x = F.upsample(x, size=upsample_size, mode="nearest") z_prime = z + x return y_prime, z_prime class RU(nn.Module): """ Residual Unit for FRRN """ def __init__(self, channels, kernel_size=3, strides=1, group_norm=False, n_groups=None): super(RU, 
self).__init__() self.group_norm = group_norm self.n_groups = n_groups if self.group_norm: self.conv1 = Conv2DGroupNormRelu( channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False, n_groups=self.n_groups, ) self.conv2 = Conv2DGroupNorm( channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False, n_groups=self.n_groups, ) else: self.conv1 = Conv2DBatchNormRelu( channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False ) self.conv2 = Conv2DBatchNorm( channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False ) def forward(self, x): incoming = x x = self.conv1(x) x = self.conv2(x) return x + incoming class ResidualConvUnit(nn.Module): def __init__(self, channels, kernel_size=3): super(ResidualConvUnit, self).__init__() self.residual_conv_unit = nn.Sequential( nn.ReLU(inplace=True), nn.Conv2d(channels, channels, kernel_size=kernel_size), nn.ReLU(inplace=True), nn.Conv2d(channels, channels, kernel_size=kernel_size), ) def forward(self, x): input = x x = self.residual_conv_unit(x) return x + input class MultiResolutionFusion(nn.Module): def __init__(self, channels, up_scale_high, up_scale_low, high_shape, low_shape): super(MultiResolutionFusion, self).__init__() self.up_scale_high = up_scale_high self.up_scale_low = up_scale_low self.conv_high = nn.Conv2d(high_shape[1], channels, kernel_size=3) if low_shape is not None: self.conv_low = nn.Conv2d(low_shape[1], channels, kernel_size=3) def forward(self, x_high, x_low): high_upsampled = F.upsample( self.conv_high(x_high), scale_factor=self.up_scale_high, mode="bilinear" ) if x_low is None: return high_upsampled low_upsampled = F.upsample( self.conv_low(x_low), scale_factor=self.up_scale_low, mode="bilinear" ) return low_upsampled + high_upsampled class ChainedResidualPooling(nn.Module): def __init__(self, channels, input_shape): super(ChainedResidualPooling, self).__init__() self.chained_residual_pooling = nn.Sequential( nn.ReLU(inplace=True), 
nn.MaxPool2d(5, 1, 2), nn.Conv2d(input_shape[1], channels, kernel_size=3), ) def forward(self, x): input = x x = self.chained_residual_pooling(x) return x + input class PyramidPooling(nn.Module): def __init__( self, in_channels, pool_sizes, model_name="pspnet", fusion_mode="cat", is_batchnorm=True ): super(PyramidPooling, self).__init__() bias = not is_batchnorm self.paths = [] for i in range(len(pool_sizes)): self.paths.append( Conv2DBatchNormRelu( in_channels, int(in_channels / len(pool_sizes)), 1, 1, 0, bias=bias, is_batchnorm=is_batchnorm, ) ) self.path_module_list = nn.ModuleList(self.paths) self.pool_sizes = pool_sizes self.model_name = model_name self.fusion_mode = fusion_mode def forward(self, x): h, w = x.shape[2:] if self.training or self.model_name != "icnet": # general settings or pspnet k_sizes = [] strides = [] for pool_size in self.pool_sizes: k_sizes.append((int(h / pool_size), int(w / pool_size))) strides.append((int(h / pool_size), int(w / pool_size))) else: # eval mode and icnet: pre-trained for 1025 x 2049 k_sizes = [(8, 15), (13, 25), (17, 33), (33, 65)] strides = [(5, 10), (10, 20), (16, 32), (33, 65)] if self.fusion_mode == "cat": # pspnet: concat (including x) output_slices = [x] for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)): out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0) # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size)) if self.model_name != "icnet": out = module(out) out = F.interpolate(out, size=(h, w), mode="bilinear", align_corners=True) output_slices.append(out) return torch.cat(output_slices, dim=1) else: # icnet: element-wise sum (including x) pp_sum = x for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)): out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0) # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size)) if self.model_name != "icnet": out = module(out) out = F.interpolate(out, size=(h, w), 
mode="bilinear", align_corners=True) pp_sum = pp_sum + out return pp_sum class BottleNeckPSP(nn.Module): def __init__( self, in_channels, mid_channels, out_channels, stride, dilation=1, is_batchnorm=True ): super(BottleNeckPSP, self).__init__() bias = not is_batchnorm self.cbr1 = Conv2DBatchNormRelu( in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm ) if dilation > 1: self.cbr2 = Conv2DBatchNormRelu( mid_channels, mid_channels, 3, stride=stride, padding=dilation, bias=bias, dilation=dilation, is_batchnorm=is_batchnorm, ) else: self.cbr2 = Conv2DBatchNormRelu( mid_channels, mid_channels, 3, stride=stride, padding=1, bias=bias, dilation=1, is_batchnorm=is_batchnorm, ) self.cb3 = Conv2DBatchNorm( mid_channels, out_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm ) self.cb4 = Conv2DBatchNorm( in_channels, out_channels, 1, stride=stride, padding=0, bias=bias, is_batchnorm=is_batchnorm, ) def forward(self, x): conv = self.cb3(self.cbr2(self.cbr1(x))) residual = self.cb4(x) return F.relu(conv + residual, inplace=True) class BottleNeckIdentifyPSP(nn.Module): def __init__(self, in_channels, mid_channels, stride, dilation=1, is_batchnorm=True): super(BottleNeckIdentifyPSP, self).__init__() bias = not is_batchnorm self.cbr1 = Conv2DBatchNormRelu( in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm ) if dilation > 1: self.cbr2 = Conv2DBatchNormRelu( mid_channels, mid_channels, 3, stride=1, padding=dilation, bias=bias, dilation=dilation, is_batchnorm=is_batchnorm, ) else: self.cbr2 = Conv2DBatchNormRelu( mid_channels, mid_channels, 3, stride=1, padding=1, bias=bias, dilation=1, is_batchnorm=is_batchnorm, ) self.cb3 = Conv2DBatchNorm( mid_channels, in_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm ) def forward(self, x): residual = x x = self.cb3(self.cbr2(self.cbr1(x))) return F.relu(x + residual, inplace=True) class ResidualBlockPSP(nn.Module): def __init__( 
self, n_blocks, in_channels, mid_channels, out_channels, stride, dilation=1, include_range="all", is_batchnorm=True, ): super(ResidualBlockPSP, self).__init__() if dilation > 1: stride = 1 # residualBlockPSP = convBlockPSP + identityBlockPSPs layers = [] if include_range in ["all", "conv"]: layers.append( BottleNeckPSP( in_channels, mid_channels, out_channels, stride, dilation, is_batchnorm=is_batchnorm, ) ) if include_range in ["all", "identity"]: for i in range(n_blocks - 1): layers.append( BottleNeckIdentifyPSP( out_channels, mid_channels, stride, dilation, is_batchnorm=is_batchnorm ) ) self.layers = nn.Sequential(*layers) def forward(self, x): return self.layers(x) class CascadeFeatureFusion(nn.Module): def __init__( self, n_classes, low_in_channels, high_in_channels, out_channels, is_batchnorm=True ): super(CascadeFeatureFusion, self).__init__() bias = not is_batchnorm self.low_dilated_conv_bn = Conv2DBatchNorm( low_in_channels, out_channels, 3, stride=1, padding=2, bias=bias, dilation=2, is_batchnorm=is_batchnorm, ) self.low_classifier_conv = nn.Conv2d( int(low_in_channels), int(n_classes), kernel_size=1, padding=0, stride=1, bias=True, dilation=1, ) # Train only self.high_proj_conv_bn = Conv2DBatchNorm( high_in_channels, out_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm, ) def forward(self, x_low, x_high): x_low_upsampled = F.interpolate( x_low, size=get_interp_size(x_low, z_factor=2), mode="bilinear", align_corners=True ) low_cls = self.low_classifier_conv(x_low_upsampled) low_fm = self.low_dilated_conv_bn(x_low_upsampled) high_fm = self.high_proj_conv_bn(x_high) high_fused_fm = F.relu(low_fm + high_fm, inplace=True) return high_fused_fm, low_cls def get_interp_size(input, s_factor=1, z_factor=1): # for caffe ori_h, ori_w = input.shape[2:] # shrink (s_factor >= 1) ori_h = (ori_h - 1) / s_factor + 1 ori_w = (ori_w - 1) / s_factor + 1 # zoom (z_factor >= 1) ori_h = ori_h + ori_h * (z_factor - 1) ori_w = ori_w + ori_w * (z_factor - 1) 
resize_shape = (int(ori_h), int(ori_w)) return resize_shape def interp(input, output_size, mode="bilinear"): n, c, ih, iw = input.shape oh, ow = output_size # normalize to [-1, 1] h = torch.arange(0, oh, dtype=torch.float, device=input.device) / (oh - 1) * 2 - 1 w = torch.arange(0, ow, dtype=torch.float, device=input.device) / (ow - 1) * 2 - 1 grid = torch.zeros(oh, ow, 2, dtype=torch.float, device=input.device) grid[:, :, 0] = w.unsqueeze(0).repeat(oh, 1) grid[:, :, 1] = h.unsqueeze(0).repeat(ow, 1).transpose(0, 1) grid = grid.unsqueeze(0).repeat(n, 1, 1, 1) # grid.shape: [n, oh, ow, 2] if input.is_cuda: grid = grid.cuda() return F.grid_sample(input, grid, mode=mode) def get_upsampling_weight(in_channels, out_channels, kernel_size): """Make a 2D bilinear kernel suitable for upsampling""" factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float()
nilq/baby-python
python
class Solution:
    def XXX(self, s: str) -> bool:
        """Return True iff every bracket in ``s`` is properly matched and nested.

        Classic stack scan: opening brackets are pushed; a closing bracket must
        match the bracket on top of the stack.
        """
        # Map each closing bracket to its opening partner.
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for char in s:
            if char in pairs:
                # Closing bracket: the stack must be non-empty and its top
                # must be the matching opener.
                if not stack or stack.pop() != pairs[char]:
                    return False
            else:
                # Opening bracket: remember it until its partner arrives.
                stack.append(char)
        # Valid only if every opening bracket was consumed.
        return not stack
nilq/baby-python
python
from setuptools import setup

# Use the README as the long description shown on PyPI.
# Explicit UTF-8: relying on the locale default encoding can break the build
# on systems whose default is not UTF-8 (e.g. some Windows locales).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    url="https://github.com/dave-lanigan/kyber-api-python",
    author="Daithi",
    author_email="dav.lanigan@gmail.com",
    name="kybernet",
    version="0.0.1",
    description="Unofficial python wrapper for Kyber Network API.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=["client"],
    packages=["kybernet"],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires=">=3.6",
    install_requires=["requests"],
    # Optional extras: `pip install kybernet[dev]` pulls in the test runner.
    extras_require={"dev": ["pytest>=3.5"]},
)
nilq/baby-python
python
import numpy as np

# Demo of common NumPy array-construction routines: one example of each,
# collected and printed.  The collection was renamed from ``list`` so it no
# longer shadows the builtin of the same name, and the backslash
# continuations were dropped (unneeded inside brackets).
examples = [
    np.linspace([1, 2, 3], 3),     # broadcast start values, common stop
    np.array([1, 2, 3]),
    np.arange(3),
    np.arange(8).reshape(2, 4),
    np.zeros((2, 3)),
    np.zeros((2, 3)).T,            # transposed view
    np.ones((3, 1)),
    np.eye(3),                     # identity matrix
    np.full((3, 3), 1),
    np.random.rand(3),             # uniform [0, 1)
    np.random.rand(3, 3),
    np.random.uniform(5, 15, 3),   # uniform [5, 15)
    np.random.randn(3),            # standard normal
    np.random.normal(3, 2.5, 3),   # normal(mean=3, std=2.5)
]
print(examples)
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @author: LiuZhi @time: 2019-01-20 00:28 @contact: vanliuzhi@qq.com @software: PyCharm """ from flask import render_template, send_from_directory, abort, request from flask.blueprints import Blueprint from flask_security import login_required index_bp = Blueprint('index', __name__, url_prefix='/index', template_folder='/templates') @index_bp.route('/') # @login_required def index(): return render_template('index.html')
nilq/baby-python
python
# # Copyright 2018 ISP RAS (http://www.ispras.ru) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import globals from minimips_base import MiniMipsBaseTemplate from minimips_base import * from template import * class AlignedCallsTemplate(MiniMipsBaseTemplate): def __init__(self): MiniMipsBaseTemplate.__init__(self) def pre(self): MiniMipsBaseTemplate.pre(self) def run(self): align(4) add(reg(u_()), get_register(), get_register()) sub(reg(u_()), get_register(), get_register()) nop() align(8) add(reg(u_()), get_register(), get_register()) sub(reg(u_()), get_register(), get_register()) nop() align(16) add(reg(u_()), get_register(), get_register()) sub(reg(u_()), get_register(), get_register()) nop() globals.template = AlignedCallsTemplate() globals.template.generate()
nilq/baby-python
python
from sqlalchemy.orm import Session

from ..models import users, sensors
from .. import routers
from .. import schemas


def get_sensors_for_user(db: Session, username: str):
    """Return the first Sensor row whose username matches, or None.

    NOTE(review): despite the plural name this returns a single row via
    ``.first()`` — confirm whether ``.all()`` was intended.
    """
    # NOTE(review): user_id is looked up but never used by the sensor query
    # below (which filters on username, not id), and this line raises
    # AttributeError if the user does not exist — verify intent.
    user_id = db.query(users.User).filter(users.User.username == username).first().id
    # NOTE(review): leftover debug output — consider removing or using logging.
    print(user_id)
    return db.query(sensors.Sensor).filter(sensors.Sensor.username==username).first()
nilq/baby-python
python
# Regexes (anchored at sentence start) that detect an explicit discourse
# connective, grouped by discourse-relation sense (PDTB-style classes:
# temporal precedence/succession/synchrony, result, conjunction, contrast,
# instantiation, alternative).
PATTERNS = {
    "precedence": {
        "afterward(s)": "^afterwards?",
        "after that": "^after th(at|is)",
        "eventually": "^eventually",
        "in turn": "^in turn",
        "later": "^later",
        "next": "^next",  # followed by pronoun?
        "thereafter": "^thereafter"
    },
    "succession": {
        "before that": "^before th(at|is)",
        "earlier": "^earlier",
        "previously": "^previously"
    },
    "synchrony": {
        "in the meantime": "^(in the )?meantime",
        "meanwhile": "^meanwhile",
        "simultaneously": "^simultaneously"
    },
    "result": {
        "accordingly": "^accordingly",
        "as a result": "^as a result(?! of)",  # followed by pronoun?
        "consequently": "^consequently",
        "therefore": "^therefore",
        "thus": "^thus"
    },
    "conjunction": {
        "additionally": "^additionally",
        "also": "^also",  # followed by a pronoun?
        "besides": "^besides",
        "furthermore": "^furthermore",  # followed by pronoun if just "further"?
        "in addition": "^in addition",  # followed by , or pronoun?
        "likewise": "^likewise",
        "moreover": "^moreover",
        "similarly": "^similarly"
    },
    "contrast": {
        "by/in comparison": "^(by|in) comparison",  # followed by pronoun/noun
        "by/in contrast": "^(by|in) contrast",  # followed by pronoun/noun
        "conversely": "^conversely",
        "nevertheless": "^nevertheless",
        "on the other hand": "^on the other hand",
    },
    "instantiation": {
        "for example": "^for example",
        "for instance": "^for instance",
        "in particular": "^in particular",
    },
    "alternative": {
        "instead": "^instead",  # followed by pronoun?
        "rather": "^rather"  # followed by pronoun
    }
}

# Paraphrases of each connective usable when the two arguments are joined
# inside a single sentence, keyed by the same connective names as PATTERNS.
INNERS = {
    # TEMPORAL:Asynchronous.precedence
    "afterward(s)": ["and afterwards", "but afterwards", "after which", "then"],
    "after that": ["after that", "after this", "but, after that", "and after this", "after which"],
    "eventually": ["eventually", "and eventually", "and in turn"],
    "in turn": ["in turn", "which, in turn", "and then", "and so", "leaving"],
    "later": ["later", "and later", "but later"],
    "next": ["next", "before", "followed by", "when"],
    "thereafter": ["thereafter", "and thereafter", "after which"],
    # TEMPORAL:Asynchronous.succession
    "before that": ["before that", "but before that", "although before that", "prior to this"],
    "earlier": ["earlier", "and earlier", "formerly", "previously", "after"],
    "previously": ["and previously", "previously", "recently"],
    # TEMPORAL:Synchrony
    "in the meantime": ["in the meantime", "but in the meantime", "whilst", "meanwhile", "while in the meantime", "while",],
    "meanwhile": ["meanwhile", "meanwhile", "while"],
    "simultaneously": ["simultaneously", "and simultaneously", "while",],
    # CONTINGENCY:Cause.result
    "accordingly": ["accordingly", "so", "as such", "and as such"],
    "as a result": ["as a result", "and as a result", "however", "so that", "resulting in", "so"],  # <REV> as a result of?
    "consequently": ["consequently", "and therefore", "and so", "so"],
    "therefore": ["therefore", "and so", "which means", "which means that"],
    "thus": ["thus", "and thus", "thusly"],
    # COMPARISON:Contrast
    "by/in comparison": ["by comparison", "in comparison", "while", "compared to", "whilst"],
    "by/in contrast": ["by contrast", "in contrast", "and in contrast", "while", "although"],
    "conversely": ["conversely", "and conversely"],
    "on the other hand": ["on the other hand", "and on the other hand", "but on the other hand", "but", "whereas", "however", "while"],
    "nevertheless": ["nevertheless", "but", "none the less", "yet", "however"],
    # EXPANSION:Conjunction
    "additionally": ["additionally", "and additionally"],
    "also": ["and also", "and is also"],
    "in addition": ["in addition to", "and additionally"],
    "furthermore": ["further", "furthermore", "and furthermore", "and further"],
    "moreover": ["moreover", "indeed"],
    "besides": ["besides", "besides this", "and also", "aside from"],
    "likewise": ["likewise", "and likewise", "and also"],
    "similarly": ["similarly", "and similarly", "while"],
    # EXPANSION:Instantiation
    "for example": ["for example", "such as"],
    "for instance": ["for instance", "such as"],
    "in particular": ["in particular"],
    # EXPANSION:Alternative
    "instead": ["instead", "but instead", "though"],
    "rather": ["but rather", "though"],
}

# Connective substitutes that attach to the first argument instead of the
# second.  NOTE(review): the "<REV>" prefix presumably marks forms that
# require swapping the argument order — confirm against the consuming code.
FORWARDS = {
    # TEMPORAL:Asynchronous.precedence
    "afterward(s)": [],
    "after that": [],
    "eventually": [],
    "in turn": [],
    "later": [],
    "next": ["before"],
    "thereafter": [],
    # TEMPORAL:Asynchronous.succession
    "before that": [],
    "earlier": ["after"],
    "previously": [],
    # TEMPORAL:Synchrony
    "in the meantime": ["while"],
    "meanwhile": ["while"],
    "simultaneously": ["while"],
    # CONTINGENCY:Cause.result
    "accordingly": ["<REV>because",],
    "as a result": ["<REV>because",],
    "consequently": ["<REV>because",],
    "therefore": ["<REV>because",],
    "thus": ["<REV>because",],
    # COMPARISON:Contrast
    "by/in comparison": ["while"],
    "by/in contrast": ["although", "while"],
    "conversely": [],
    "on the other hand": [],
    "nevertheless": ["<REV>although", "<REV>even though"],
    # EXPANSION:Conjunction
    "additionally": [],
    "also": [],
    "in addition": ["in addition to"],
    "furthermore": [],
    "moreover": [],
    "besides": ["besides"],
    "likewise": [],
    "similarly": ["while"],
    # EXPANSION:Instantiation
    "for example": [],
    "for instance": [],
    "in particular": [],
    # EXPANSION:Alternative
    "instead": [],
    "rather": [],
}
nilq/baby-python
python
#!/usr/bin/python
# coding=utf8
from char_rnn_net import char_rnn_net
from config import Config
from data_utils import get_data
import tensorflow as tf
from utils import pick_top_n


def gen_acrostic(start_words, word2ix, ix2word, prefix_words=None):
    """Generate an acrostic poem: each character of ``start_words`` begins a
    line; ``prefix_words`` (not included in the output) prime the RNN style.

    Restores the latest checkpoint from ``Config.model_path`` and samples
    token by token with ``pick_top_n``.  Returns the generated characters as
    a list.
    """
    with tf.Session() as sess:
        save_path = Config.model_path
        num_classes = len(word2ix)
        inputs = tf.placeholder(tf.int32, shape=(1, 1), name="inputs")
        endpoints = char_rnn_net(inputs, num_classes, is_training=False)
        output_tensor = endpoints['output']
        output_tensor = tf.nn.softmax(output_tensor)
        results = []
        start_word_len = len(start_words)
        # Manually seed the first token with <START>.
        pre_word = '<START>'
        start = [[word2ix[pre_word]]]
        index = 0
        saver = tf.train.Saver(tf.global_variables())
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        latest_ck_file = tf.train.latest_checkpoint(save_path)
        if latest_ck_file:
            print('restore from latest checkpoint file : {}'.format(latest_ck_file))
            saver.restore(sess, latest_ck_file)
        else:
            print('no checkpoint file to restore, exit()')
            exit()
        new_state = sess.run(endpoints['initial_state'])
        # Feed the style-priming prefix through the net to warm up the state.
        if prefix_words:
            for word in prefix_words:
                feed = {endpoints['initial_state']: new_state, inputs: start}
                output, new_state = sess.run([output_tensor, endpoints['hidden']], feed_dict=feed)
                start = [[word2ix[word]]]
        for i in range(Config.max_gen_len):
            feed = {endpoints['initial_state']: new_state, inputs: start}
            output, new_state = sess.run([output_tensor, endpoints['hidden']], feed_dict=feed)
            top_index = pick_top_n(output[0], num_classes)
            w = ix2word[top_index]
            if pre_word in {u'。', u'!', '<START>'}:
                # At a sentence boundary, inject the next acrostic head word.
                if index == start_word_len:
                    # All head words have been used: the poem is complete.
                    break
                else:
                    # Feed the head word into the model as the next input.
                    w = start_words[index]
                    index += 1
                    start = [[word2ix[w]]]
            else:
                # Otherwise feed the previous prediction back as input.
                start = [[word2ix[w]]]
            results.append(w)
            pre_word = w
        return results


def generate(start_words, word2ix, ix2word, prefix_words=None):
    """Continue ``start_words`` into a complete poem.

    Example: with start_words = u'春江潮水连海平' the model generates a poem
    whose first line is that phrase.
    """
    with tf.Session() as sess:
        save_path = Config.model_path
        num_classes = len(word2ix)
        inputs = tf.placeholder(tf.int32, shape=(1, 1), name="inputs")
        endpoints = char_rnn_net(inputs, num_classes, is_training=False)
        output_tensor = endpoints['output']
        output_tensor = tf.nn.softmax(output_tensor)
        # output_tensor = tf.argmax(output_tensor, 1)
        results = list(start_words)
        start_word_len = len(start_words)
        # Manually seed the first token with <START>.
        start = [[word2ix['<START>']]]
        saver = tf.train.Saver(tf.global_variables())
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        latest_ck_file = tf.train.latest_checkpoint(save_path)
        if latest_ck_file:
            print('restore from latest checkpoint file : {}'.format(latest_ck_file))
            saver.restore(sess, latest_ck_file)
        else:
            print('no checkpoint file to restore, exit()')
            exit()
        new_state = sess.run(endpoints['initial_state'])
        # Feed the style-priming prefix through the net to warm up the state.
        if prefix_words:
            for word in prefix_words:
                feed = {endpoints['initial_state']: new_state, inputs: start}
                output, new_state = sess.run([output_tensor, endpoints['hidden']], feed_dict=feed)
                start = [[word2ix[word]]]
        for i in range(Config.max_gen_len):
            feed = {endpoints['initial_state']: new_state, inputs: start}
            output, new_state = sess.run([output_tensor, endpoints['hidden']], feed_dict=feed)
            if i < start_word_len:
                # While inside the given opening, force-feed its characters.
                w = results[i]
                start = [[word2ix[w]]]
            else:
                # Afterwards sample the next character from the model output.
                index = pick_top_n(output[0], num_classes)
                w = ix2word[index]
                results.append(w)
                start = [[index]]
                if w == '<EOP>':
                    # End-of-poem token: drop it and stop.
                    del results[-1]
                    break
        return results


if __name__ == '__main__':
    data, word2ix, ix2word = get_data(Config)
    result = generate(u'春江潮水连海平', word2ix, ix2word, prefix_words=u'郡邑浮前浦,波澜动远空。')
    print(''.join(result))
nilq/baby-python
python
#! /usr/bin/env python

import unittest
import ddlib as dd


class TestDDLib(unittest.TestCase):
    """Unit tests for the span helpers in ddlib."""

    def setUp(self):
        # Small tokenized sentence shared by all tests.
        self.words = ["Tanja", "married", "Jake", "five", "years", "ago"]
        self.lemma = ["Tanja", "marry", "Jake", "five", "years", "ago"]

    def test_materialize_span(self):
        # Tokens 0..2 come back verbatim.
        span = dd.Span(0, 3)
        materialized = dd.materialize_span(self.words, span)
        self.assertEqual(list(materialized), ["Tanja", "married", "Jake"])

    def test_tokens_between_spans(self):
        left = dd.Span(0, 2)
        right = dd.Span(3, 5)

        # Spans in order: not reversed, "Jake" sits between them.
        result = dd.tokens_between_spans(self.words, left, right)
        self.assertEqual([result[0], list(result[1])], [False, ["Jake"]])

        # Spans out of order: the reversed flag flips to True.
        result = dd.tokens_between_spans(self.words, right, left)
        self.assertEqual([result[0], list(result[1])], [True, ["Jake"]])

        # Identical spans: nothing in between.
        result = dd.tokens_between_spans(self.words, left, left)
        self.assertEqual([result[0], list(result[1])], [False, []])

if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
"""---------------------------------------------------------- Authors: Wilhelm Ågren <wagren@kth.se> Last edited: 12-03-2022 License: MIT ----------------------------------------------------------""" from autograd import Tensor class Module(object): def __call__(self, x): return self.forward(x) def forward(self, x): raise NotImplementedError( f'user defined nn.Module object has not implemented forward pass') def parameters(self): params = [] for attr in self.__dict__.values(): if isinstance(attr, Tensor): if attr.requires_grad: params.append(attr) elif isinstance(attr, list): params.extend([param for param in attr if param.requires_grad]) elif isinstance(attr, Sequential): params.extend([param for param in attr.parameters() if param.requires_grad]) return params class Sequential(object): def __init__(self, *modules): self.modules_ = modules def __call__(self, x): return self.forward(x) def forward(self, x): for module in self.modules_: x = module(x) return x def parameters(self): params = [] for module in self.modules_: params.extend(module.parameters()) return params
nilq/baby-python
python
import math import multiprocessing import itertools import glob import sys import time import re import numpy as np from matplotlib import pyplot as plt from astropy.io import fits as pyfits from scipy.optimize import fmin_powell from scipy.interpolate import RectBivariateSpline from . import kepio, kepmsg, kepkey, kepplot, kepfit, kepfunc from .utils import PyKEArgumentHelpFormatter __all__ = ['kepprfphot'] def kepprfphot(infile, prfdir, columns, rows, fluxes, border=0, background=False, focus=False, ranges='0,0', xtol=1e-4, ftol=1e-2, qualflags=False, outfile=None, plot=False, overwrite=False, verbose=False, logfile='kepprfphot.log'): """ kepprfphot -- Fit a PSF model to time series observations within a Target Pixel File Parameters ---------- nfile : str The name of a MAST standard format FITS file containing Kepler Target Pixel data within the first data extension. columns : str or list A starting guess for the CCD column position(s) of the source(s) that are to be fit. The model is unlikely to converge if the guess is too far away from the correct location. A rule of thumb is to provide a guess within 1 CCD pixel of the true position. If more than one source is being modeled then the column positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. rows : str or list A starting guess for the CCD row position(s) of the source(s) that are to be fit. The model is unlikely to converge if the guess is too far away from the correct location. A rule of thumb is to provide a guess within 1 CCD pixel of the true position. If more than one source is being modeled then the row positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. fluxes : str or list A starting guess for the flux(es) of the source(s) that are to be fit. 
Fit convergence is not particularly reliant on the accuracy of these guesses, but the fit will converge faster the more accurate the guess. If more than one source is being modeled then the row positions of each are separated by a comma. The same number of sources in the columns, rows and fluxes field is a requirement of this task. prfdir : str The full or relative directory path to a folder containing the Kepler PSF calibration. Calibration files can be downloaded from the Kepler focal plane characteristics page at the MAST here: http://archive.stsci.edu/missions/kepler/fpc/prf/. border : int If a background is included in the fit then it is modeled as a two-dimensional polynomial. This parameter is the polynomial order. A zero-order polynomial is generally recommended. background : bool Whether to include a background component in the model. If ``True`` the background will be represented by a two-dimensional polynomial of order border. This functionality is somewhat experimental, with one eye upon potential background gradients across large masks or on those detectors more prone to pattern noise. Generally it is recommended to set background as ``False``. focus : bool Whether to include pixel scale and focus rotation with the fit parameters of the model. This is also an experimental function. This approach does not attempt to deal with inter- or intra-pixel variations. The recommended use is currently to set focus as ``False``. ranges : str The user can choose specific time ranges of data on which to work. This could, for example, avoid removing known stellar flares from a dataset Time ranges are supplied as comma-separated pairs of Barycentric Julian Dates (BJDs). Multiple ranges are separated by a semi-colon. An example containing two time ranges is:: '2455012.48517,2455014.50072;2455022.63487,2455025.08231' If the user wants to correct the entire time series then providing ranges = '0,0' will tell the task to operate on the whole time series. 
xtol : float The dimensionless, relative model parameter convergence criterion for the fit algorithm. ftol : float The dimensionless, relative model residual convergence criterion for the fit algorithm. qualflags : bool If qualflags is ``False``, archived observations flagged with any quality issue will not be fit. outfile : str kepprfphot creates two types of output file containing fit results and diagnostics. ``outfile.png`` contains a time series plot of fit parameters, residuals and chi-squared. ``outfile.fits`` contains a table of the same properties, consistent with Kepler archive light curve files. The FITS column PSF_FLUX contains the flux time-series in units of e-/s derived by integrating under the best-fit PRF model. PSF_BKG provides the best-fit background (if calculated) averaged over all mask pixels in units of e-/s/pixel. PSF_CENTR1 provides the best-fit PSF centroid position in the CCD column direction, in CCD pixel units. Similarly, PSF_CENTR2 provides the best-fit PSF centroid position in the CCD row direction, in CCD pixel units. If calculated, PSF_FOCUS1 and PSF_FOCUS2 provide scale factors in the column and row dimensions by which the CCD pixel scale is adjusted to approximate focus variation. PSF_ROTATION provides the angle by which the scaled PSF model was rotated on the focal plane in order to yield a best fit. The table column PSF_RESIDUAL provides the sum of all mask pixels after the best-fit model has been subtracted from the data. PSF_CHI2 delivers the best-fit chi-squred statistic for each observation. plot : bool Plot fit results to the screen? verbose : bool Print informative messages and warnings to the shell and logfile? logfile : str Name of the logfile containing error and warning messages. Examples -------- .. 
code-block:: bash $ kepprfphot kplr012557548-2012004120508_lpd-targ.fits.gz --columns 95 --rows 1020 --fluxes 1.0 --border 0 --prfdir ../kplr2011265_prf --xtol 1e-7 --ftol 1e-7 --plot --verbose -------------------------------------------------------------- KEPPRFPHOT -- infile=kplr012557548-2012004120508_lpd-targ.fits.gz columns=95 rows=1020 fluxes=1.0 border=0 background=False focus=False prfdir=../kplr2011265_prf ranges=0,0 xtol=1e-07 ftol=1e-07 qualflags=False plot=True overwrite=True verbose=True logfile=kepprfphot.log KEPPRFPHOT started at: Wed Jun 14 15:33:30 2017 KepID: 12557548 RA (J2000): 290.96622 Dec (J2000): 51.50472 KepMag: 15.692 SkyGroup: 4 Season: 1 Channel: 32 Module: 10 Output: 4 19% nrow = 740 t = 0.1 sec .. image:: ../_static/images/api/kepprfphot.png """ if outfile is None: outfile = infile.split('.')[0] + "-{}".format(__all__[0]) # log the call hashline = '--------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = ('KEPPRFPHOT -- ' + ' infile={}'.format(infile) + ' outfile={}'.format(outfile) + ' columns={}'.format(columns) + ' rows={}'.format(rows) + ' fluxes={}'.format(fluxes) + ' border={}'.format(border) + ' background={}'.format(background) + ' focus={}'.format(focus) + ' prfdir={}'.format(prfdir) + ' ranges={}'.format(ranges) + ' xtol={}'.format(xtol) + ' ftol={}'.format(ftol) + ' qualflags={}'.format(qualflags) + ' plot={}'.format(plot) + ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) + ' logfile={}'.format(logfile)) kepmsg.log(logfile, call+'\n', verbose) # start time kepmsg.clock('KEPPRFPHOT started at', logfile, verbose) f = fluxes x = columns y = rows nsrc = len(f) if len(x) != nsrc or len(y) != nsrc: errmsg = ("ERROR -- KEPFIT:FITMULTIPRF: Guesses for rows, columns and " "fluxes must have the same number of sources") kepmsg.err(logfile, errmsg, verbose) guess = list(f) + list(x) + list(y) if background: if border == 0: guess.append(0.0) else: for i in 
range((border + 1) * 2): guess.append(0.0) if focus: guess = guess + [1.0, 1.0, 0.0] # overwrite output file for i in range(nsrc): outfilename = '{0}_{1}.fits'.format(outfile, i) if overwrite: kepio.overwrite(outfilename, logfile, verbose) if kepio.fileexists(outfilename): errmsg = 'ERROR -- KEPPRFPHOT: {} exists. Use --overwrite'.format(outfilename) kepmsg.err(logfile, errmsg, verbose) # open TPF FITS file try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, barytime = \ kepio.readTPF(infile, 'TIME', logfile, verbose) except: message = 'ERROR -- KEPPRFPHOT: is %s a Target Pixel File? ' % infile kepmsg.err(logfile,message,verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, tcorr = \ kepio.readTPF(infile,'TIMECORR', logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, cadno = \ kepio.readTPF(infile,'CADENCENO',logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \ kepio.readTPF(infile,'FLUX', logfile, verbose) kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, errpixels = \ kepio.readTPF(infile,'FLUX_ERR', logfile, verbose) try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr1 = \ kepio.readTPF(infile, 'POS_CORR1', logfile, verbose) except: poscorr1 = np.zeros((len(barytime)), dtype='float32') poscorr1[:] = np.nan try: kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, poscorr2 = \ kepio.readTPF(infile, 'POS_CORR2', logfile, verbose) except: poscorr2 = np.zeros((len(barytime)), dtype='float32') poscorr2[:] = np.nan kepid, channel, skygroup, module, output, quarter, season, \ ra, dec, column, row, kepmag, xdim, ydim, qual = \ 
kepio.readTPF(infile,'QUALITY',logfile,verbose) struct = pyfits.open(infile) tstart, tstop, bjdref, cadence = kepio.timekeys(struct, infile, logfile, verbose) # input file keywords and mask map cards0 = struct[0].header.cards cards1 = struct[1].header.cards cards2 = struct[2].header.cards maskmap = np.copy(struct[2].data) npix = np.size(np.nonzero(maskmap)[0]) # print target data if verbose: print('') print(' KepID: {}'.format(kepid)) print(' RA (J2000): {}'.format(ra)) print('Dec (J2000): {}'.format(dec)) print(' KepMag: {}'.format(kepmag)) print(' SkyGroup: {}'.format(skygroup)) print(' Season: {}'.format(season)) print(' Channel: {}'.format(channel)) print(' Module: {}'.format(module)) print(' Output: {}'.format(output)) print('') # read PRF file and interpolate result = kepfunc.read_and_interpolate_prf(prfdir=prfdir, module=module, output=output, column=column, row=row, xdim=xdim, ydim=ydim, verbose=verbose, logfile=logfile) splineInterpolation = result[0] DATx = result[1] DATy = result[2] PRFx = result[4] PRFy = result[5] # construct mesh for background model bx = np.arange(1., float(xdim + 1)) by = np.arange(1., float(ydim + 1)) xx, yy = np.meshgrid(np.linspace(bx.min(), bx.max(), xdim), np.linspace(by.min(), by.max(), ydim)) # Get time ranges for new photometry, flag good data barytime += bjdref tstart, tstop = kepio.timeranges(ranges, logfile, verbose) incl = np.zeros((len(barytime)), dtype='int') for rownum in range(len(barytime)): for winnum in range(len(tstart)): if (barytime[rownum] >= tstart[winnum] and barytime[rownum] <= tstop[winnum] and (qual[rownum] == 0 or qualflags) and np.isfinite(barytime[rownum]) and np.isfinite(np.nansum(fluxpixels[rownum, :]))): incl[rownum] = 1 if not np.in1d(1,incl): message = ('ERROR -- KEPPRFPHOT: No legal data within the' ' range {}'.format(ranges)) kepmsg.err(logfile, message, verbose) # filter out bad data n = 0 nincl = (incl == 1).sum() tim = np.zeros((nincl), 'float64') tco = np.zeros((nincl), 'float32') cad = 
np.zeros((nincl), 'float32') flu = np.zeros((nincl, len(fluxpixels[0])), 'float32') fer = np.zeros((nincl, len(fluxpixels[0])), 'float32') pc1 = np.zeros((nincl), 'float32') pc2 = np.zeros((nincl), 'float32') qua = np.zeros((nincl), 'float32') for rownum in range(len(barytime)): if incl[rownum] == 1: tim[n] = barytime[rownum] tco[n] = tcorr[rownum] cad[n] = cadno[rownum] flu[n,:] = fluxpixels[rownum] fer[n,:] = errpixels[rownum] pc1[n] = poscorr1[rownum] pc2[n] = poscorr2[rownum] qua[n] = qual[rownum] n += 1 barytime = tim * 1.0 tcorr = tco * 1.0 cadno = cad * 1.0 fluxpixels = flu * 1.0 errpixels = fer * 1.0 poscorr1 = pc1 * 1.0 poscorr2 = pc2 * 1.0 qual = qua * 1.0 # initialize plot arrays t = np.array([], dtype='float64') fl, dx, dy, bg, fx, fy, fa, rs, ch = [], [], [], [], [], [], [], [], [] for i in range(nsrc): fl.append(np.array([], dtype='float32')) dx.append(np.array([], dtype='float32')) dy.append(np.array([], dtype='float32')) # Preparing fit data message progress = np.arange(nincl) if verbose: txt = 'Preparing...' 
sys.stdout.write(txt) sys.stdout.flush() # single processor version oldtime = 0.0 for rownum in range(np.min([80, len(barytime)])): try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc, border, xx, yy, PRFx, PRFy, splineInterpolation, guess, ftol, xtol, focus, background, rownum, 80, float(x[i]), float(y[i]), False) guess = PRFfits(args) ftol = ftol xtol = xtol oldtime = barytime[rownum] # Fit the time series: multi-processing anslist = [] cad1 = 0 cad2 = 50 for i in range(int(nincl/50) + 1): try: fluxp = fluxpixels[cad1:cad2, :] errp = errpixels[cad1:cad2, :] progress = np.arange(cad1, cad2) except: fluxp = fluxpixels[cad1:nincl, :] errp = errpixels[cad1:nincl, :] progress = np.arange(cad1, nincl) try: args = itertools.izip(fluxp, errp, itertools.repeat(DATx), itertools.repeat(DATy), itertools.repeat(nsrc), itertools.repeat(border), itertools.repeat(xx), itertools.repeat(yy), itertools.repeat(PRFx), itertools.repeat(PRFy), itertools.repeat(splineInterpolation), itertools.repeat(guess), itertools.repeat(ftol), itertools.repeat(xtol), itertools.repeat(focus), itertools.repeat(background), progress, itertools.repeat(np.arange(cad1,nincl)[-1]), itertools.repeat(float(x[0])), itertools.repeat(float(y[0])), itertools.repeat(True)) p = multiprocessing.Pool() model = [0.0] model = p.imap(PRFfits, args, chunksize=1) p.close() p.join() cad1 += 50; cad2 += 50 ans = np.array([np.array(item) for item in zip(*model)]) try: anslist = np.concatenate((anslist, ans.transpose()), axis=0) except: anslist = ans.transpose() guess = anslist[-1] ans = anslist.transpose() except: pass # single processor version oldtime = 0.0; ans = [] for rownum in range(nincl): proctime = time.time() try: if barytime[rownum] - oldtime > 0.5: ftol = 1.0e-10; xtol = 1.0e-10 except: pass args = (fluxpixels[rownum, :], errpixels[rownum, :], DATx, DATy, nsrc, border, xx, yy, PRFx, PRFy, splineInterpolation, 
guess, ftol, xtol, focus, background, rownum, nincl, float(x[0]), float(y[0]), True) guess = PRFfits(args) ans.append(guess) ftol = ftol; xtol = xtol; oldtime = barytime[rownum] ans = np.array(ans).transpose() # unpack the best fit parameters flux, OBJx, OBJy = [], [], [] na = np.shape(ans)[1] for i in range(nsrc): flux.append(ans[i, :]) OBJx.append(ans[nsrc + i, :]) OBJy.append(ans[nsrc * 2 + i, :]) try: bterms = border + 1 if bterms == 1: b = ans[nsrc * 3, :] else: b = np.array([]) bkg = [] for i in range(na): bcoeff = np.array([ans[nsrc * 3:nsrc * 3 + bterms, i], ans[nsrc * 3 + bterms:nsrc * 3 + bterms * 2, i]]) bkg.append(kepfunc.polyval2d(xx, yy, bcoeff)) b = np.append(b, np.nanmean(bkg[-1].reshape(bkg[-1].size))) except: b = np.zeros(na) if focus: wx = ans[-3, :] wy = ans[-2, :] angle = ans[-1, :] else: wx = np.ones(na) wy = np.ones(na) angle = np.zeros(na) # constuct model PRF in detector coordinates residual, chi2 = [], [] for i in range(na): f = np.empty(nsrc) x = np.empty(nsrc) y = np.empty(nsrc) for j in range(nsrc): f[j] = flux[j][i] x[j] = OBJx[j][i] y[j] = OBJy[j][i] PRFfit = kepfunc.PRF2DET(f, x, y, DATx, DATy, wx[i], wy[i], angle[i], splineInterpolation) if background and bterms == 1: PRFfit = PRFfit + b[i] if background and bterms > 1: PRFfit = PRFfit + bkg[i] # calculate residual of DATA - FIT xdim = np.shape(xx)[1] ydim = np.shape(yy)[0] DATimg = np.empty((ydim, xdim)) n = 0 for k in range(ydim): for j in range(xdim): DATimg[k,j] = fluxpixels[i, n] n += 1 PRFres = DATimg - PRFfit residual.append(np.nansum(PRFres) / npix) # calculate the sum squared difference between data and model chi2.append(abs(np.nansum(np.square(DATimg - PRFfit) / PRFfit))) # load the output arrays otime = barytime - bjdref otimecorr = tcorr ocadenceno = cadno opos_corr1 = poscorr1 opos_corr2 = poscorr2 oquality = qual opsf_bkg = b opsf_focus1 = wx opsf_focus2 = wy opsf_rotation = angle opsf_residual = residual opsf_chi2 = chi2 opsf_flux_err = np.empty((na)) 
opsf_flux_err.fill(np.nan) opsf_centr1_err = np.empty((na)) opsf_centr1_err.fill(np.nan) opsf_centr2_err = np.empty((na)) opsf_centr2_err.fill(np.nan) opsf_bkg_err = np.empty((na)) opsf_bkg_err.fill(np.nan) opsf_flux, opsf_centr1, opsf_centr2 = [], [], [] for i in range(nsrc): opsf_flux.append(flux[i]) opsf_centr1.append(OBJx[i]) opsf_centr2.append(OBJy[i]) # load the plot arrays t = barytime for i in range(nsrc): fl[i] = flux[i] dx[i] = OBJx[i] dy[i] = OBJy[i] bg = b fx = wx fy = wy fa = angle rs = residual ch = chi2 # construct output primary extension for j in range(nsrc): hdu0 = pyfits.PrimaryHDU() for i in range(len(cards0)): if cards0[i].keyword not in hdu0.header.keys(): hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment) else: hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment kepkey.history(call, hdu0, outfilename, logfile, verbose) outstr = pyfits.HDUList(hdu0) # construct output light curve extension col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833', array=otime) col2 = pyfits.Column(name='TIMECORR', format='E', unit='d', array=otimecorr) col3 = pyfits.Column(name='CADENCENO', format='J', array=ocadenceno) col4 = pyfits.Column(name='PSF_FLUX', format='E', unit='e-/s', array=opsf_flux[j]) col5 = pyfits.Column(name='PSF_FLUX_ERR', format='E', unit='e-/s', array=opsf_flux_err) col6 = pyfits.Column(name='PSF_BKG', format='E', unit='e-/s/pix', array=opsf_bkg) col7 = pyfits.Column(name='PSF_BKG_ERR', format='E', unit='e-/s', array=opsf_bkg_err) col8 = pyfits.Column(name='PSF_CENTR1', format='E', unit='pixel', array=opsf_centr1[j]) col9 = pyfits.Column(name='PSF_CENTR1_ERR', format='E', unit='pixel', array=opsf_centr1_err) col10 = pyfits.Column(name='PSF_CENTR2', format='E', unit='pixel', array=opsf_centr2[j]) col11 = pyfits.Column(name='PSF_CENTR2_ERR', format='E', unit='pixel', array=opsf_centr2_err) col12 = pyfits.Column(name='PSF_FOCUS1', format='E', array=opsf_focus1) col13 = pyfits.Column(name='PSF_FOCUS2', 
format='E', array=opsf_focus2) col14 = pyfits.Column(name='PSF_ROTATION', format='E', unit='deg', array=opsf_rotation) col15 = pyfits.Column(name='PSF_RESIDUAL', format='E', unit='e-/s', array=opsf_residual) col16 = pyfits.Column(name='PSF_CHI2', format='E', array=opsf_chi2) col17 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel', array=opos_corr1) col18 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel', array=opos_corr2) col19 = pyfits.Column(name='SAP_QUALITY', format='J', array=oquality) cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19]) hdu1 = pyfits.BinTableHDU.from_columns(cols) for i in range(len(cards1)): if (cards1[i].keyword not in hdu1.header.keys() and cards1[i].keyword[:4] not in ['TTYP', 'TFOR', 'TUNI', 'TDIS', 'TDIM', 'WCAX', '1CTY', '2CTY', '1CRP', '2CRP', '1CRV', '2CRV', '1CUN', '2CUN', '1CDE', '2CDE', '1CTY', '2CTY', '1CDL', '2CDL', '11PC', '12PC', '21PC', '22PC']): hdu1.header[cards1[i].keyword] = (cards1[i].value, cards1[i].comment) outstr.append(hdu1) # construct output mask bitmap extension hdu2 = pyfits.ImageHDU(maskmap) for i in range(len(cards2)): if cards2[i].keyword not in hdu2.header.keys(): hdu2.header[cards2[i].keyword] = (cards2[i].value, cards2[i].comment) else: hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment outstr.append(hdu2) # write output file print("Writing output file {}...\n".format(outfile + '_' + str(j) + '.fits')) outstr.writeto(outfile + '_' + str(j) + '.fits', checksum=True) # close input structure struct.close() # clean up x-axis unit barytime0 = float(int(t[0] / 100) * 100.0) t -= barytime0 t = np.insert(t,[0],[t[0]]) t = np.append(t,[t[-1]]) xlab = 'BJD $-$ %d' % barytime0 # plot the light curves bg = np.insert(bg, [0], [-1.0e10]) bg = np.append(bg, -1.0e10) fx = np.insert(fx, [0], [fx[0]]) fx = np.append(fx, fx[-1]) fy = np.insert(fy, [0], [fy[0]]) fy = np.append(fy, fy[-1]) fa = 
np.insert(fa, [0], [fa[0]]) fa = np.append(fa, fa[-1]) rs = np.insert(rs, [0], [-1.0e10]) rs = np.append(rs, -1.0e10) ch = np.insert(ch, [0], [-1.0e10]) ch = np.append(ch, -1.0e10) for i in range(nsrc): # clean up y-axis units nrm = math.ceil(math.log10(np.nanmax(fl[i]))) - 1.0 fl[i] /= 10 ** nrm if nrm == 0: ylab1 = 'e$^-$ s$^{-1}$' else: ylab1 = '10$^{%d}$ e$^-$ s$^{-1}$' % nrm xx = np.copy(dx[i]) yy = np.copy(dy[i]) ylab2 = 'offset (pixels)' # data limits xmin = np.nanmin(t) xmax = np.nanmax(t) ymin1 = np.nanmin(fl[i]) ymax1 = np.nanmax(fl[i]) ymin2 = np.nanmin(xx) ymax2 = np.nanmax(xx) ymin3 = np.nanmin(yy) ymax3 = np.nanmax(yy) ymin4 = np.nanmin(bg[1:-1]) ymax4 = np.nanmax(bg[1:-1]) ymin5 = np.nanmin([np.nanmin(fx), np.nanmin(fy)]) ymax5 = np.nanmax([np.nanmax(fx), np.nanmax(fy)]) ymin6 = np.nanmin(fa[1:-1]) ymax6 = np.nanmax(fa[1:-1]) ymin7 = np.nanmin(rs[1:-1]) ymax7 = np.nanmax(rs[1:-1]) ymin8 = np.nanmin(ch[1:-1]) ymax8 = np.nanmax(ch[1:-1]) xr = xmax - xmin yr1 = ymax1 - ymin1 yr2 = ymax2 - ymin2 yr3 = ymax3 - ymin3 yr4 = ymax4 - ymin4 yr5 = ymax5 - ymin5 yr6 = ymax6 - ymin6 yr7 = ymax7 - ymin7 yr8 = ymax8 - ymin8 fl[i] = np.insert(fl[i], [0], [0.0]) fl[i] = np.append(fl[i], 0.0) # define size of plot on monitor screen plt.figure(str(i + 1) + ' ' + str(time.asctime(time.localtime())), figsize=[12,16]) # delete any fossil plots in the matplotlib window plt.clf() # position first axes inside the plotting window ax = plt.axes([0.11, 0.523, 0.78, 0.45]) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) # nox-label plt.setp(plt.gca(), xticklabels=[]) # plot flux vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1, len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, fl[i][j]) 
else: plt.plot(ltime, ldata, color='#0000ff', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') plt.plot(ltime, ldata, color='#0000ff', linestyle='-', linewidth=1.0) # plot the fill color below data time series, with no data gaps plt.fill(t,fl[i],fc='#ffff00',linewidth=0.0,alpha=0.2) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin1 - yr1 * 0.01 <= 0.0: plt.ylim(1.0e-10, ymax1 + yr1 * 0.01) else: plt.ylim(ymin1 - yr1 * 0.01, ymax1 + yr1 * 0.01) plt.ylabel('Source (' + ylab1 + ')', {'color' : 'k'}) # make grid on plot plt.grid() # plot centroid tracks - position second axes inside the plotting window if focus and background: axs = [0.11, 0.433, 0.78, 0.09] elif background or focus: axs = [0.11, 0.388, 0.78, 0.135] else: axs = [0.11, 0.253, 0.78, 0.27] ax1 = plt.axes(axs) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.setp(plt.gca(),xticklabels=[]) # plot dx vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1, len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, xx[j-1]) else: ax1.plot(ltime, ldata, color='r', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='r', linestyle='-', linewidth=1.0) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin2 - yr2 * 0.03, ymax2 + yr2 * 0.03) # plot labels ax1.set_ylabel('X-' + ylab2, color='k', fontsize=11) # position second axes inside the plotting window ax2 = ax1.twinx() # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) 
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.setp(plt.gca(), xticklabels=[]) # plot dy vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1, len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, yy[j-1]) else: ax2.plot(ltime, ldata, color='g', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax2.plot(ltime, ldata, color='g', linestyle='-', linewidth=1.0) # define plot y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin3 - yr3 * 0.03, ymax3 + yr3 * 0.03) # plot labels ax2.set_ylabel('Y-' + ylab2, color='k',fontsize=11) # background - position third axes inside the plotting window if background and focus: axs = [0.11, 0.343, 0.78, 0.09] if background and not focus: axs = [0.11, 0.253, 0.78, 0.135] if background: ax1 = plt.axes(axs) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.setp(plt.gca(), xticklabels=[]) # plot background vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1, len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, bg[j]) else: ax1.plot(ltime, ldata, color='#0000ff', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='#0000ff', linestyle='-', linewidth=1.0) # plot the fill color below data time series, with no data gaps plt.fill(t, bg, fc='#ffff00', linewidth=0.0, alpha=0.2) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin4 - yr4 * 0.03, ymax4 + yr4 * 0.03) # plot labels ax1.set_ylabel('Background \n(e$^-$ 
s$^{-1}$ pix$^{-1}$)', multialignment='center', color='k',fontsize=11) plt.grid() # position focus axes inside the plotting window if focus and background: axs = [0.11, 0.253, 0.78, 0.09] if focus and not background: axs = [0.11, 0.253, 0.78, 0.135] if focus: ax1 = plt.axes(axs) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.setp(plt.gca(), xticklabels=[]) # plot x-axis PSF width vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, fx[j]) else: ax1.plot(ltime, ldata, color='r', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='r', linestyle='-', linewidth=1.0) # plot y-axis PSF width vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, fy[j]) else: ax1.plot(ltime, ldata, color='g', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='g', linestyle='-', linewidth=1.0) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin5 - yr5 * 0.03, ymax5 + yr5 * 0.03) # plot labels ax1.set_ylabel('Pixel Scale\nFactor', multialignment='center', color='k',fontsize=11) # Focus rotation - position second axes inside the plotting window ax2 = ax1.twinx() # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) 
plt.setp(plt.gca(), xticklabels=[]) # plot dy vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, fa[j]) else: ax2.plot(ltime, ldata, color='#000080', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax2.plot(ltime, ldata, color='#000080', linestyle='-', linewidth=1.0) # define plot y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin6 - yr6 * 0.03, ymax6 + yr6 * 0.03) # plot labels ax2.set_ylabel('Rotation (deg)', color='k',fontsize=11) # fit residuals - position fifth axes inside the plotting window axs = [0.11, 0.163, 0.78, 0.09] ax1 = plt.axes(axs) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.setp(plt.gca(), xticklabels=[]) # plot residual vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1, len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, rs[j]) else: ax1.plot(ltime, ldata, color='b', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='b', linestyle='-', linewidth=1.0) # plot the fill color below data time series, with no data gaps plt.fill(t, rs, fc='#ffff00', linewidth=0.0, alpha=0.2) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin7 - yr7 * 0.03, ymax7 + yr7 * 0.03) # plot labels ax1.set_ylabel('Residual \n(e$^-$ s$^{-1}$)', multialignment='center', color='k', fontsize=11) plt.grid() # fit chi square - position sixth axes inside the plotting window axs = [0.11, 0.073, 0.78, 0.09] ax1 = 
plt.axes(axs) # force tick labels to be absolute rather than relative plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) # plot background vs time ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') dt = 0 work1 = 2.0 * cadence / 86400 for j in range(1,len(t)-1): dt = t[j] - t[j-1] if dt < work1: ltime = np.append(ltime, t[j]) ldata = np.append(ldata, ch[j]) else: ax1.plot(ltime, ldata, color='b', linestyle='-', linewidth=1.0) ltime = np.array([], dtype='float64') ldata = np.array([], dtype='float32') ax1.plot(ltime, ldata, color='b', linestyle='-', linewidth=1.0) # plot the fill color below data time series, with no data gaps plt.fill(t, ch, fc='#ffff00', linewidth=0.0, alpha=0.2) # define plot x and y limits plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) plt.ylim(ymin8 - yr8 * 0.03, ymax8 + yr8 * 0.03) # plot labels ax1.set_ylabel('$\chi^2$ (%d dof)' % (npix - len(guess) - 1), color='k', fontsize=11) plt.xlabel(xlab, {'color' : 'k'}) # make grid on plot plt.grid() # render plot plt.savefig(outfile + '_' + str(i) + '.png') plt.show() # stop time kepmsg.clock('\n\nKEPPRFPHOT ended at',logfile,verbose) def PRFfits(args): # start time proctime = time.time() # extract image from the time series xdim = np.shape(args[6])[1] ydim = np.shape(args[6])[0] DATimg = np.empty((ydim,xdim)) DATerr = np.empty((ydim,xdim)) n = 0 for i in range(ydim): for j in range(xdim): DATimg[i,j] = args[0][n] DATerr[i,j] = args[1][n] n += 1 # minimize data and model if args[14] and args[15]: argm = (args[2], args[3], DATimg, DATerr, args[4], args[5], args[6], args[7], args[10], args[18], args[19]) ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, args[11], args=argm, xtol=args[12], ftol=args[13], disp=False) elif args[14] and not args[15]: argm = (args[2], args[3], DATimg, DATerr, args[4], args[10], args[18], args[19]) ans = fmin_powell(kepfunc.PRFwithFocus, args[11], 
args=argm, xtol=args[12], ftol=args[13], disp=False) elif args[15] and not args[14]: argm = (args[2], args[3], DATimg, DATerr, args[4], args[5], args[6], args[7], args[10], args[18], args[19]) ans = fmin_powell(kepfunc.PRFwithBackground, args[11], args=argm, xtol=args[12], ftol=args[13], disp=False) else: argm = (args[2], args[3], DATimg, DATerr, args[4], args[10], args[18], args[19]) ans = fmin_powell(kepfunc.PRF, args[11], args=argm, xtol=args[12], ftol=args[13], disp=False) # print progress if args[20]: txt = '\r%3d%% ' % ((float(args[16]) + 1.0) / float(args[17]) * 100.0) txt += 'nrow = %d ' % (args[16]+1) txt += 't = %.1f sec' % (time.time() - proctime) txt += ' ' * 5 sys.stdout.write(txt) sys.stdout.flush() return ans def kepprfphot_main(): import argparse parser = argparse.ArgumentParser( description='Fitting PRF model to Target Pixel time series', formatter_class=PyKEArgumentHelpFormatter) parser.add_argument('infile', help='Name of input target pixel file', type=str) parser.add_argument('--prfdir', help='Folder containing PRF files', type=str) parser.add_argument('--columns', help='Column number of each source to be fit', nargs='+', type=float) parser.add_argument('--rows', help='Row number of each source to be fit', nargs='+', type=float) parser.add_argument('--fluxes', help='Relative flux of each source to be fit', nargs='+', type=float) parser.add_argument('--border', help='Order of background polynmial fit', default=0, type=int) parser.add_argument('--background', action='store_true', help='Fit background?') parser.add_argument('--focus', action='store_true', help='Fit focus changes?') parser.add_argument('--ranges', default='0,0', help='Time ranges to fit', type=str) parser.add_argument('--xtol', default=1.0e-4, help='Fit parameter xtol', type=float) parser.add_argument('--ftol', default=1.0e-2, help='Fit minimization tolerance', type=float) parser.add_argument('--qualflags', action='store_true', help='Fit data that have quality flags?') 
parser.add_argument('--outfile', help=('Root name of output light curve FITS files.' ' If None, root name is infile-kepprfphot.'), default=None) parser.add_argument('--plot', action='store_true', help='Plot fit results?') parser.add_argument('--overwrite', action='store_true', help='Overwrite output file?') parser.add_argument('--verbose', action='store_true', help='Write to a log file?') parser.add_argument('--logfile', '-l', default='kepprfphot.log', help='Name of ascii log file', type=str) args = parser.parse_args() kepprfphot(args.infile, args.prfdir, args.columns, args.rows, args.fluxes, args.border, args.background, args.focus, args.ranges, args.xtol, args.ftol, args.qualflags, args.outfile, args.plot, args.overwrite, args.verbose, args.logfile)
nilq/baby-python
python
#!/usr/bin/env python3 from __future__ import print_function import os import pythondata_cpu_minerva print("Found minerva @ version", pythondata_cpu_minerva.version_str, "(with data", pythondata_cpu_minerva.data_version_str, ")") print() print("Data is in", pythondata_cpu_minerva.data_location) assert os.path.exists(pythondata_cpu_minerva.data_location) print("Data is version", pythondata_cpu_minerva.data_version_str, pythondata_cpu_minerva.data_git_hash) print("-"*75) print(pythondata_cpu_minerva.data_git_msg) print("-"*75) print() print("It contains:") for root, dirs, files in os.walk(pythondata_cpu_minerva.data_location): dirs.sort() for f in sorted(files): path = os.path.relpath(os.path.join(root, f), pythondata_cpu_minerva.data_location) print(" -", path)
nilq/baby-python
python
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Python 2 driver script: runs a chunk-mapping evaluation experiment for
# several classifiers and writes averaged predictions per experiment name.

__author__ = "Osman Baskaya"

from pprint import pprint
from collections import defaultdict as dd
from classifier_eval import ChunkEvaluator
from logger import ChunkLogger
from classifier import *
import mapping_utils
import sys
import os

# Chunk sampling strategies whose gold files are looked up per target word.
chunk_types = ['semcor', 'uniform', 'random', 'hybrid']
tw_dict = {}       # target word -> list of gold chunk filenames (one per experiment)
sys_ans_dict = {}  # target word -> system answer (.ans) file path

#system_key_folder = 'hdp-ans/'
#system_out_dir = 'def-map-hdp'
#exp_part = 1

# Positional command-line arguments; order matters (no argparse here).
system_key_folder = sys.argv[1]
system_out_dir = sys.argv[2]
training_word_list = [line.strip() for line in open(sys.argv[3]).readlines()]
gold_dir = sys.argv[4]
chunk_path = sys.argv[5]
exp_part = sys.argv[6]
start = int(sys.argv[7])
end = int(sys.argv[8])

# Development data stuff
devfiles = sys.argv[9:] # development files
gold_dev = [os.path.join(gold_dir, f + '.key') for f in devfiles]
sys_dev = ["{}{}.ans".format(system_key_folder, tw) for tw in devfiles]

# Classifier wrappers compared in this experiment.
wrappers = [
            SVCWrapper('SVM_Linear', kernel='linear', C=1),
            SVCWrapper('SVM_Gaussian', kernel='rbf', C=1, gamma=0),
            DecisionTreeWrapper("DecisionTree-Gini", criterion='gini'),
            DecisionTreeWrapper("DecisionTree-Entropy", criterion='entropy'),
            BernoulliNBWrapper(),
            MultinomialNBWrapper()
           ]

logger = ChunkLogger(3)

# quick testing
#training_word_list = [
#'horne',
#'adams_apple',
#'loot',
#'para'
#]

training_word_list.sort()
# Only the [start:end] slice is processed, so the workload can be split
# across parallel jobs by varying the slice bounds per invocation.
processed = training_word_list[start:end]

for tw in processed:
    ans_file = "{}{}.ans".format(system_key_folder, tw)
    # Development words are kept out of the training dictionaries.
    if tw not in set(devfiles):
        sys_ans_dict[tw] = ans_file
        tw_dict[tw] = mapping_utils.get_gold_chunk_filename(tw, chunk_path,
                                                            chunk_types)

devset = [sys_dev, gold_dev]
# Number of chunk experiments per target word; assumed uniform across words.
exp_length = len(tw_dict[processed[0]])
optimization = False

### Prints all information for the experiment ###
logger.info("Evaluation started for %s" % system_key_folder)
logger.info("Total pseudowords: %d" % len(processed))
logger.info("Chunk Path is: %s" % chunk_path)
logger.info("Dev. set: %s" % devset[0])
logger.info("Gold Dev. set: %s" % devset[1])
logger.info("Optimization: %s" % optimization)
logger.info("Gold key directory: %s" % gold_dir)
logger.info("Number of classifiers: %d" % len(wrappers))

for w in wrappers:
    results = dd(list)
    predictions = dd(list)
    for i in range(exp_length):
        # Build the i-th experiment: one gold chunk file per target word.
        exp = {}
        for tw in tw_dict:
            exp[tw] = tw_dict[tw][i]
        #print "Experiment %d" % (i+1),
        out = os.path.join(system_out_dir, w.name)
        if not os.path.exists(out):
            os.mkdir(out)
        exp_name, tr_ch, test_chunk = mapping_utils.get_exp_name(exp, tw,
                                                                 w.name,
                                                                 exp_part)
        # Only experiments that test on semcor/uniform chunks are scored.
        if test_chunk not in ['semcor', 'uniform']:
            continue
        e = ChunkEvaluator(w, exp, sys_ans_dict, devset, optimization,
                           logger=logger)
        score, prediction = e.score_and_predict()
        print system_out_dir, exp_name
        # Average (score, perplexity) over all pseudowords of this run.
        num_pw = len(score.keys())
        avg_score = sum([s[0] for s in score.values()]) / num_pw
        avg_perp = sum([s[1] for s in score.values()]) / num_pw
        results[exp_name].append(avg_score)
        predictions[exp_name].append(prediction)
        #print "ChunkScore:", exp_name, total_score, total_perp / num_pw
    #pprint(predictions)
    # Cross-validation mean per experiment name (currently only logged
    # implicitly; note `out` refers to the last classifier subdirectory).
    cross_res = [sum(res) / len(res) for res in results.values()]
    #pprint( zip(results.keys(), cross_res) )
    #mapping_utils.write_prediction2file(predictions, "def-map-aiku/")
    mapping_utils.write_prediction2file(predictions, out)
nilq/baby-python
python
""" Chouette storages file. For now it's just a RedisStorage. It could be made more enterprise-y with a Storage interface, but it'll work for now as is. """ import json import logging import os import re from datetime import datetime from typing import Any, Dict, Optional from uuid import uuid4 from redis import Redis, RedisError logger = logging.getLogger("chouette-iot") __all__ = ["RedisStorage", "StoragesFactory"] class StoragesFactory: """ Storages factory that creates a storage of a desired type. At the moment there is a single storage type that is Redis. """ @staticmethod def get_storage(storage_type: str): """ Generates a storage. Returns: RedisStorage instance or None if redis is not reachable. """ if storage_type.lower() == "redis": redis_host = os.environ.get("REDIS_HOST", "redis") redis_port = int(os.environ.get("REDIS_PORT", "6379")) redis_storage = RedisStorage(host=redis_host, port=redis_port) return redis_storage return None class RedisStorage(Redis): """ RedisStorage is a wrapper around Redis that stores data into its queues. """ metrics_queue = "chouette:metrics:raw" logs_queue = "chouette:logs:wrapped" def store_metric(self, metric: Dict[str, Any]) -> Optional[str]: """ Stores a metric to Redis. Args: metric: Metric as a dictionary. Return: Message key or None if message was not stored successfully. """ collected_at = metric["timestamp"] return self._store(metric, self.metrics_queue, collected_at) def store_log(self, log_message: Dict[str, Any]) -> Optional[str]: """ Stores a log message to Redis. Args: log_message: Log message as a dictionary. Return: Message key or None if message was not stored successfully. 
""" py36_date = re.sub(r"\+(\d{2}):(\d{2})", r"+\1\2", log_message["date"]) collected_at = datetime.strptime( py36_date, "%Y-%m-%dT%H:%M:%S.%f%z" ).timestamp() return self._store(log_message, self.logs_queue, collected_at) def _store( self, record: Dict[str, Any], queue: str, timestamp: float ) -> Optional[str]: """ Actually stores a message to Redis. It generates a key as a unique string, casts a record into json and stores it to a specified queue in Redis under a specified timestamp. Args: record: Record to store as a dict. queue: Queue name. timestamp: Unix timestamp for a keys sorted set. Return: Message key or None if message was not stored successfully. """ key = str(uuid4()) value = json.dumps(record) pipeline = self.pipeline() pipeline.zadd(f"{queue}.keys", {key: timestamp}) pipeline.hset(f"{queue}.values", key, value) try: pipeline.execute() except (RedisError, OSError) as error: logger.warning( "Could not store a record %s: %s to queue %s. Error: %s", key, value, queue, error, ) return None logger.debug( "Successfully stored a record %s: %s to queue %s.", key, value, queue ) return key
nilq/baby-python
python
# state ChrMarineStartingLeft
# autogenerated by SmartBody
stateManager = scene.getStateManager()

stateChrMarineStartingLeft = stateManager.createState1D("mocapStartingLeft")
stateChrMarineStartingLeft.setBlendSkeleton('ChrBackovic.sk')

# (motion name, blend parameter X) pairs for this 1D starting-left state.
motionSetup = [
    ("ChrMarine@Idle01_ToWalk01", 0),
    ("ChrMarine@Idle01_ToWalk01_Turn90Lf01", -90),
    ("ChrMarine@Idle01_ToWalk01_Turn180Lf01", -180),
]

motions = StringVec()
paramsX = DoubleVec()
for motionName, paramX in motionSetup:
    motions.append(motionName)
    paramsX.append(paramX)

for i in range(0, len(motions)):
    stateChrMarineStartingLeft.addMotion(motions[i], paramsX[i])

# Correspondence (sync) points: one row per sync point, one value per motion,
# in the same order as `motions`.
correspondenceRows = [
    (0, 0, 0),
    (0.73, 1.42, 1.37),
    (1.32, 2.08, 2.06),
    (1.56, 2.43, 2.46),
]
for row in correspondenceRows:
    points = DoubleVec()
    for value in row:
        points.append(value)
    stateChrMarineStartingLeft.addCorrespondencePoints(motions, points)
nilq/baby-python
python
"""Valid URL Configuration for testing purposes""" from django.views.generic import RedirectView GITHUB = RedirectView.as_view( url="https://github.com/jambonsw/django-url-check" )
nilq/baby-python
python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A node transformer that includes utilities for SCT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import gast import six from tensorflow.contrib.py2tf.pyct import pretty_printer class PyFlowParseError(SyntaxError): pass class Base(gast.NodeTransformer): """Base class for specialized transformers.""" def __init__(self, context): """Initialize the transformer. Subclasses should call this. Args: context: An EntityContext. """ self._lineno = 0 self._col_offset = 0 self.context = context def visit(self, node): try: source_code = self.context.source_code source_file = self.context.source_file if source_code and hasattr(node, 'lineno'): self._lineno = node.lineno self._col_offset = node.col_offset return super(Base, self).visit(node) except (ValueError, AttributeError, NotImplementedError) as e: msg = '%s: %s\nOccurred at node:\n%s' % (e.__class__.__name__, str(e), pretty_printer.fmt(node)) if source_code: line = source_code.splitlines()[self._lineno - 1] else: line = '<no source available>' six.reraise(PyFlowParseError, PyFlowParseError( msg, (source_file, self._lineno, self._col_offset + 1, line)), sys.exc_info()[2])
nilq/baby-python
python
#!/usr/bin/env python3
# SatScript: estimates sequencing saturation by subsampling a BAM file at
# several percentages, re-running MACS2 peak calling on each subsample and
# plotting reads / islands / surface / FRIP curves.
import os
import sys
import argparse
import tempfile
import shutil
from saturation.utils import (normalize_args,
                              export_to_file,
                              get_macs_command_line,
                              parse_outputs,
                              save_plot)


def get_parser():
    """Build the command-line parser for SatScript."""
    parser = argparse.ArgumentParser(description='SatScript', add_help=True)
    parser.add_argument("-b", "--bam", type=str,
                        help="Path to the BAM file", required=True)
    parser.add_argument("-m", "--macs", type=str,
                        help="Path to the MACS2 log file", required=True)
    parser.add_argument("-o", "--output", type=str,
                        help="Output filename prefix", default="./")
    parser.add_argument("-s", "--suffix", type=str,
                        help="Output suffixes for reads, islands, surface, frip and saturation files",
                        nargs=5,
                        default=["reads.png", "islands.png", "surface.png", "frip.png", "saturation.txt"])
    parser.add_argument("-p", "--percentage", type=str,
                        help="Target percentage", nargs="*",
                        default=["25", "50", "75", "90", "95", "98", "99", "99.5", "100"])
    parser.add_argument("-t", "--temp", type=str,
                        help="Temp folder", default=".")
    parser.add_argument("-r", "--resolution", type=int,
                        help="Output picture resolution, dpi", default=85)
    return parser


def export_results(args, output_data):
    """Save the four saturation plots plus the raw saturation table.

    Each row of output_data is
    (percent, total_mapped, macs2_reads, islands, surface, frip_score),
    one row per subsampling percentage.
    """
    percent = [line[0] for line in output_data]
    total_mapped = [line[1] for line in output_data]
    macs2_reads = [line[2] for line in output_data]
    islands = [line[3] for line in output_data]
    surface = [line[4] for line in output_data]
    frip_score = [line[5] for line in output_data]
    save_plot(filename=args.output + args.suffix[0],
              res_dpi=args.resolution,
              title="Reads",
              x_data=percent,
              y_data=[total_mapped, macs2_reads],
              labels=["Total mapped reads", "Reads used by MACS"],
              styles=["ro-", "bo-"],
              axis=["%", "reads"])
    save_plot(filename=args.output + args.suffix[1],
              res_dpi=args.resolution,
              title="Islands",
              x_data=percent,
              y_data=[islands],
              labels=["islands"],
              styles=["bo-"],
              axis=["%", "islands"])
    save_plot(filename=args.output + args.suffix[2],
              res_dpi=args.resolution,
              title="Surface",
              x_data=percent,
              y_data=[surface],
              labels=["surface"],
              styles=["bo-"],
              axis=["%", "surface, bp"])
    save_plot(filename=args.output + args.suffix[3],
              res_dpi=args.resolution,
              title="Fraction of Reads in Peaks",
              x_data=percent,
              y_data=[frip_score],
              labels=["FRIP Score"],
              styles=["bo-"],
              axis=["%", "FRIP Score, %"],
              y_max=100)
    # Plain-text table: one whitespace-separated row per percentage.
    export_to_file(args.output + args.suffix[4],
                   "\n".join([" ".join(map(str, line)) for line in output_data]))


def main(argsl=None):
    """Run the saturation analysis for every requested percentage."""
    if argsl is None:
        argsl = sys.argv[1:]
    args,_ = get_parser().parse_known_args(argsl)
    args = normalize_args(args, ["percentage", "suffix", "output", "resolution"])
    print(args)
    # Reuse the MACS2 parameters recorded in the original peak-calling log.
    macs_command_line = get_macs_command_line(args.macs)
    temp_folder = tempfile.mkdtemp(prefix=os.path.join(args.temp, "tmp_"))
    try:
        output_data = []
        for target_percent in args.percentage:
            randsample_output = os.path.join(temp_folder, target_percent + ".bed")
            callpeak_output = os.path.join(temp_folder, target_percent)
            bedmap_output = os.path.join(temp_folder, target_percent + "_reads_at_peaks.txt")
            # 1. Randomly subsample the BAM file down to target_percent.
            #    NOTE(review): os.system return codes are ignored throughout;
            #    a failed external tool silently yields empty downstream files.
            randsample_cmd = " ".join(["macs2", "randsample",
                                       "-t", args.bam,
                                       "-p", target_percent,
                                       "-o", randsample_output])
            print("Run:", randsample_cmd)
            os.system(randsample_cmd)
            # 2. Call peaks on the subsample with the original MACS2 options.
            callpeak_cmd = " ".join(["macs2", macs_command_line,
                                     "-t", randsample_output,
                                     "-n", callpeak_output])
            print("Run:", callpeak_cmd)
            os.system(callpeak_cmd)
            # MACS2 emits broad or narrow peaks depending on its options;
            # prefer the broadPeak file when it exists.
            broad_peak_file = callpeak_output + "_peaks.broadPeak"
            narrow_peak_file = callpeak_output + "_peaks.narrowPeak"
            peak_file = broad_peak_file if os.path.exists(broad_peak_file) else narrow_peak_file
            # 3. Count subsampled reads overlapping peaks (FRIP numerator).
            bedmap_cmd = " ".join(["bedmap --bp-ovr 1 --count",
                                   randsample_output,
                                   peak_file,
                                   " | awk '{s=($1>0)?s+1:s}; END{print s}' > ",
                                   bedmap_output])
            print("Run:", bedmap_cmd)
            os.system(bedmap_cmd)
            result = parse_outputs(xlsfile=callpeak_output + "_peaks.xls",
                                   bedmap_output=bedmap_output,
                                   target_percent=target_percent)
            output_data.append(result)
        export_results(args, output_data)
    except Exception as err:
        print("Error", err)
        raise
    finally:
        # Always remove the per-run temp folder, even on failure.
        shutil.rmtree(temp_folder)


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
nilq/baby-python
python
import functools


def decorate(func):
    """Decorator printing a separator, "before" and "after" around *func*.

    Generalized over the original: the wrapper now forwards arbitrary
    positional/keyword arguments and propagates *func*'s return value.
    This is backward compatible -- zero-argument callers that ignored the
    (previously always-None) result behave exactly as before.
    """
    @functools.wraps(func)  # preserve func's __name__/__doc__ for introspection
    def decorated(*args, **kwargs):
        print("==" * 20)
        print("before")
        result = func(*args, **kwargs)
        print("after")
        return result
    return decorated


@decorate
def target():
    # Demo 1: decorated at definition time via the @ syntax.
    print("target 함수")


target()
## output
"""
========================================
before
target 함수
after
"""


def target2():
    # Demo 2: decorated manually by rebinding the name.
    print("target2 함수 실행함")


target2 = decorate(target2)
target2()
## output
"""
========================================
before
target2 함수 실행함
after
"""
nilq/baby-python
python
"""This script is used to plot a gene2vec embedding""" # imports import argparse import pandas as pd import numpy as np import plotly.express as px import mygene import math import os # describe program parser = argparse.ArgumentParser(description='Plots an embedding of a gene2vec hidden layer.') # arguments parser.add_argument('--embedding', type=str, help='File path of the gene2vec embedding to be plotted.') parser.add_argument('--out', type=str, help='File path of output plot.', default=None) parser.add_argument('--plot-title', dest='plot_title', type=str, help='Custom title for plot.', default=None) parser.add_argument('--alg', type=str, choices=['umap', 'pca', 'mds', 'tsne'], default='umap', help='The dimension reduction algorithm to used to produce the embedding.') parser.add_argument('--species', default=9606, help='Species name or taxid used to generate the gene embedding.') parser.add_argument('--dim', type=int, default=2, help='Dimension of the embedding.') # parse args args = parser.parse_args() # user defined functions def load_embedding(filename): geneList = list() vectorList = list() f = open(filename) for line in f: values = line.split() gene = values[0] vector = np.asarray(values[1:], dtype="float32") geneList.append(gene) vectorList.append(vector) f.close() return np.asarray(vectorList), np.asarray(geneList) def infer_gene_rep(x) -> str: # check for entrez id if type(x) == int: return 'Entrez ID' elif type(x) == str: # check for ensembl id if 'ENS' in x: return 'Ensembl ID' else: # default it gene symbol return 'Gene Symbol' def query_gene_info(gene_ids, species=9606): # infer type of gene id gene_rep = infer_gene_rep(gene_ids[0].item()) # build querying object mg = mygene.MyGeneInfo() # excute query based upon species and gene rep if gene_rep == "Gene Symbol": gene_info = mg.querymany(gene_ids, scopes='symbol', species=species, as_dataframe=True) gene_info = gene_info.groupby("symbol").agg(unique_non_null) gene_info["symbol"] = gene_info.index 
return gene_info elif gene_rep == "Entrez ID": gene_info = mg.querymany(gene_ids, scopes='entrezgene', species=species, as_dataframe=True) gene_info = gene_info.groupby("entrezgene").agg(unique_non_null) gene_info["entrezgene"] = gene_info.index return gene_info elif gene_rep == "Ensembl ID": gene_info = mg.getgenes(gene_ids, fields='name,symbol,entrezgene,taxid', as_dataframe=True) gene_info = gene_info.groupby("query").agg(unique_non_null) gene_info["query"] = gene_info.index return gene_info def unique_non_null(x): # drop na entry and get unique values y = x.dropna().unique() if y.size == 1: return y.item() elif y.size == 0: return pd.NA else: return y if __name__=="__main__": # load gene2vec embedding print("\nRunning:") print(f"\t[*] Loading the Gene2vec embedding: {os.path.abspath(args.embedding)}...") wv, vocabulary = load_embedding(args.embedding) print(f"\t\t- Number of Genes: {'{:,}'.format(vocabulary.size)}.") print(f"\t\t- Embedding Dimension: {wv.shape[1]}.") # find gene info print(f"\t[*] Querying NCBI for gene info...") gene_info = query_gene_info(vocabulary, args.species) # define dimension reduction algorithm if args.alg == 'umap': from umap import UMAP reduce = UMAP(n_components=args.dim) elif args.alg == 'pca': from sklearn.decomposition import PCA reduce = PCA(n_components=args.dim, whiten=True) # reduce dimension print(f"\t[*] Reducing the dimension of Gene2vec embedding with {args.alg.upper()}(dim={args.dim})...") wv_red = reduce.fit_transform(wv) # create dataframe for plotting gene_rep = infer_gene_rep(vocabulary[0].item()) df = pd.DataFrame(index=vocabulary, data=wv_red) df.loc[gene_info.index.values, "Gene Symbol"] = gene_info['symbol'] df.loc[gene_info.index.values, "Tax ID"] = gene_info['taxid'] df.loc[gene_info.index.values, "Entrez ID"] = gene_info['entrezgene'] df.loc[gene_info.index.values, "Name"] = gene_info['name'] if gene_rep == "Ensembl ID": df.loc[vocabulary, "Ensembl ID"] = vocabulary elif gene_rep == "Gene Symbol": 
df.loc[vocabulary, "Gene Symbol"] = vocabulary elif gene_rep == "Entrez ID": df.loc[vocabulary, "Entrez ID"] = vocabulary # replace na df.fillna('NA', inplace=True) # generate hover data hover_data = df.filter(regex="Symbol|ID|Name").columns hover_data = {col: True for col in hover_data} # format columns col_dict = {0: f'{args.alg.upper()} 1', 1: f'{args.alg.upper()} 2', 2: f'{args.alg.upper()} 3'} df.rename(columns=col_dict, inplace=True) # plot print("\t[*] Generating interactive plot via plotly...") if args.dim == 2: fig = px.scatter(df, x=col_dict[0], y=col_dict[1], hover_data=hover_data, #color_continuous_scale="RdBu", #opacity=.7, size_max=8) fig.update_traces(marker=dict(color='rgba(255, 255, 255, 0.1)')) if args.dim == 3: fig = px.scatter_3d(df, x=col_dict[0], y=col_dict[1], z=col_dict[2], hover_data=hover_data, #color_continuous_scale="RdBu", #opacity=.7, size_max=8) fig.update_traces(marker=dict(color='rgba(10, 10, 10, 0.01)')) # update plot layout if args.plot_title is None: args.plot_title = f"Gene2vec Embedding using {args.alg.upper()}" fig.update_layout(template='plotly_dark', title=args.plot_title, font=dict(size=18)) # save to file if args.out is None: embedding_name = os.path.basename(args.embedding).rstrip('.txt') args.out = f"../figures/{embedding_name}_{args.alg}_{args.dim}.html" fig.write_html(args.out) fig.write_json(args.out.replace('.html', '.json')) print(f"\t[*] Plot saved to {os.path.abspath(args.out)}(.json).") print("Complete!\n")
nilq/baby-python
python
import heapq


class MedianFinder:
    """Maintain the running median of a stream of numbers.

    Classic two-heap technique: a max-heap (stored negated) holds the lower
    half of the values, a min-heap holds the upper half.  The lower half is
    allowed to be one element larger, so the median is either its top or the
    mean of the two tops.  The original file only contained the empty method
    skeleton; this supplies the implementation.
    """

    def __init__(self):
        # max-heap via negation: lower half of the numbers
        self._lo = []
        # min-heap: upper half of the numbers
        self._hi = []

    def addNum(self, num: int) -> None:
        """Insert *num* in O(log n)."""
        # push onto the lower half, then migrate its maximum to the upper
        # half so every element of _lo is <= every element of _hi
        heapq.heappush(self._lo, -num)
        heapq.heappush(self._hi, -heapq.heappop(self._lo))
        # rebalance so len(_lo) is len(_hi) or len(_hi) + 1
        if len(self._lo) < len(self._hi):
            heapq.heappush(self._lo, -heapq.heappop(self._hi))

    def findMedian(self) -> float:
        """Return the median of all values added so far in O(1).

        Raises IndexError if no number has been added yet.
        """
        if len(self._lo) > len(self._hi):
            return float(-self._lo[0])
        return (-self._lo[0] + self._hi[0]) / 2.0


# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
nilq/baby-python
python
"""Compute the megabytes available after several months of usage.

Unused megabytes roll over: each month the balance is reduced by that
month's usage and topped up with the monthly quota.  Ported from Python 2
(the original used a `print` statement) and wrapped in main() so the
module can be imported without consuming stdin.
"""
import sys


def main():
    # first two input lines: monthly quota and number of months to simulate
    mb_per_month = int(sys.stdin.readline())
    n_of_months = int(sys.stdin.readline())

    current_num_of_mb = mb_per_month
    for n in range(n_of_months):
        # subtract this month's usage, then add next month's quota
        current_num_of_mb = (current_num_of_mb - int(sys.stdin.readline())) + mb_per_month
    print(current_num_of_mb)


if __name__ == "__main__":
    main()
nilq/baby-python
python
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.test import Client, TestCase # noqa: 401 from django.urls import reverse from django.utils import timezone from .models import Choice, Question class PollViewTests(TestCase): def setUp(self): question = Question( question_text="This is a test question", pub_date=timezone.now() ) question.save() self.question = question choice = Choice( choice_text="This is a test choice", votes=0 ) choice.question = question choice.save() self.choice = choice self.client = Client() def test_index_view(self): response = self.client.get('/') assert response.status_code == 200 assert self.question.question_text in str(response.content) def test_detail_view(self): response = self.client.get( reverse('polls:detail', args=(self.question.id,))) assert response.status_code == 200 assert self.question.question_text in str(response.content) assert self.choice.choice_text in str(response.content) def test_results_view(self): response = self.client.get( reverse('polls:results', args=(self.question.id,))) assert response.status_code == 200 assert self.question.question_text in str(response.content) assert self.choice.choice_text in str(response.content)
nilq/baby-python
python
from typing import Any, Callable, List, TypeVar

from vkwave.bots.core.dispatching.filters.base import (
    BaseFilter,
    AsyncFuncFilter,
    SyncFuncFilter,
)
from vkwave.bots.core.dispatching.handler.base import BaseHandler
from vkwave.bots.core.dispatching.handler.record import HandlerRecord

F = TypeVar("F", bound=Callable[..., Any])


class HandlerRegistrar:
    """Collects handlers and applies registrar-wide default filters to them."""

    def __init__(self):
        self.default_filters: List[BaseFilter] = []
        self.handlers: List[BaseHandler] = []

    def add_default_filter(self, filter: BaseFilter):
        """Register a filter that every handler receives unless it already
        carries a filter of the same type."""
        if isinstance(filter, (AsyncFuncFilter, SyncFuncFilter)):
            raise ValueError(
                "You should add custom filters derived from `BaseFilter` for using default as filter"
            )
        self.default_filters.append(filter)

    def with_decorator(self, *filters: BaseFilter):
        """Decorator form: build, configure and register a handler in one go."""

        def decorator(func: Callable[..., Any]):
            record = self.new()
            record.with_filters(*filters)
            record.handle(func)
            self.register(record.ready())
            return func

        return decorator

    def new(self) -> HandlerRecord:
        """Return a blank handler record ready for configuration."""
        return HandlerRecord()

    def register(self, handler: BaseHandler):
        """Attach missing default filters to *handler* and store it."""
        for default_filter in self.default_filters:
            # a handler-level filter of the same concrete type overrides
            # the registrar-wide default
            already_present = any(
                type(default_filter) is type(existing)
                for existing in handler.filter_manager.filters
            )
            if not already_present:
                handler.filter_manager.add_filter(default_filter)
        self.handlers.append(handler)
nilq/baby-python
python
#!/usr/bin/python import os import re from optparse import OptionParser SUFFIX=".out" def main () : global filename parser = OptionParser() parser.add_option("-f", "--file", dest="filename", help="the file to update", metavar="FILE") parser.add_option("-n", "--name", dest="name", help="the name to replace the original name with", metavar="NAME") parser.add_option("-c", "--fromname", dest="fromname", help="the name be replaced", metavar="FROMNAME") (options, args) = parser.parse_args() if not options.filename : print "You must specify the file to modify" exit(-1) if not options.name : print "You must specify the name to replace Tim with" exit(-1) if not options.fromname : print "You must specify the name to be replaced" exit(-1) fin = open(options.filename, 'r') fout = open(options.filename + SUFFIX, 'w') for line in fin : fout.write(re.sub(options.fromname, options.name, line)) fin.close() fout.close() main()
nilq/baby-python
python
import vvx_nego if __name__ == "__main__": #hogeの部分をエンジンが有るpathに変更して実行してください vvn = vvx_nego.VoicevoxNegotiation("hoge\\run.exe") vvn.request_audio_query("これは", speaker=1) vvn.request_synthesis(vvn.audio_query, speaker=1) vvn.multi_synthesis.append(vvn.synthesis) vvn.request_audio_query("読み上げを実行する", speaker=3) vvn.request_synthesis(vvn.audio_query, speaker=3) vvn.multi_synthesis.append(vvn.synthesis) vvn.request_audio_query("サンプルコードです", speaker=5) vvn.request_synthesis(vvn.audio_query, speaker=5) vvn.multi_synthesis.append(vvn.synthesis) vvn.request_connect_waves(vvn.multi_synthesis) #音が出ます vvn.local_play_synthesis(vvn.synthesis) input()
nilq/baby-python
python
__author__ = 'Aditya Roy' import unittest from time import sleep from WebAutomation.Test.TestUtility.ScreenShot import SS from WebAutomation.Src.PageObject.Pages.ConfirmationPage import Confirmation from WebAutomation.Src.PageObject.Pages.HomePage import Home from WebAutomation.Src.TestBase.EnvironmentSetUp import EnvironmentSetup from WebAutomation.Src.PageObject.Pages.RegistrationPage import Register class MercuryTours_Registration(EnvironmentSetup): def test_RegistrationFlow(self): # Screenshots relative paths ss_path = "/Test_MercuryTours_Registration/" driver = self.driver self.driver.get("http://newtours.demoaut.com") self.driver.set_page_load_timeout(20) # Creating object of SS screenshots utility ss = SS(driver) #calling home page object to click on Register Link home = Home(driver) if home.getRegister().is_displayed(): print("Register Link displaying") home.getRegister().click() sleep(4) #calling registration page object to proceed with registration flow reg = Register(driver) if reg.getRegis_txt().is_displayed(): print(reg.regis_txt.text) ss.ScreenShot(ss_path+"Registration.png") else: print("Registration page not loaded") try: reg.setFirstName("Aditya") reg.setLastName("Roy") reg.setPhone("7501498896") reg.setEmail("aditya.qa14@gmail.com") reg.setCountry("INDIA") reg.setUserName("aditya.qa14@gmail.com") reg.setPassword(123456) reg.setConfirmPassword(123456) sleep(2) ss.ScreenShot(ss_path+"RegistrationData.png") reg.submitRegistration() sleep(4) ss.ScreenShot(ss_path+"PostRegistration.png") except Exception as e: print("Exception occurred "+e) #calling Post Registration check post = Confirmation(driver) print(post.thankYou.text) if (post.UserID.text).find("aditya.qa14@gmail.com"): print("Registration Process Successful") else: print("User Failed to register properly") if __name__ == '__main__': unittest.main()
nilq/baby-python
python
# 写入csv文件 import csv with open('data.csv', 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(['id', 'name', 'age']) writer.writerow(['10001', 'Mike', 20]) writer.writerow(['10002', 'Bob', 22]) writer.writerow(['10003', 'Jordan', 21])
nilq/baby-python
python
#!/usr/bin/env python import decimal import hashlib import json import sys # Parse the query. query = json.load(sys.stdin) # Build the JSON template. boolean_keys = [ 'ActionsEnabled', ] list_keys = [ 'AlarmActions', 'Dimensions', 'InsufficientDataActions', 'OKActions', ] alarm = {} for key, value in query.items(): if key in boolean_keys: value = value.lower() in ('1', 'true') elif key in list_keys: value = json.loads(value) if value: alarm[key] = value content = json.dumps(alarm, indent=2, sort_keys=True) etag = hashlib.md5(content.encode('utf-8')).hexdigest() # Output the result to Terraform. json.dump({ 'key': etag, 'content': content, 'etag': etag, }, sys.stdout, indent=2) sys.stdout.write('\n')
nilq/baby-python
python
from django.urls import path from .ajax import CustomerRequirementAjaxView urlpatterns = [ path('customer/', CustomerRequirementAjaxView.as_view(), name='customerRequirementAjax'), ]
nilq/baby-python
python
import settings
from PyQt5.QtCore import QObject, QEvent
from PyQt5.QtCore import Qt
from enum import Enum
import cv2
import numpy as np
from skimage.draw import rectangle, line

#
# class Mode(Enum):
#     SHOW = 1
#     DRAW = 2
#     ERASE = 3


class GrabCutToolInteractor(QObject):
    """Qt event filter driving interactive OpenCV GrabCut segmentation.

    The user scribbles foreground (left button) and background (right
    button) strokes onto the viewer's tool mask; after every stroke segment
    GrabCut is re-run in mask-initialisation mode and the viewer's mask is
    updated with the result.

    NOTE(review): this looks like experimental code -- draw_brush references
    an undefined `Mode` (commented out above), an unimported `circle`, and a
    missing `update_mode` method, so the brush path would raise NameError /
    AttributeError if reached.  The stroke + mask_grab_cut path is the live
    one.
    """

    def __init__(self, viewer, parent=None):
        # viewer: owning image viewer; assumed to expose image, mask,
        # tool_mask arrays plus coordinate helpers -- TODO confirm interface
        super().__init__(parent)
        self.viewer = viewer
        # self.mode = Mode.SHOW
        # start/end of the (row, col) drag; empty tuple means "no drag active"
        self.rect_start = ()
        self.rect_end = ()
        self.c = 0
        # GrabCut's internal GMM state, reused across calls so the model refines
        self.bgd_model = np.zeros((1, 65), np.float64)
        self.fgd_model = np.zeros((1, 65), np.float64)
        # last mouse position in image coordinates, for stroke interpolation
        self.m_pos = ()

    def eventFilter(self, watched_obj, e):
        """Route mouse press/move/release to handlers; defer everything else."""
        if e.type() == QEvent.MouseButtonPress:
            self.on_mouse_pressed(e)
            return True
        elif e.type() == QEvent.MouseMove:
            self.on_mouse_moved(e)
            return True
        elif e.type() == QEvent.MouseButtonRelease:
            self.on_mouse_released(e)
            return True
        else:
            return super().eventFilter(watched_obj, e)

    def on_mouse_pressed(self, e):
        """Start a stroke: mark the first pixel as foreground or background."""
        if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
            return
        image_coords = self.viewer.pos_to_image_coords(e.pos())
        self.rect_start = (image_coords[0], image_coords[1])
        self.m_pos = (image_coords[0], image_coords[1])
        # RGBA stroke colours: orange = foreground, blue-ish = background
        # (presumably matching settings.TOOL_FOREGROUND/TOOL_BACKGROUND --
        # TODO confirm against the settings module)
        if e.buttons() == Qt.LeftButton:
            self.viewer.tool_mask[image_coords[0], image_coords[1]] = [0, 128, 255, 255]
        elif e.buttons() == Qt.RightButton:
            self.viewer.tool_mask[image_coords[0], image_coords[1]] = [255, 0, 0, 255]

    def on_mouse_moved(self, e):
        """Extend the stroke to the new position and re-run GrabCut."""
        if not self.rect_start:
            return
        if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
            return
        image_coords = self.viewer.pos_to_image_coords(e.pos())
        # self.draw_rect(image_coords[0], image_coords[1])
        # rasterise the segment from the previous to the current position so
        # fast mouse moves still leave a continuous stroke
        rr, cc = line(self.m_pos[0], self.m_pos[1], image_coords[0], image_coords[1])
        if e.buttons() == Qt.LeftButton:
            self.viewer.tool_mask[rr, cc] = [0, 128, 255, 255]
        elif e.buttons() == Qt.RightButton:
            self.viewer.tool_mask[rr, cc] = [255, 0, 0, 255]
        self.m_pos = (image_coords[0], image_coords[1])
        self.mask_grab_cut()
        self.viewer.update_scaled_combined_image()

    def draw_rect(self, row, col):
        """Paint a filled rectangle from rect_start to (row, col) on the tool mask."""
        rr, cc = rectangle(self.rect_start, end=(row, col), shape=self.viewer.tool_mask.shape[:2])
        self.viewer.tool_mask[rr, cc] = [255, 0, 0, 255]

    def on_mouse_released(self, e):
        """Finish the stroke: run GrabCut once more and reset drag state."""
        if not self.rect_start:
            return
        if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
            return
        image_coords = self.viewer.pos_to_image_coords(e.pos())
        self.rect_end = (image_coords[0], image_coords[1])
        # self.grab_cut()
        # if self.c == 1:
        self.mask_grab_cut()
        # self.grab_cut()
        # self.c = 1
        self.rect_start = ()
        # self.draw_brush_event(e)
        # Erase tool mask
        # self.viewer.tool_mask.fill(0)
        self.viewer.update_scaled_combined_image()

    def mask_grab_cut(self):
        """Run GrabCut seeded by the scribbles in the tool mask."""
        print('mask_grab_cut')
        # wherever it is marked white (sure foreground), change mask=1
        # wherever it is marked black (sure background), change mask=0
        # everything else starts as "probable background" (2)
        mask = np.zeros(self.viewer.image.shape[:2], np.uint8)
        mask.fill(2)
        # print('before', mask.shape)
        # aaa = (self.viewer.tool_mask == [0, 128, 255, 255]).all(axis=2)
        # print(aaa.shape)
        # print(aaa)
        # print('bbb')
        mask[np.where((self.viewer.tool_mask == settings.TOOL_FOREGROUND).all(axis=2))] = 1
        mask[np.where((self.viewer.tool_mask == settings.TOOL_BACKGROUND).all(axis=2))] = 0
        print(np.unique(mask))
        # print('after')
        try:
            # single iteration keeps the interaction responsive; the GMM
            # models carry over between calls
            mask, self.bgd_model, self.fgd_model = cv2.grabCut(self.viewer.image, mask, None, self.bgd_model, self.fgd_model, 1, cv2.GC_INIT_WITH_MASK)
            # mask, self.bgd_model, self.fgd_model = cv2.grabCut(self.viewer.image, mask, None, self.bgd_model,
            #                                                    self.fgd_model, 5, cv2.GC_INIT_WITH_MASK)
        except:
            # NOTE(review): bare except silently swallows grabCut failures
            # (e.g. when no foreground seeds exist yet) -- deliberate
            # best-effort, but worth narrowing to cv2.error
            print('exception')
        print(np.unique(mask))
        # 1 = sure FG, 3 = probable FG; 0 = sure BG, 2 = probable BG
        self.viewer.mask[np.where(((mask == 1) | (mask == 3)))] = settings.MASK_COLOR
        self.viewer.mask[np.where(((mask == 0) | (mask == 2)))] = settings.NO_MASK_COLOR

    def grab_cut(self):
        """Alternative: run GrabCut seeded by the dragged rectangle (unused path)."""
        bgd_model = np.zeros((1, 65), np.float64)
        fgd_model = np.zeros((1, 65), np.float64)
        mask = np.zeros(self.viewer.image.shape[:2], np.uint8)
        print(mask.shape)
        # OpenCV expects the rect as (x, y, w, h), i.e. (col, row, ...)
        rect_width = self.rect_end[1] - self.rect_start[1]
        rect_height = self.rect_end[0] - self.rect_start[0]
        rect = (self.rect_start[1], self.rect_start[0], rect_width, rect_height)
        print(rect)
        try:
            cv2.grabCut(self.viewer.image, mask, rect, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_RECT)
        except:
            print('exception grabCut')
        # cv2.GC_PR_BGD
        # cv2.GC_FGD
        # print(np.where((mask == 2) | (mask == 0)))
        # self.viewer.mask = np.where((mask == 2) | (mask == 0), settings.MASK_COLOR)
        #
        # print(mask)
        # print(mask.shape)
        # mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype(np.uint8)
        self.viewer.mask[np.where(((mask == 1) | (mask == 3)))] = settings.MASK_COLOR
        self.viewer.mask[np.where(((mask == 0) | (mask == 2)))] = settings.NO_MASK_COLOR
        # self.viewer.mask = np.where((mask == 1) | (mask == 3), settings.MASK_COLOR, settings.NO_MASK_COLOR)
        # mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        # img = img * mask2[:, :, np.newaxis]

    def draw_brush_event(self, e):
        """Brush entry point (unused path).

        NOTE(review): calls self.update_mode, which is not defined on this
        class -- would raise AttributeError if ever invoked.
        """
        if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
            return
        image_coords = self.viewer.pos_to_image_coords(e.pos())
        self.update_mode(e)
        self.draw_brush(image_coords[0], image_coords[1])
        self.viewer.update_scaled_combined_image()

    def draw_brush(self, row, col):
        """K-means-assisted brush: split the brush circle into two clusters
        and mark the cluster under the cursor as foreground (unused path).

        NOTE(review): `circle` is not imported (skimage.draw import only
        brings in rectangle and line) and `self.mode` / `Mode` are never
        defined, so this method would raise NameError if reached.
        """
        # Erase old tool mask
        self.viewer.tool_mask.fill(0)
        rr, cc = circle(row, col, 22, self.viewer.tool_mask.shape)
        # self.tool_mask[rr, cc] = [0, 255, 0, 255]
        samples = self.viewer.image[rr, cc][:, 0]  # use only first channel
        samples = samples.astype(np.float32)
        number_of_clusters = 2
        if number_of_clusters > samples.size:
            return
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        ret, label, center = cv2.kmeans(samples, number_of_clusters, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        label = label.ravel()  # 2D array (one column) to 1D array without copy
        center_pixel_indexes = np.where(np.logical_and(rr == row, cc == col))[0]
        if center_pixel_indexes.size != 1:  # there are situations, when the center pixel is out of image
            return
        center_pixel_index = center_pixel_indexes[0]
        center_pixel_label = label[center_pixel_index]
        if self.mode == Mode.ERASE:
            self.viewer.tool_mask[rr, cc] = [0, 0, 255, 255]
        else:
            brush_circle = self.viewer.tool_mask[rr, cc]
            brush_circle[label == center_pixel_label] = [0, 128, 255, 255]
            brush_circle[label != center_pixel_label] = [255, 0, 0, 255]
            self.viewer.tool_mask[rr, cc] = brush_circle
        if self.mode == Mode.DRAW:
            brush_circle = self.viewer.mask[rr, cc]
            brush_circle[label == center_pixel_label] = settings.MASK_COLOR
            self.viewer.mask[rr, cc] = brush_circle
        elif self.mode == Mode.ERASE:
            self.viewer.mask[rr, cc] = [0, 0, 0, 0]
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Created on Wed Oct 3 14:12:10 2018 @author: joshcole #F1_Data Analysis Fake Loan Company """ import pandas as pd loan_data=pd.read_csv("drop_location/train_loan data.csv") print (loan_data)
nilq/baby-python
python
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
from functools import lru_cache

# Class labels in the order of the model's output units.
CLASS_NAMES = ['Tomato___Bacterial_spot', 'Tomato___Early_blight',
               'Tomato___Late_blight', 'Tomato___Leaf_Mold',
               'Tomato___Septoria_leaf_spot',
               'Tomato___Spider_mites Two-spotted_spider_mite',
               'Tomato___Target_Spot',
               'Tomato___Tomato_Yellow_Leaf_Curl_Virus',
               'Tomato___Tomato_mosaic_virus',
               'Tomato___healthy']


@lru_cache(maxsize=1)
def _load_model():
    """Load the trained CNN once and cache it.

    The original reloaded the .h5 file from disk on every prediction,
    which dominates the per-call latency.
    """
    return tf.keras.models.load_model(
        'CNN_model.h5', custom_objects={'KerasLayer': hub.KerasLayer})


def predict_image(im):
    """Classify a tomato-leaf image and return the top-1 class name.

    im: anything np.asarray can convert to an image array; it is resized to
    256x256 and scaled to [0, 1] before inference (assumes pixel values in
    0..255 -- TODO confirm with callers).
    """
    model = _load_model()
    im = np.asarray(im)
    image = tf.image.resize(im, (256, 256))
    img = image / 255.0
    batch = tf.expand_dims(img, axis=0)
    preds = model.predict(batch)
    # top_k with k=1 yields one index per batch element
    probs, class_idx = tf.math.top_k(preds, k=1)
    return CLASS_NAMES[class_idx.numpy()[0][0]]
nilq/baby-python
python
#!/usr/bin/env python3
# Aggregate per-replay alignment results into one predictive-output CSV per
# result directory.  For every directory under ./results-agg/<name>, the
# script collects replay configurations, keeps only (model, log) pairs seen
# by *every* sub-directory, merges each pair's per-algorithm trace statistics
# via alignclf.create_clf_data, and writes <name>-predictive-output.csv.

import os, sys, json
import numpy as np
import pandas as pd
import functools as fct
import collections as cols

from alignclf import create_clf_data

if __name__ == '__main__':
    # Result directory names to process; nets 3-5 are currently disabled.
    result_dnames = [
        'clst-2018-12-generic_50-inc0-net1',
        'clst-2018-12-generic_50-inc0-net2',
        # 'clst-2018-12-generic_50-inc0-net3',
        # 'clst-2018-12-generic_50-inc0-net4',
        # 'clst-2018-12-generic_50-inc0-net5',
        'clst-2018-12-sese_25-inc0-net1',
        'clst-2018-12-sese_25-inc0-net2',
        # 'clst-2018-12-sese_25-inc0-net3',
        # 'clst-2018-12-sese_25-inc0-net4',
        # 'clst-2018-12-sese_25-inc0-net5'
    ]

    # find out the subset of logs
    for result_dname in result_dnames:
        result_dir = os.path.join('.', 'results-agg', result_dname)

        print('Processing {}'.format(result_dname))

        # one (model, log) set per top-level sub-directory; their
        # intersection below keeps only pairs present everywhere
        model_log_sets = []
        # dir_map[model][log] -> list of (algo_type, replay_dirpath)
        dir_map = dict()

        for d in os.listdir(result_dir):
            dirpath = os.path.join(result_dir, d)

            if not os.path.isdir(dirpath):
                continue

            model_log_set = set()

            for replay_d in os.listdir(dirpath):
                replay_dirpath = os.path.join(dirpath, replay_d)

                if not os.path.isdir(replay_dirpath):
                    continue

                # each replay directory carries its own configs.json
                # describing the log, model and algorithm used
                configs_fp = os.path.join(replay_dirpath, 'configs.json')

                with open(configs_fp) as f:
                    configs_dict = json.load(f)

                log = configs_dict['log']
                model = configs_dict['model']

                # recomposing replays are distinguished by a "recomp-" prefix
                if 'recomposeStrategy' in configs_dict:
                    algo_type = 'recomp' + '-' + configs_dict['algorithmType']
                else:
                    algo_type = configs_dict['algorithmType']

                if model not in dir_map:
                    dir_map[model] = cols.defaultdict(list)

                dir_map[model][log].append((algo_type, replay_dirpath))

                model_log_set.add((model, log))

            model_log_sets.append(model_log_set)

        # keep only (model, log) pairs present in every sub-directory
        model_logs = list(fct.reduce(lambda s1, s2: s1.intersection(s2), model_log_sets))

        model_log_dict = cols.defaultdict(list)

        for model, log in model_logs:
            model_log_dict[model].append(log)

        # print('Model and logs: {}'.format(model_logs))
        # print('Model log set: {}'.format(model_log_sets))

        clf_df_list = list()

        for model, logs in model_log_dict.items():
            if not logs:
                continue

            for log in logs:
                # per-algorithm result frames for this (model, log) pair
                result_df_dict = dict()

                for algo_type, dirpath in dir_map[model][log]:
                    # monolithic replays ship an enriched stats file;
                    # recomposing replays only the plain one
                    is_mono = 'recomp' not in algo_type
                    # print('algo_type: {}'.format(algo_type))

                    if is_mono:
                        result_fp = os.path.join(dirpath, 'trace-stats-enriched.csv')
                        result_df = pd.read_csv(result_fp)
                        result_df[create_clf_data.RESULT_DIR] = dirpath
                        result_df = create_clf_data.process_df(result_df)
                    else:
                        result_fp = os.path.join(dirpath, 'trace-stats.csv')
                        result_df = pd.read_csv(result_fp)
                        result_df[create_clf_data.RESULT_DIR] = dirpath
                        result_df = create_clf_data.process_recomposing_df(result_df)

                    result_df_dict[algo_type] = result_df

                clf_df = create_clf_data.to_clf_df(result_df_dict)

                # prepend (model, log) identifier columns; the tuple column
                # labels suggest clf_df has a MultiIndex header -- TODO confirm
                columns = list(clf_df.columns)
                clf_df['model'] = model
                clf_df['log'] = log
                columns = [('model', ''), ('log', '')] + columns
                clf_df = clf_df[columns]

                clf_df_list.append(clf_df)

        clf_df = pd.concat(clf_df_list, axis=0)

        out_fp = os.path.join(result_dir, '{}-predictive-output.csv'.format(result_dname))
        clf_df.to_csv(out_fp, index=False)
nilq/baby-python
python
from setuptools import setup
from setuptools import find_packages

# Single source of truth for the package version.
version = '0.0.1'

# Trove classifiers, one per line (leading/trailing blanks stripped).
classifiers = """
Development Status :: 3 - Alpha
Intended Audience :: Developers
Operating System :: OS Independent
Programming Language :: JavaScript
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
""".strip().splitlines()

setup(
    name='dataportal_pmr_services',
    version=version,
    description='Services of PMR',
    long_description=open('README.md').read(),
    classifiers=classifiers,
    keywords='',
    author='Auckland Bioengineering Institute',
    url='https://github.com/alan-wu/dataportal_pmr_services',
    # packages live under src/ (src-layout)
    packages=find_packages('src', exclude=['ez_setup']),
    package_dir={'': 'src'},
    namespace_packages=['dataportal_map'],
    zip_safe=False,
    install_requires=[
        'setuptools>=12',
        'requests',
        # direct-URL dependencies pinned to specific GitHub branches
        'pmr2.client @ https://api.github.com/repos/alan-wu/pmr2.client/tarball/scaffold',
        'pmr2.wfctrl @ https://api.github.com/repos/PMR2/pmr2.wfctrl/tarball/master',
    ],
    include_package_data=True,
    python_requires='>=3.5',
    # test_suite="",
)
nilq/baby-python
python
def insertion_sort(list1):
    """Sort *list1* in place with insertion sort and return it.

    For each element, shift the larger items of the sorted prefix one slot
    to the right, then drop the element into the gap.
    """
    for index in range(1, len(list1)):
        current = list1[index]
        position = index
        # walk left while the prefix element is larger than current
        while position > 0 and current < list1[position - 1]:
            list1[position] = list1[position - 1]
            position -= 1
        list1[position] = current
    return list1


# Demo run.
list1 = [10, 7, 5, 4, 15]
print("The unsorted list", list1)
print("The sorted list1 is", insertion_sort(list1))
nilq/baby-python
python
import pandas as pd
import requests

# Endpoint of the locally served bike-share prediction service.
url = 'http://localhost:9696/predict'

# Hand-picked rows from the evaluation period; the trailing comment on each
# row records the actually observed share count for comparison.
sample_data_points = [
    {'timestamp': '2016-12-22 08:00:00', 't1': 5.0, 't2': 2.0, 'hum': 100.0, 'wind_speed': 13.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 0, 'season': 3},  # actual=2510
    {'timestamp': '2016-08-11 15:00:00', 't1': 22.5, 't2': 22.5, 'hum': 51.5, 'wind_speed': 22.0, 'weather_code': 2, 'is_holiday': 0, 'is_weekend': 0, 'season': 1},  # actual=1862
    {'timestamp': '2016-12-30 10:00:00', 't1': 4.0, 't2': 1.5, 'hum': 100.0, 'wind_speed': 10.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 0, 'season': 3},  # actual=601
    {'timestamp': '2016-12-07 06:00:00', 't1': 10.5, 't2': 10.0, 'hum': 94.0, 'wind_speed': 12.0, 'weather_code': 3, 'is_holiday': 0, 'is_weekend': 0, 'season': 3},  # actual=592
    {'timestamp': '2016-11-22 22:00:00', 't1': 8.5, 't2': 7.5, 'hum': 87.0, 'wind_speed': 8.0, 'weather_code': 7, 'is_holiday': 0, 'is_weekend': 0, 'season': 2},  # actual=571
    {'timestamp': '2016-12-25 23:00:00', 't1': 13.0, 't2': 13.0, 'hum': 79.5, 'wind_speed': 28.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 1, 'season': 3},  # actual=662
    {'timestamp': '2016-12-28 20:00:00', 't1': 3.5, 't2': 1.5, 'hum': 96.5, 'wind_speed': 7.0, 'weather_code': 1, 'is_holiday': 0, 'is_weekend': 0, 'season': 3},  # acutal=414
    {'timestamp': '2016-12-26 08:00:00', 't1': 8.0, 't2': 5.0, 'hum': 82.0, 'wind_speed': 22.0, 'weather_code': 1, 'is_holiday': 1, 'is_weekend': 0, 'season': 3},  # actual=263
]

# Pick one sample and ask the service for a prediction (actual=592).
details = sample_data_points[3]
prediction = requests.post(url,json=details).json()

print(f"input data: {details}")
print(f"predicted bike shares: {prediction}")
nilq/baby-python
python
import random
from vprasanja import slovar, vprasanja_multiple_izbire, riziki

#======================================================================================
# Constants
#======================================================================================
STEVILO_DOVOLJENIH_NAPAK = 5   # wrong answers allowed before losing
STEVILO_PRAVILNIH = 9          # correct answers needed to win
STEVILO_KVIZ_MULTIPLE = 4      # score at which multiple-choice questions start
STEVILO_KVIZ_RIZIKI = 8        # score at which the video ("riziki") question starts
PRAVILEN_ODGOVOR = "+"
NI_ODGOVORA = "0"
NAPACEN_ODGOVOR = "-"
ZMAGA = "W"
PORAZ = "X"
ZACETEK = "S"
KVIZ_MULTIPLE = "M"
KVIZ_RIZIKI = "R"


#=============================================================================================
# Igra (game) class
#=============================================================================================
class Igra:
    """One quiz run: tracks progress and grades answers across three tiers
    (free-text, multiple-choice, video)."""

    def __init__(self, st_vprasanj):
        # index of the question currently being asked
        self.trenutno_vprasanje_idx = 0
        # number of correct answers so far; also selects the question tier
        self.pravilni_odgovori = 0
        self.vprasanja_mul = random.sample(list(vprasanja_multiple_izbire), st_vprasanj)  # [1, 2,...]
        self.vprasanja = random.sample(list(slovar), st_vprasanj)  # [5, 7, ...]
        self.vprasanja_riziki = random.sample(list(riziki), 1)  # only one video per game (returns e.g. [1])

    def trenutno_vprasanje(self):
        """Return the dict describing the current question; the tier is
        chosen from the number of correct answers so far."""
        if self.pravilni_odgovori >= STEVILO_KVIZ_RIZIKI:  # show all (4-5) sub-questions of the video
            vpr_2 = int(self.vprasanja_riziki[0])  # e.g. 1
            return riziki.get(vpr_2)  # {"tip": "tip_2", "vprasanje": [{'vpr':'', 'odg': [odg]}, ...], "mozni_odg": [], "video": "https"}
        if self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI):
            vpr_1 = self.vprasanja_mul[self.trenutno_vprasanje_idx]  # e.g. 18
            return vprasanja_multiple_izbire.get(vpr_1)  # {'tip': 'tip_1', 'vprasanje': '...', 'odgovor': '0.4', 'mozni_odg': [...], 'slika': 'http'}
        else:
            vpr_0 = self.vprasanja[self.trenutno_vprasanje_idx]  # e.g. 4
            return slovar.get(vpr_0)  # {'tip': 'tip_0', 'vprasanje': '?', 'primer_odg': '', 'odgovor': ''}

    def stevilo_napacnih(self):
        """Number of wrong answers given so far."""
        return self.trenutno_vprasanje_idx - self.pravilni_odgovori

    def stevilo_pravilnih(self):
        """Number of correct answers given so far."""
        return self.pravilni_odgovori

    def tip_2(self):
        """True when the score has just reached the video tier."""
        return self.pravilni_odgovori == STEVILO_KVIZ_RIZIKI

    def tip_1(self):
        """True while the score is inside the multiple-choice tier."""
        return self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI)

    def zmaga(self):
        """True once enough correct answers have been collected to win."""
        return self.pravilni_odgovori == STEVILO_PRAVILNIH

    def poraz(self):
        """True once the wrong-answer budget is exhausted."""
        return self.stevilo_napacnih() > STEVILO_DOVOLJENIH_NAPAK

    def enakost_odgovorov(self, odgovor):
        """Grade *odgovor* against the current question and advance the index.

        The comparison rules differ per tier: the video tier compares an
        ordered list of answers, the multiple-choice tier compares exact
        values, the free-text tier compares case- and space-insensitively.
        """
        if self.pravilni_odgovori >= STEVILO_KVIZ_RIZIKI:
            seznam_vpr = self.trenutno_vprasanje().get('vprasanje')  # [{'vpr':'','odg':[]}, {vpr:odg}, ...]
            # flatten the expected answers of all sub-questions, in order
            pravilen_odgovor = []
            for slovar_vpr in seznam_vpr:
                for odg in slovar_vpr.get('odg'):
                    pravilen_odgovor.append(odg)
            self.trenutno_vprasanje_idx += 1
            # from the server the answers arrive as:
            # [('odgovor_0', '2 rotaciji'), ('odgovor_1', '3 rotacije')]
            samo_odgovori = []
            for polje, vrednost in odgovor:
                samo_odgovori.append(vrednost)  # ['2 rotaciji', '3 rotacije']
            # debugging aids
            # print('test_2')
            # print(samo_odgovori)
            # print('test_3')
            # print(pravilen_odgovor)
            return samo_odgovori == pravilen_odgovor
        if self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI):
            pravilen_odgovor = self.trenutno_vprasanje().get("odgovor")  # e.g. 0.4
            self.trenutno_vprasanje_idx += 1
            return odgovor == pravilen_odgovor  # True/False
        else:
            pravilen_odgovor = self.trenutno_vprasanje().get("odgovor")  # a list
            self.trenutno_vprasanje_idx += 1
            return any(x.upper().replace(" ","") == odgovor.upper().replace(" ","") for x in pravilen_odgovor)
            # server answers are stripped of spaces and upper-cased, and so
            # are the dictionary answers, in case anyone ever adds questions

    def ugibaj(self, odgovor):
        """Process a guess and return one of the state codes defined above."""
        if odgovor == "":
            return NI_ODGOVORA  # returns "0"
        if self.enakost_odgovorov(odgovor) == True:
            self.pravilni_odgovori += 1
            # tier transitions take precedence over the plain "correct" code
            if self.tip_2():
                return KVIZ_RIZIKI
            elif self.tip_1():
                return KVIZ_MULTIPLE
            if self.zmaga():
                return ZMAGA
            return PRAVILEN_ODGOVOR
        else:
            if self.poraz():
                return PORAZ
            return NAPACEN_ODGOVOR


#===========================================================================================
# Factory returning a fresh game.
#===========================================================================================
def nova_igra():
    # STEVILO_PRAVILNIH + STEVILO_DOVOLJENIH_NAPAK must not exceed the
    # number of questions available in the dictionaries
    return Igra(STEVILO_PRAVILNIH + STEVILO_DOVOLJENIH_NAPAK)


#================================================================================================
# Kviz class
#================================================================================================
class Kviz:
    """Registry of running games keyed by an integer game id."""

    def __init__(self):
        self.igre = {}

    def prost_id_igre(self):
        """Return the next free game id (max existing id + 1)."""
        if self.igre == {}:
            return 0
        else:
            return max(self.igre.keys()) + 1  # dict_keys([1, 2]) -> max is 2 -> next id is 3

    def nova_igra(self):
        """Create a game, store it in the START state and return its id."""
        igra = nova_igra()  # Igra(st_vprasanj)
        id_igre = self.prost_id_igre()
        self.igre[id_igre] = (igra, ZACETEK)  # igre[id_igre] holds (game, state)
        return id_igre

    def ugibaj(self, id_igre, odgovor):
        """Forward a guess to the game and record the resulting state."""
        igra = self.igre[id_igre][0]
        stanje = igra.ugibaj(odgovor)
        self.igre[id_igre] = (igra, stanje)  # state "R", "M", "W", "X" or "0", "-", "+"
nilq/baby-python
python
from .p2pnet import build # build the P2PNet model # set training to 'True' during training def build_model(args, training=False): return build(args, training)
nilq/baby-python
python
"""Misc utilities: global seeding, object description helpers, and the
ray.tune command-line parser plus an attribute-access dict.

NOTE(review): the file uses f-strings (3.6+), so the ``__future__``
imports are legacy 2/3-compat leftovers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import abc
import json
import os
import random
from collections import OrderedDict
from pprint import pformat

import pydash as ps
import torch
import torch.nn as nn
import numpy as np
import tensorflow as tf
from ray.tune.trial import Trial, json_to_resources


# -------------------- Seed:Global -------------------- #

def set_global_seeds(seed: int) -> None:
    """Seed every RNG in use (stdlib, NumPy, TensorFlow, PyTorch CPU+CUDA)."""
    random.seed(seed)
    np.random.seed(seed)
    # NOTE(review): ``tf.set_random_seed`` is the TF 1.x API; under TF 2.x
    # this is ``tf.random.set_seed`` -- confirm the pinned TF version.
    tf.set_random_seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


# -------------------- Info:Describe -------------------- #

def get_cls_name(obj, lower: bool = False) -> str:
    r"""
    Get the class name of an object
    """
    class_name = obj.__class__.__name__
    if lower:
        class_name = class_name.lower()
    return class_name


def get_cls_attr(obj) -> dict:
    r"""
    Get the class attr of an object as dict

    Attributes that themselves carry a ``__dict__`` (i.e. most custom
    objects) are stringified so the result stays printable.
    """
    attr_dict = {}
    for k, v in obj.__dict__.items():
        if hasattr(v, '__dict__'):
            val = str(v)
        else:
            val = v
        attr_dict[k] = val
    return attr_dict


def describe(cls) -> str:
    """Render a human-readable, multi-line description of an object's
    attributes; the ``config`` attribute is deliberately skipped."""
    desc_list = [f'{get_cls_name(cls)}:']
    for k, v in get_cls_attr(cls).items():
        if k == 'config':
            continue
        elif ps.is_dict(v) or ps.is_dict(ps.head(v)):
            # Pretty-print dicts and lists-of-dicts for readability.
            desc_v = pformat(v)
        else:
            desc_v = v
        desc_list.append(f'- {k} = {desc_v}')  # \t| type -> {type(desc_v)}')
    desc = '\n'.join(desc_list)
    return desc


# -------------------- Parser:Create -------------------- #

def make_parser(parser_creator=None, **kwargs) -> argparse.ArgumentParser:
    """Returns a base argument parser for the ray.tune tool.

    Args:
        parser_creator: A constructor for the parser class.
        kwargs: Non-positional args to be passed into the parser class
            constructor.
    """
    if parser_creator:
        parser = parser_creator(**kwargs)
    else:
        parser = argparse.ArgumentParser(**kwargs)

    # Note: keep this in sync with rllib/train.py
    parser.add_argument(
        "--run",
        default=None,
        type=str,
        help="The algorithm or model to train. This may refer to the name "
        "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
        "user-defined trainable function or class registered in the "
        "tune registry.")
    parser.add_argument(
        "--stop",
        default="{}",
        type=json.loads,
        help="The stopping criteria, specified in JSON. The keys may be any "
        "field returned by 'train()' e.g. "
        "'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
        "after 600 seconds or 100k iterations, whichever is reached first.")
    parser.add_argument(
        "--config",
        default="{}",
        type=json.loads,
        help="Algorithm-specific configuration (e.g. env, hyperparams), "
        "specified in JSON.")
    parser.add_argument(
        "--resources-per-trial",
        default=None,
        type=json_to_resources,
        help="Override the machine resources to allocate per trial, e.g. "
        "'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
        "unless you specify them here. For RLlib, you probably want to "
        "leave this alone and use RLlib configs to control parallelism.")
    parser.add_argument(
        "--num-samples",
        default=1,
        type=int,
        help="Number of times to repeat each trial.")
    parser.add_argument(
        "--checkpoint-freq",
        default=0,
        type=int,
        help="How many training iterations between checkpoints. "
        "A value of 0 (default) disables checkpointing.")
    parser.add_argument(
        "--checkpoint-at-end",
        action="store_true",
        help="Whether to checkpoint at the end of the experiment. "
        "Default is False.")
    parser.add_argument(
        "--keep-checkpoints-num",
        default=None,
        type=int,
        help="Number of last checkpoints to keep. Others get "
        "deleted. Default (None) keeps all checkpoints.")
    parser.add_argument(
        "--checkpoint-score-attr",
        default="training_iteration",
        type=str,
        help="Specifies by which attribute to rank the best checkpoint. "
        "Default is increasing order. If attribute starts with min- it "
        "will rank attribute in decreasing order. Example: "
        "min-validation_loss")
    parser.add_argument(
        "--export-formats",
        default=None,
        help="List of formats that exported at the end of the experiment. "
        "Default is None. For RLlib, 'checkpoint' and 'model' are "
        "supported for TensorFlow policy graphs.")
    parser.add_argument(
        "--max-failures",
        default=3,
        type=int,
        help="Try to recover a trial from its last checkpoint at least this "
        "many times. Only applies if checkpointing is enabled.")
    parser.add_argument(
        "--scheduler",
        default="FIFO",
        type=str,
        help="FIFO (default), MedianStopping, AsyncHyperBand, "
        "HyperBand, or HyperOpt.")
    parser.add_argument(
        "--scheduler-config",
        default="{}",
        type=json.loads,
        help="Config options to pass to the scheduler.")

    # Note: this currently only makes sense when running a single trial
    parser.add_argument(
        "--restore",
        default=None,
        type=str,
        help="If specified, restore from this checkpoint.")
    return parser


# -------------------- Parser:Convert -------------------- #

class DotDict(dict):
    """
    Dictionary to access attributes

    NOTE(review): because ``__getattr__ = dict.get``, a missing key yields
    ``None`` instead of raising AttributeError -- callers must not rely on
    attribute errors for absent keys.
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
nilq/baby-python
python
"""Tests for the tessellation tilers (squared and H3)."""
import re

import geopandas as gpd
from ...tessellation import tilers
import shapely
import pytest

# A small lon/lat rectangle around Beijing used as the base shape fixture.
poly = [[[116.1440758191, 39.8846396072],
         [116.3449987678, 39.8846396072],
         [116.3449987678, 40.0430521004],
         [116.1440758191, 40.0430521004],
         [116.1440758191, 39.8846396072]]]
geom = [shapely.geometry.Polygon(p) for p in poly]
bbox = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")


@pytest.mark.parametrize('tiler_type', ["squared", "h3_tessellation"])
@pytest.mark.parametrize('base_shape', ['Beijing, China', bbox])
@pytest.mark.parametrize('meters', [15000])
def test_tiler_get(tiler_type, base_shape, meters):
    # Smoke test: every tiler type accepts both a place name and a
    # GeoDataFrame and yields a GeoDataFrame tessellation.
    tessellation = tilers.tiler.get(tiler_type, base_shape=base_shape, meters=meters)
    assert isinstance(tessellation, gpd.GeoDataFrame)


# Arrange
@pytest.fixture()
def h3_tess():
    # Fresh H3 tiler per test.
    return tilers.H3TessellationTiler()


@pytest.mark.parametrize("input_meters, expected_res", [(500, 8), (1500, 7), (5000, 6)])
def test__meters_to_res(h3_tess, input_meters, expected_res):
    # Larger cell size in meters maps to a coarser (smaller) H3 resolution.
    assert h3_tess._meters_to_res(input_meters) == expected_res


def test__get_appropriate_res(h3_tess):
    # NOTE(review): expects 8 where _meters_to_res(5000) is 6 -- presumably
    # _get_appropriate_res clamps to a minimum resolution for small shapes;
    # confirm against the tiler implementation.
    assert h3_tess._get_appropriate_res(bbox, 5000) == 8


# test UserWarning is triggered for input hexs
# that are larger than the base_shape
def test_warning(h3_tess):
    with pytest.warns(UserWarning) as uws:
        pattern = r".*Try something smaller.*"
        h3_tess._get_appropriate_res(bbox, 50000)
    # check that 2 warnings were raised
    assert len(uws) == 2
    # check that the message matches
    assert re.match(pattern, uws[1].message.args[0])
nilq/baby-python
python