content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
[ { 'inputs': ['formula'], 'output': 'Property Band gap' }, { 'inputs': ['formula', 'Temperature (Property Band gap)'], 'output': 'Property Band gap' }, { 'inputs': ['formula'], 'output': 'Property Color' },{ 'inputs': ['formula', 'Property Band gap'], 'output': 'Property Color' }, ]
nilq/baby-python
python
#-*- encoding:utf-8 -*- import json import unittest import responses try: from unittest import mock except: import mock from nta import ( NaverTalkApi ) from nta.models import( CompositeContent, Composite, ElementData, ElementList, ButtonText, ButtonLink, ButtonCalendar, QuickReply ) class TestNaverTalkAPI(unittest.TestCase): def setUp(self): self.tested = NaverTalkApi('test_naver_talk_access_token') @responses.activate def test_send_composite(self): responses.add( responses.POST, NaverTalkApi.DEFAULT_API_ENDPOINT, json={ "success": True, "resultCode": "00" }, status=200 ) counter = mock.MagicMock() def test_callback(res, payload): self.assertEqual(res.result_code, "00") self.assertEqual(res.success, True) self.assertEqual( payload.as_json_dict(), { 'event': 'send', 'user': 'test_user_id', 'compositeContent': { 'compositeList': [ { 'title': 'test_title', 'description': 'test_descript', 'image': { 'imageUrl': 'test_image' }, 'elementList':{ 'type': 'LIST', 'data': [ { 'title': 'test_ed_title', 'description': 'test_ed_descript', 'subDescription': 'test_ed_subdescript', 'image': { 'imageUrl': 'test_ed_image' }, 'button':{ 'type': 'TEXT', 'data': { 'title': 'test' } } } ] }, 'buttonList': None } ] }, 'options': { 'notification': False } } ) counter() self.tested.send( 'test_user_id', message=CompositeContent( composite_list=[ Composite( title='test_title', description='test_descript', image='test_image', element_list=ElementList([ ElementData( title='test_ed_title', description='test_ed_descript', sub_description='test_ed_subdescript', image='test_ed_image', button=ButtonText('test') ) ]) ) ] ), callback=test_callback ) self.assertEqual(counter.call_count, 1) @responses.activate def test_send_composite_with_quick_reply(self): responses.add( responses.POST, NaverTalkApi.DEFAULT_API_ENDPOINT, json={ "success": True, "resultCode": "00" }, status=200 ) counter = mock.MagicMock() def test_callback(res, payload): self.assertEqual(res.result_code, "00") 
self.assertEqual(res.success, True) self.assertEqual( payload.as_json_dict(), { 'event': 'send', 'user': 'test_user_id', 'compositeContent': { 'compositeList': [ { 'title': 'test_title', 'description': None, 'elementList': None, 'buttonList': None } ], 'quickReply': { 'buttonList': [{ 'data': { 'code': 'PAYLOAD', 'title': 'text'}, 'type': 'TEXT'}, { 'data': { 'mobileUrl': None, 'title': 'text', 'url': 'PAYLOAD'}, 'type': 'LINK'}]} }, 'options': { 'notification': False } } ) counter() self.tested.send( 'test_user_id', message=CompositeContent( composite_list=[ Composite( title='test_title' ) ] ), quick_reply=QuickReply( [ {'type': 'TEXT', 'title': 'text', 'value': 'PAYLOAD'}, {'type': 'LINK', 'title': 'text', 'value': 'PAYLOAD'} ] ), callback=test_callback ) self.assertEqual(counter.call_count, 1) self.tested.send( 'test_user_id', message=CompositeContent( composite_list=[ Composite( title='test_title' ) ], quick_reply=[ ButtonText('text', 'PAYLOAD'), ButtonLink('text', 'PAYLOAD') ] ), callback=test_callback ) self.assertEqual(counter.call_count, 2) @responses.activate def test_composite_with_calendar(self): responses.add( responses.POST, NaverTalkApi.DEFAULT_API_ENDPOINT, json={ "success": True, "resultCode": "00" }, status=200 ) counter = mock.MagicMock() def test_callback(res, payload): target = { "event": "send", "user": "test_user_id", "compositeContent": { "compositeList": [ { "title": "톡톡 레스토랑", "description": "파스타가 맛있는집", 'elementList': None, "buttonList": [ { "type": "CALENDAR", "data": { "title": "방문 날짜 선택하기", "code": "code_for_your_bot", "options": { "calendar": { "placeholder": "방문 날짜를 선택해주세요.", "start": "20180301", "end": "20180430", "disables": "1,20180309,20180315-20180316" } } } } ] } ] }, 'options': { 'notification': False } } self.assertEqual(target, payload.as_json_dict()) counter() self.tested.send( "test_user_id", message=CompositeContent( composite_list=[ Composite( title= "톡톡 레스토랑", description="파스타가 맛있는집", button_list=[ ButtonCalendar( 
title="방문 날짜 선택하기", code="code_for_your_bot", placeholder="방문 날짜를 선택해주세요.", start="20180301", end="20180430", disables="1,20180309,20180315-20180316" ) ] ) ] ), callback=test_callback ) self.assertEqual(counter.call_count, 1)
nilq/baby-python
python
from flask import Flask, request, Response
import requests, json

app = Flask(__name__)


@app.route('/webhook', methods=["POST"])
def webhook():
    """Accept a POST, log its JSON body, and hand it off to ``relay``."""
    incoming = request.json
    print("Request received!")
    print(incoming)
    return relay(incoming)


def relay(data):
    """Forward *data* to the configured Discord webhook.

    Mirrors the upstream HTTP status code back to the original caller.
    """
    print("Relaying Request with data :" + json.dumps(data))
    resp = requests.post('REPLACEWITHDISCORDWEBHOOKURL_KEEPITSECRET', json=data)
    print(resp.status_code)
    print(resp.text)
    return Response(status=resp.status_code)
nilq/baby-python
python
import unittest.mock as mock from mtsync.action import ActionKind from mtsync.connection import Connection from mtsync.imagined import Imagined from mtsync.settings import Settings from mtsync.synchronizer import Synchronizer from rich.console import Console from testslide import StrictMock from testslide.dsl import context @context def SynchronizerTest(context): @context.before async def prepare(self): self.console = Console() self.settings = Settings() self.connection = StrictMock(template=Connection) self.synchronizer = Synchronizer( console=self.console, connection=self.connection, ) @context.sub_context def score(context): @context.sub_context def test_equality(context): @context.example async def simple(self): self.assertTrue( Synchronizer._test_equality( a={ ".id": "1", "field-a": "a", "field-b": "b", }, b={ ".id": "1", "field-a": "a", "field-b": "b", }, ) ) self.assertFalse( Synchronizer._test_equality( a={ ".id": "1", "field-a": "a", "field-b": "b", }, b={ ".id": "1", "field-a": "a", "field-b": "bbb", }, ) ) @context.example async def without_id(self): self.assertTrue( Synchronizer._test_equality( a={ ".id": "1", "field-a": "a", "field-b": "b", }, b={ "field-a": "a", "field-b": "b", }, ) ) @context.sub_context def analyze(context): @context.sub_context def triage(context): @context.before async def prepare(self): self.m_analyze_list = mock.patch.object( self.synchronizer, "_analyze_list" ).__enter__() self.m_analyze_dict = mock.patch.object( self.synchronizer, "_analyze_dict" ).__enter__() @context.sub_context def empty(context): @context.example async def dict(self): self.assertEqual( await self.synchronizer._analyze(current_path="", tree={}), [], ) self.m_analyze_list.assert_not_called() self.m_analyze_dict.assert_not_called() @context.example async def list(self): with self.assertRaises(Exception): await self.synchronizer._analyze(current_path="", tree=[]) self.m_analyze_list.assert_not_called() self.m_analyze_dict.assert_not_called() @context.example 
async def none(self): self.assertEqual( await self.synchronizer._analyze(current_path="", tree=None), [], ) self.m_analyze_list.assert_not_called() self.m_analyze_dict.assert_not_called() @context.sub_context def list(context): @context.example async def simple(self): inner_list = [ { "interface": "bridge", "address": "2010::7/64", "disabled": "false", }, { "interface": "bridge", "address": "2010::1/64", "disabled": "false", "comment": "Hello worldd!", }, ] await self.synchronizer._analyze( current_path="", tree={ "ipv6": { "address": inner_list, } }, ) self.m_analyze_list.assert_called_with( current_path="/ipv6/address", analyzed_list=inner_list, ) @context.sub_context def dict(context): @context.example async def simple(self): inner_dict = {"rp-filter": "no"} await self.synchronizer._analyze( current_path="", tree={ "ip": { "settings": inner_dict, } }, ) self.m_analyze_dict.assert_called_with( current_path="/ip/settings", analyzed_dict=inner_dict, ) @context.sub_context def dict(context): @context.example async def simple(self): desired_dict = { "rp-filter": "no", "other-setting": "no", } self.mock_async_callable(self.connection, "get").to_return_value( { "rp-filter": "yes", "other-setting": "no", } ).and_assert_called_once() response = await self.synchronizer._analyze_dict( current_path="/ip/settings", analyzed_dict=desired_dict, ) self.assertEqual(len(response), 1) self.assertEqual(response[0].set_dict["rp-filter"], "no") self.assertEqual(response[0].set_dict["other-setting"], "no") @context.example async def no_op(self): desired_dict = { "rp-filter": "no", "other-setting": "no", } self.mock_async_callable(self.connection, "get").to_return_value( { "rp-filter": "no", "other-setting": "no", } ).and_assert_called_once() response = await self.synchronizer._analyze_dict( current_path="/ip/settings", analyzed_dict=desired_dict, ) self.assertEqual(len(response), 0) @context.sub_context def list(context): @context.sub_context def triage(context): pass # @TODO 
@context.sub_context def add_remove(context): pass # @TODO @context.sub_context def reorder(context): @context.example async def simple(self): actions = await self.synchronizer._analyze_list_reorder( current_path="/ip/example", imagined_items=Imagined( initial_state=[ {"field": "value2", ".id": "1"}, {"field": "value3", ".id": "2"}, {"field": "value1", ".id": "3"}, ] ), desired_items=[ {"field": "value1"}, {"field": "value2"}, {"field": "value3"}, ], ) self.assertEqual(len(actions), 1, f"Got actions: {actions}") action = actions[0] self.assertEqual(action.kind, ActionKind.POST) self.assertEqual(action.path, "/ip/example/move") self.assertEqual( action.set_dict, { "numbers": "3", "destination": "1", }, ) @context.example async def same(self): actions = await self.synchronizer._analyze_list_reorder( current_path="/ip/example", imagined_items=Imagined( initial_state=[ {"field": "value", ".id": "1"}, {"field": "value", ".id": "2"}, ] ), desired_items=[ {"field": "value"}, {"field": "value"}, ], ) self.assertEqual(len(actions), 0, f"Got actions: {actions}") @context.example async def long(self): actions = await self.synchronizer._analyze_list_reorder( current_path="/ip/example", imagined_items=Imagined( initial_state=[ {"field": "value2", ".id": "1"}, {"field": "value3", ".id": "2"}, {"field": "value1", ".id": "3"}, {"field": "value5", ".id": "4"}, {"field": "value4", ".id": "5"}, {"field": "value6", ".id": "6"}, ] ), desired_items=[ {"field": "value1"}, {"field": "value2"}, {"field": "value3"}, {"field": "value4"}, {"field": "value5"}, {"field": "value6"}, ], ) self.assertEqual(len(actions), 2) self.assertEqual( actions[0].set_dict, {"numbers": "3", "destination": "1"}, ) self.assertEqual( actions[1].set_dict, {"numbers": "5", "destination": "4"}, )
nilq/baby-python
python
# Generated by Django 2.0.2 on 2018-08-15 16:17 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0040_page_draft_title'), ('pymba', '0006_auto_20180316_1857'), ] operations = [ migrations.CreateModel( name='PymbaIndexPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('introduction', models.TextField(blank=True, help_text='Text to describe the page')), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), migrations.AlterField( model_name='pymbafinishingpage', name='color', field=models.CharField(blank=True, help_text='Accepts hex (#ffffff) or HTML color', max_length=250, null=True), ), migrations.AlterField( model_name='pymbafinishingpage', name='image', field=models.ForeignKey(blank=True, help_text='Sets the finishing general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbafinishingpage', name='intro', field=models.CharField(blank=True, help_text='Finishing description', max_length=250, null=True), ), migrations.AlterField( model_name='pymbafinishingpage', name='pattern', field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'), ), migrations.AlterField( model_name='pymbafinishingpage', name='skirting_color', field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250), ), migrations.AlterField( model_name='pymbafinishingpage', name='skirting_height', field=models.CharField(default='0', help_text='Skirting height from in cm', max_length=250), ), migrations.AlterField( model_name='pymbafinishingpage', name='skirting_image', field=models.ForeignKey(blank=True, help_text='Sets the skirting general appearance', null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbafinishingpage', name='skirting_pattern', field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'), ), migrations.AlterField( model_name='pymbafinishingpage', name='tiling_color', field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250), ), migrations.AlterField( model_name='pymbafinishingpage', name='tiling_height', field=models.CharField(default='0', help_text='Tiling height from floor in cm', max_length=250), ), migrations.AlterField( model_name='pymbafinishingpage', name='tiling_image', field=models.ForeignKey(blank=True, help_text='Sets the tiling general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbafinishingpage', name='tiling_pattern', field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'), ), migrations.AlterField( model_name='pymbapage', name='double_face', field=models.BooleanField(default=False, help_text='Planes are visible on both sides?'), ), migrations.AlterField( model_name='pymbapage', name='dxf_file', field=models.ForeignKey(help_text='CAD file of your project', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document'), ), migrations.AlterField( model_name='pymbapage', name='equirectangular_image', field=models.ForeignKey(blank=True, help_text='Landscape surrounding your project', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbapage', name='fly_camera', field=models.BooleanField(default=False, help_text='Vertical movement of camera?'), ), migrations.AlterField( model_name='pymbapage', name='intro', field=models.CharField(blank=True, help_text='Project description', 
max_length=250, null=True), ), migrations.AlterField( model_name='pymbapage', name='shadows', field=models.BooleanField(default=False, help_text='Want to cast shadows?'), ), migrations.AlterField( model_name='pymbapagematerialimage', name='color', field=models.CharField(default='white', help_text='Accepts hex (#ffffff) or HTML color', max_length=250), ), migrations.AlterField( model_name='pymbapagematerialimage', name='image', field=models.ForeignKey(blank=True, help_text='Sets general appearance of material', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbapagematerialimage', name='layer', field=models.CharField(default='0', help_text='Layer name in CAD file', max_length=250), ), migrations.AlterField( model_name='pymbapagematerialimage', name='pattern', field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'), ), migrations.AlterField( model_name='pymbapartitionpage', name='color', field=models.CharField(blank=True, help_text='Accepts hex (#ffffff) or HTML color', max_length=250, null=True), ), migrations.AlterField( model_name='pymbapartitionpage', name='image', field=models.ForeignKey(blank=True, help_text='Sets the partition general appearance', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'), ), migrations.AlterField( model_name='pymbapartitionpage', name='intro', field=models.CharField(blank=True, help_text='Partition description', max_length=250, null=True), ), migrations.AlterField( model_name='pymbapartitionpage', name='pattern', field=models.BooleanField(default=False, help_text='Is it a 1x1 meter pattern?'), ), migrations.AlterField( model_name='pymbapartitionpagelayers', name='material', field=models.CharField(default='brick', help_text='Material description', max_length=250), ), migrations.AlterField( model_name='pymbapartitionpagelayers', name='thickness', 
field=models.CharField(default='0', help_text='In centimeters', max_length=250), ), migrations.AlterField( model_name='pymbapartitionpagelayers', name='weight', field=models.CharField(default='0', help_text='In kilos per cubic meter', max_length=250), ), ]
nilq/baby-python
python
''' This script has functions in it which are used in network which evaluate images. If this script here is run it returns the object_dc-score of each segmented object by the predicition with respect to the groundtruth ''' import os import skimage import scipy import numpy as np import matplotlib.pyplot as plt ##################################### # Plotting functions # ##################################### def plot_img_and_hist(image, axes, bins=256): """Plot an image along with its histogram and cumulative histogram. Source: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_equalize.html#sphx-glr-auto-examples-color-exposure-plot-equalize-py """ image = skimage.img_as_float(image) ax_img, ax_hist = axes ax_cdf = ax_hist.twinx() # Display image ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() # Display histogram ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black') ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') ax_hist.set_xlim(0, 1) ax_hist.set_yticks([]) # Display cumulative distribution img_cdf, bins = skimage.exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') ax_cdf.set_yticks([]) return ax_img, ax_hist, ax_cdf def plot_img_and_segmentations(imgs_dict, names_list, color_list): fig, axs = plt.subplots(1, len(names_list), figsize=(5 * len(names_list),5)) #plt.title('Visualization of data and prediction') for ax, img_name, colormap in zip(axs, names_list, color_list): pic = imgs_dict[img_name] ax.imshow(pic, cmap=colormap) ax.axis('off') ax.set_title(img_name.capitalize()) plt.show() return def plot_img_and_segm_overlayed(img, msks_dict, msk_names_list, color_list, change_bg_color_list): fig, axs = plt.subplots(len(msk_names_list), 1, figsize=(15, 15 * len(msk_names_list))) for ax, msk_name, colormap, change_bg in zip(axs, msk_names_list, color_list, change_bg_color_list): ax.imshow(img) if change_bg: overlay_mask = 
msks_dict[msk_name] else: overlay_mask = np.ma.masked_array(msks_dict[msk_name], msks_dict[msk_name] == 0) ax.imshow(overlay_mask, colormap, alpha=0.5) ax.axis('off') ax.set_title(msk_name.capitalize()) plt.show() def plot_segmentations_dice(imgs_dict, names_list, label_list): fig, axs = plt.subplots(1, len(names_list), figsize=(len(names_list) * 10, 10)) handles = label_list # plt.title('Visualization of data and prediction') for ax, msk_name, in zip(axs, names_list): pic = imgs_dict[msk_name] ax.imshow(pic * 255) ax.axis('off') subtitle = msk_name + " comparison" ax.set_title(subtitle.capitalize()) ax.legend(handles=handles) plt.show() return #################################### # Metric, Micron extraction # #################################### def dice_coeff_numpy(y_true, y_pred): intersection = np.sum(y_true * y_pred) score = (2 * intersection + 1.) / (y_true.sum() + y_pred.sum() + 1.) return score def get_micron_info(pathtofile, filename): """ Returns the pixel per micron ratio for x and y. Works with .tif images from ImageJ Parameters: ----------- pathtofile: string path of the folder where the file is in filename: string name of the file Returns: -------- (pix mic x, pix mic y) tuple Tuple with the pixel per micron ratio for x and y """ # Load microns unit with skimage.external.tifffile.TiffFile(os.path.join(pathtofile, filename)) as tif: metadata = tif.info() # Find info about pixels per micron x_pos = metadata.find("* 282 x_resolution") y_pos = metadata.find("* 283 y_resolution") pixel_per_micron_x = float(metadata[x_pos + 25: x_pos + 32]) * 0.000001 pixel_per_micron_y = float(metadata[y_pos + 25: y_pos + 32]) * 0.000001 if pixel_per_micron_x != pixel_per_micron_y: print("Error. The resolution in micron in x and y are different. ", "Please check the image. 
If there is no error in the image, this has to be implemented!", "get_micron_info will return nothing.") return return (pixel_per_micron_x, pixel_per_micron_y) #################################### # Area analyis of images # #################################### def get_zero_area_in_img(image, area_threshold=0.1): """ Finds the sliced away area in an image Parameters: ----------- image: array with shape e.g. (1024, 1024, 3) values in [0,1] area_threshold: float values in [0,1] percentage of zero_area size necessary to define it as cropped_img_area Returns: -------- cropped_img_area: array with same shape as image values: True or False """ # Reduce image to grayscale image grayscale_image = skimage.color.rgb2gray(image) # Set all values which are 0 to 1 in a new array cropped_img_area = np.zeros(grayscale_image.shape) cropped_img_area[grayscale_image == 0] = 1 # Find connected components labelled_image, count_image = scipy.ndimage.label(cropped_img_area) refined_cropped_img_area = cropped_img_area.copy() # Filter out all connected components with size smaller or equal area_threshold for label in range(1, count_image + 1): if len(refined_cropped_img_area[labelled_image == label]) <= area_threshold * cropped_img_area.size: refined_cropped_img_area[labelled_image == label] = 0 # count_refined_mask -= 1 # Return a boolean array final_cropped_img_area = np.array(refined_cropped_img_area > 0) # Debug: if np.max(final_cropped_img_area) > 0: print("zero area in image detected") print("Percentage of cropped area:", np.sum(final_cropped_img_area) / final_cropped_img_area.size) return final_cropped_img_area def get_count_and_area(mask, filter_th, keep_only_largest_label=False, verbose=False): labelled_mask, count_mask = scipy.ndimage.label(mask) # Keep only the biggest connected component if keep_only_largest_label: refined_mask = mask.copy() len_largest_label = 0 id_largest_label = 0 for label in range(1, count_mask + 1): if len(refined_mask[labelled_mask == label]) > 
len_largest_label: len_largest_label = len(refined_mask[labelled_mask == label]) id_largest_label = label refined_mask[:] = 0 refined_mask[labelled_mask == id_largest_label] = 1 count_mask = 1 if verbose: print(refined_mask.shape, refined_mask.min(), refined_mask.max()) print("Kept only the largest region and set count_mask to 1.") else: # count_refined_mask = count_mask refined_mask = mask.copy() # Filter out all connected components with size smaller or equal filter_th for label in range(1, count_mask + 1): if len(refined_mask[labelled_mask == label]) <= filter_th: refined_mask[labelled_mask == label] = 0 # count_refined_mask -= 1 # refined_mask has to be relabeled now. relabelled_mask, recounted_mask = scipy.ndimage.label(refined_mask) if recounted_mask < count_mask and verbose: print("Removed ", count_mask - recounted_mask, " regions because they are smaller or equal ", filter_th, " pixels.") filtered_mask = np.array(relabelled_mask > 0) return relabelled_mask, recounted_mask, filtered_mask def get_count_and_area_rmv_podo_outside(cfg, mask, filter_mask, filter_th, verbose=False): # Outputs the labelled_mask, the mask_count and the filtered_mask # The mask is labeled, then cropped by the filter_mask # Afterwards, all labels which are contained in the mask are not removed in the labelled_mask labelled_mask, count_mask = scipy.ndimage.label(mask) if cfg.GLOM_POSTPROCESSING_KEEP_ONLY_LARGEST is True: labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\ (filter_mask, cfg.FILTER_CLASSES[0], keep_only_largest_label=True, verbose=verbose) else: labeled_filter_mask, dataset_filter_mask_count, filtered_filter_mask = get_count_and_area\ (filter_mask, cfg.FILTER_CLASSES[0], verbose=verbose) labelled_mask_copy = labelled_mask.copy() labelled_mask_copy2 = labelled_mask.copy() labelled_mask_copy[filtered_filter_mask == 0] = 0 if verbose: print(labelled_mask_copy.max(), labelled_mask_copy.min()) labels_not_cropped = 
np.unique(labelled_mask_copy) labels_not_cropped = np.trim_zeros(labels_not_cropped) if verbose: print(labels_not_cropped) final_mask = np.isin(labelled_mask_copy2, labels_not_cropped) if verbose: print(final_mask.max(), final_mask.min()) return get_count_and_area(final_mask, filter_th, verbose=verbose) def image_to_label_image(img): label, count = scipy.ndimage.label(img) return label, count def coregistrate_and_get_object_dc_score(label_pred, count_pred, label_mask, count_mask, verbose=0): def dice_coeff_with_intersect_matrix(matrix, tensor): intersection_matrices = matrix * tensor intersection_sum_array = np.sum(intersection_matrices, axis=(1,2)) score_array = (2 * intersection_sum_array + 1.) / (np.sum(matrix) + np.sum(tensor, axis=(1,2)) + 1.) return score_array, intersection_sum_array def get_true_positives_and_false_negatives_all_cells(): true_positives = [] false_negatives = [] array_dim = label_pred.shape prediction_array = np.empty((count_pred, array_dim[0], array_dim[1])) score_arrays = np.zeros((count_mask, count_pred)) for i in range(count_pred): prediction_array[i,:,:] = np.array([label_pred == i+1]) if verbose: print(prediction_array.shape) print(np.max(prediction_array)) print(np.min(prediction_array)) for k in range(1, count_mask + 1): score_arr, intersection_sum_arr = dice_coeff_with_intersect_matrix(np.array([label_mask == k]), prediction_array) if verbose: print("Intersection array: ") print(intersection_sum_arr) print("Score array: ") print(score_arr) if np.max(intersection_sum_arr) == 0: if verbose: print("cell ", k, " in the groundtruth colocalizes with no cell in the prediction") false_negatives.append((k, 0)) elif np.max(intersection_sum_arr > 0): score_arrays[k-1, :] = score_arr cells_to_process = min(count_mask - len(false_negatives), count_pred) while cells_to_process: i, j = np.unravel_index(score_arrays.argmax(), score_arrays.shape) cell_mask = i + 1 cell_pred = j + 1 if verbose: print("Cells to process: ", cells_to_process) 
print("cell ", cell_mask, " in groundtruth colocalizes the BEST with cell ", cell_pred, " in the prediction") true_positives.append((cell_mask, cell_pred, np.max(score_arrays))) score_arrays[i, :] = 0 score_arrays[:, j] = 0 cells_to_process -= 1 true_positives.sort() list_tp= [x[0] for x in true_positives] list_mask = list(range(1, count_mask + 1)) for element in false_negatives: list_mask.remove(element[0]) additional_false_negs = list(set(list_mask) - set(list_tp)) additional_false_negs = [(x, 0) for x in additional_false_negs] additional_false_negs.sort() if verbose: print("The cells ", additional_false_negs, " in the groundtruth colocalize with prediction cells that " "match better to other cells. Thus this cells will be counted " "as false negative.") false_negatives = false_negatives + additional_false_negs return true_positives, false_negatives def get_false_positives(tp): list_tp = [x[1] for x in tp] list_pred = list(range(1, count_pred + 1)) false_positives = list(set(list_pred) - set(list_tp)) false_positives = [(0, x) for x in false_positives] false_positives.sort() return false_positives if np.max(label_pred) > 0: # True positives, false negatives tp, fn = get_true_positives_and_false_negatives_all_cells() # False positives fp = get_false_positives(tp) else: print("Warning. label_pred is a zero array. 
Thus TP = 0, FP = 0.") tp, fp = [], [] fn = [(k, 0) for k in range(1,count_mask+1)] # object_dc-score if len(tp) > 0: object_dc_score = (2 * len(tp)) / (len(fp) + len(fn) + 2 * len(tp)) else: object_dc_score = 0 return object_dc_score, tp, fp, fn def run_script(): import yaml with open("config/parameters_train.yml", 'r') as ymlfile: cfg = yaml.load(ymlfile) path = "/source/" mask = skimage.io.imread(path + 'groundtruth/podocytes/A_mask_podo.tif') pred = skimage.io.imread(path + 'imagej/podocytes/A_mask_podo.tif') label_pred, count_pred = image_to_label_image(pred) label_mask, count_mask = image_to_label_image(mask) print("The pred image has ", count_pred, " cells.") print("The mask image has ", count_mask, " cells.") object_dc, tp, fp, fn = coregistrate_and_get_object_dc_score(label_pred, count_pred, label_mask, count_mask, verbose=1) print("The object_dc-score is: ", object_dc) print("There are ", len(tp), " TP cells: ", tp) print("There are ", len(fp), " FP cells: ", fp) print("There are ", len(fn), " FN cells: ", fn) return if __name__ == '__main__': from config import Config # Uncomment to test object_dv, tp, fp, fn #run_script() # Uncomment to do no testing of Remove podocytes outside glom #""" cfg = Config() # Create a dict containing the masks msks_dict = {} mask_list = cfg.NAMES_CLASSES # Load img and masks path = '/data/test_postprocessing' img = skimage.io.imread(os.path.join(path, 'images', 'A.tif')) mask_glom_name = 'A_mask_glom.tif' mask_podo_name = 'A_mask_podo.tif' mask_glom = skimage.io.imread(os.path.join(path, 'masks', mask_glom_name)) mask_podo = skimage.io.imread(os.path.join(path, 'masks', mask_podo_name)) # Display img and masks msks_dict[mask_list[0]] = mask_glom msks_dict[mask_list[1]] = mask_podo plot_img_and_segm_overlayed(img[:, :, (1,2,0)], msks_dict, mask_list, ['Set1', 'hot'], [False, True]) # Remove podocytes outside glom filter_th = 0 relabelled_mask, recounted_mask, filtered_mask = get_count_and_area_rmv_podo_outside( cfg, 
mask_podo, mask_glom, filter_th, verbose=False) # Plot img and processed masks msks_dict[mask_list[0]] = mask_glom msks_dict[mask_list[1]] = filtered_mask plot_img_and_segm_overlayed(img[:, :, (1, 2, 0)], msks_dict, mask_list, ['Set1', 'hot'], [False, True])
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 06:41:54 2016

@author: piotr at nicecircuits.com
"""
from libraryManager.library import libraryClass
from libraryManager.part import part
from footprints.footprintSmdQuad import footprintQfp
from footprints.footprintSmdDualRow import footprintTssop
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import *
from symbols.symbolsIC import symbolIC
from libraryManager.symbolPrimitive import *
from parts.icGenerator import icGenerator
import os.path
from libraryManager.generateLibraries import generateLibraries

class librarySTM32(libraryClass):
    """EDA part library "niceSTM32": STM32 MCUs in LQFP64/TSSOP20/LQFP48/LQFP32.

    Builds parts by pairing footprint generators with pin data read from the
    .ods spreadsheets that sit next to this file.
    """
    def __init__(self):
        super().__init__("niceSTM32")
        # ============== STM32 LQFP64 ==============
        # Pin numbers grouped per symbol side/section; None entries are gaps
        # (spacers) in the symbol pin layout — presumably one inner list per
        # symbol edge; TODO confirm against icGenerator.generate.
        pinNames=[
            [None,"14","15","16","17","20","21","22","23","41","42","43","44","45","46","49","50",None,"54"],
            [None,"26","27","28","55","56","57","58","59","61","62","29","30","33","34","35","36"],
            ["7","60",None,"1",None,"13",None,"32","64","48","19",None,None,"12",None,"31","63","47","18",],
            ["8","9","10","11","24","25","37","38","39","40","51","52","53","2","3","4",None,"5","6"]
        ]
        # Same QFP64/0.5mm footprint at three IPC densities: Nominal/Least/Most.
        footprints = [footprintQfp(64, 0.5, density=density) for density in ["N", "L", "M"]]
        path=os.path.join(os.path.dirname(__file__),"STM32_LQFP64.ods")
        #generate symbols configured by pinNames
        self.parts.extend(icGenerator.generate(path,pinNames,footprints,size=3000))
        #generate quad pin-by-pin symbols
        self.parts.extend(icGenerator.generate(path,pinNames=None,\
            footprints=footprints,symbolType="quad",namePosfix="_q",size=3100))
        # ============== STM32 TSSOP20 ==============
        pinNames=[
            ["4","1",None,"16","5",None,None,None,None,"2","3",None,"15"],
            ["6","7","8","9","10","11","12","13","17","18","19","20","14"]
        ]
        footprints = [footprintTssop(20, density=density) for density in ["N", "L", "M"]]
        path=os.path.join(os.path.dirname(__file__),"STM32_TSSOP20.ods")
        #generate symbols configured by pinNames
        self.parts.extend(icGenerator.generate(path,pinNames,footprints,size=2000))
        # ============== STM32 LQFP48 ==============
        # generate_advanced reads footprint/pin config entirely from the
        # spreadsheet — no explicit pinNames table needed here.
        path=os.path.join(os.path.dirname(__file__),"STM32_LQFP48.ods")
        self.parts.extend(icGenerator.generate_advanced(path))
        # ============== STM32 LQFP32 ==============
        path=os.path.join(os.path.dirname(__file__),"STM32_LQFP32.ods")
        self.parts.extend(icGenerator.generate_advanced(path))

if __name__ == "__main__":
    generateLibraries([librarySTM32()])
nilq/baby-python
python
"""Print a right-aligned alphabet "pyramid" pattern.

For 5 rows the output looks like:

        A
       A B A
      A B C A B
     A B C D A B C
    A B C D E A B C D

Row r prints the letters A..<r-th letter> followed by A..<(r-1)-th letter>.
"""

print('Alphabet Pattern: ')
total_rows = int(input('Enter number of rows: '))

for current in range(1, total_rows + 1):
    # Ascending run A, B, C, ... for this row, then the same run again
    # minus its last letter.
    ascending = [chr(ord('A') + offset) for offset in range(current)]
    segment = ascending + ascending[:-1]
    # Leading alignment spaces (plus the single separator space the
    # original emitted), then each letter followed by one space.
    line = ' ' * (total_rows - current) + ' ' + ''.join(letter + ' ' for letter in segment)
    print(line)
nilq/baby-python
python
disliked_ids = { "items" : [ { "track" : { "album" : { "name" : "W Hotel (feat. Smokepurpp, Blueface)" }, "id" : "3Ap32KanuR59wfKcs9j2pb", "name" : "W Hotel (feat. Smokepurpp, Blueface)" } }, { "track" : { "album" : { "name" : "Me Molesta" }, "id" : "0eVnN0I8WCMGOeID68Dx6n", "name" : "Me Molesta" } }, { "track" : { "album" : { "name" : "Un Trago" }, "id" : "51fKrxgweK5TqUEuzYXswm", "name" : "Un Trago" } }, { "track" : { "album" : { "name" : "Big Tales" }, "id" : "2Os3uq6WxoQmhdUJkbToIq", "name" : "Orange Evening" } }, { "track" : { "album" : { "name" : "Good Intentions" }, "id" : "2clQy4kJpJeypgc365VW4H", "name" : "She Hurtin" } }, { "track" : { "album" : { "name" : "BACK TO ROCKPORT" }, "id" : "7b9kVs2a1ljoTqZp6TRezW", "name" : "VAMONOS" } }, { "track" : { "album" : { "name" : "Ya Supérame (En Vivo)" }, "id" : "6HIIuuUIEzH1meVdGbMXyf", "name" : "Ya Supérame (En Vivo)" } }, { "track" : { "album" : { "name" : "DEV, Vol. 2" }, "id" : "4H3t8C59tXjKf1R2iKtc5M", "name" : "Ya Acabo" } }, { "track" : { "album" : { "name" : "Cruisin' with Junior H" }, "id" : "3pQYf90V5idOGrXzosm9rt", "name" : "Se Amerita" } }, { "track" : { "album" : { "name" : "Loco" }, "id" : "6PDlkWmrq2ZKiUuFt2aQsH", "name" : "Loco" } }, { "track" : { "album" : { "name" : "Dueles Tan Bien" }, "id" : "75ncCwXqalTnnl6t1ruQRq", "name" : "Dueles Tan Bien" } }, { "track" : { "album" : { "name" : "Dr. 
Feelgood" }, "id" : "7GonnnalI2s19OCQO1J7Tf", "name" : "Kickstart My Heart" } }, { "track" : { "album" : { "name" : "Everything to Lose" }, "id" : "1QD631AEGHmUoP6qxF5wb8", "name" : "Everything to Lose - Single Edit" } }, { "track" : { "album" : { "name" : "Reach Out / Olympus" }, "id" : "5hVKXeJg1R9qGbrGW5eHNl", "name" : "Reach Out" } }, { "track" : { "album" : { "name" : "Terrace Rain/Grid Search" }, "id" : "3IneYkIxkwFCdb68hICqWA", "name" : "Terrace Rain" } }, { "track" : { "album" : { "name" : "Campfire" }, "id" : "41ME5dAx2Qe1pfZ0ypuCBu", "name" : "Campfire" } }, { "track" : { "album" : { "name" : "Te Odio" }, "id" : "7sUA9Z7am1vHV7BGwNB8h8", "name" : "Te Odio" } }, { "track" : { "album" : { "name" : "Índigo" }, "id" : "4knc1Fp3kbuq8bH2byOvLu", "name" : "Índigo" } }, { "track" : { "album" : { "name" : "Sukutubla" }, "id" : "0ue1fotUAGcDwl3XWoaxxM", "name" : "Sukutubla" } }, { "track" : { "album" : { "name" : "Mi Canción" }, "id" : "53OGZ25nljLVQAVYaDw0r5", "name" : "De Noche" } }, { "track" : { "album" : { "name" : "Mi Canción" }, "id" : "6inmGRvhbtgrBoWYxau3wU", "name" : "Ser Yo" } }, { "track" : { "album" : { "name" : "A Dream I Have" }, "id" : "04IEe7T9LrB5tnrydpSHFg", "name" : "A Dream I Have" } }, { "track" : { "album" : { "name" : "Quince Mil Días" }, "id" : "70vhN2B10N0pLcUIe2bARB", "name" : "Quince Mil Días" } }, { "track" : { "album" : { "name" : "Santé" }, "id" : "3vXnuFnC5RhPGwsFi0ORcI", "name" : "Santé" } }, { "track" : { "album" : { "name" : "Canciones Mamalonas 2" }, "id" : "1nvygjj05E6AK7qR44AP8i", "name" : "Siempre Es Lo Mismo" } }, { "track" : { "album" : { "name" : "Préndete Un Blunt (feat. Zimple) [Remix]" }, "id" : "1Fjuba2hK1V3IRFHAqFyX6", "name" : "Préndete Un Blunt (feat. 
Zimple) - Remix" } }, { "track" : { "album" : { "name" : "Led Zeppelin II (1994 Remaster)" }, "id" : "0hCB0YR03f6AmQaHbwWDe8", "name" : "Whole Lotta Love - 1990 Remaster" } }, { "track" : { "album" : { "name" : "A Town Called Paradise" }, "id" : "5L2l7mI8J1USMzhsmdjat9", "name" : "Red Lights" } }, { "track" : { "album" : { "name" : "End Of The World" }, "id" : "25tekS8txsCQov85px1xm2", "name" : "End Of The World" } }, { "track" : { "album" : { "name" : "Punk" }, "id" : "0AkI0KKi2cSfIKGyMMu7iZ", "name" : "Peepin Out The Window (with Future & Bslime)" } }, { "track" : { "album" : { "name" : "Se Me Olvidó" }, "id" : "7xLYLM5K6S1TwiSdfuhZQg", "name" : "Se Me Olvidó" } }, { "track" : { "album" : { "name" : "DIE FOR MY BITCH" }, "id" : "58k32my5lKofeZRtIvBDg9", "name" : "HONEST" } }, { "track" : { "album" : { "name" : "Se Me Pasó" }, "id" : "7q6uwjL8IQ4cTJplzwdqu6", "name" : "Se Me Pasó" } }, { "track" : { "album" : { "name" : "Mil Vueltas a Este Pantalón" }, "id" : "6GhcDZBtpfIrEZb0Yk0dZY", "name" : "Mil Vueltas a Este Pantalón" } }, { "track" : { "album" : { "name" : "La Sinvergüenza" }, "id" : "1xO7tp4J5Wj0NeKrzIpd1V", "name" : "La Sinvergüenza" } }, { "track" : { "album" : { "name" : "Somebody's Watching Me (Syzz Halloween Remix)" }, "id" : "4dKgJOFyPs5qMTC925ikc3", "name" : "Somebody's Watching Me - Syzz Halloween Remix" } }, { "track" : { "album" : { "name" : "Oohla Oohla" }, "id" : "3g36KmRGI8hmnCcTFak4Wn", "name" : "Oohla Oohla" } }, { "track" : { "album" : { "name" : "Control (feat. Ty Dolla $ign)" }, "id" : "0AUo7RatplZTIoZaRkQWDz", "name" : "Control (feat. 
Ty Dolla $ign)" } }, { "track" : { "album" : { "name" : "Faces" }, "id" : "40dlJFdqfm8CayhmmS9UB7", "name" : "Here We Go" } }, { "track" : { "album" : { "name" : "Real One" }, "id" : "7hwBuXZkPzNUTNhBQPyTxu", "name" : "Real One" } }, { "track" : { "album" : { "name" : "Lumbre" }, "id" : "59ilCs0OhtM96JNFqWS0yW", "name" : "Lumbre" } }, { "track" : { "album" : { "name" : "Necesitaba Estar Hecho" }, "id" : "7DJnWboNefoXfb7kySFldt", "name" : "Por Ti" } }, { "track" : { "album" : { "name" : "Balenciaga" }, "id" : "6Tcb2f0TY9VgVmJ8qoHzn4", "name" : "Balenciaga" } }, { "track" : { "album" : { "name" : "Endgame" }, "id" : "5sG3G54H21hNfd5etlheoe", "name" : "Satellite" } }, { "track" : { "album" : { "name" : "Vampire Weekend" }, "id" : "5dKBaysNJtfpyNTRa5lqDb", "name" : "A-Punk" } }, { "track" : { "album" : { "name" : "Say Amen for Silver Linings" }, "id" : "4qSsjDGXplb6422X2INvFW", "name" : "Say Amen (Saturday Night)" } }, { "track" : { "album" : { "name" : "Fear Inoculum" }, "id" : "03sEzk1VyrUZSgyhoQR0LZ", "name" : "Pneuma" } }, { "track" : { "album" : { "name" : "A Beautiful Lie" }, "id" : "0Dx3pLp5cHb5RKvCNHKdlK", "name" : "From Yesterday" } }, { "track" : { "album" : { "name" : "Time To Tango" }, "id" : "168P6e9mrfugeE9nKhEE8C", "name" : "Bomba" } }, { "track" : { "album" : { "name" : "Love Gun" }, "id" : "0SPBrxOUEMIKugXR4bFhxs", "name" : "Love Gun" } }, { "track" : { "album" : { "name" : "Number Three" }, "id" : "6VtcgrVYo2xfygcWAfRpd1", "name" : "The World Is Ugly" } }, { "track" : { "album" : { "name" : "Firepower" }, "id" : "4CONJphSrdS0vIAGDrThGS", "name" : "Firepower" } }, { "track" : { "album" : { "name" : "Appeal To Reason" }, "id" : "3asFGFY3uLjMDmML1p0tYm", "name" : "Savior" } }, { "track" : { "album" : { "name" : "Atoma" }, "id" : "4HlVUapocBDBqkPtET2knz", "name" : "Atoma" } }, { "track" : { "album" : { "name" : "If You Have Ghost" }, "id" : "0PSWSiRXsxsLAEdEhaJAId", "name" : "Crucified" } }, { "track" : { "album" : { "name" : "Bloody Kisses (Top Shelf 
Edition)" }, "id" : "710B9xFjNOisQtKtppZE9p", "name" : "Black No. 1 (Little Miss Scare -All)" } }, { "track" : { "album" : { "name" : "TalkTalk" }, "id" : "47ih1BN9dECO0Gu0yPeMyD", "name" : "TalkTalk" } }, { "track" : { "album" : { "name" : "White Pony" }, "id" : "51c94ac31swyDQj9B3Lzs3", "name" : "Change (In the House of Flies)" } }, { "track" : { "album" : { "name" : "October Rust (Special Edition)" }, "id" : "1Yb9Nq9PTEegiOUGwyGHuP", "name" : "Wolf Moon (Including Zoanthropic Paranoia)" } }, { "track" : { "album" : { "name" : "The Money Store" }, "id" : "7nCONy10IHp7XD3oYZ0lcx", "name" : "I've Seen Footage" } }, { "track" : { "album" : { "name" : "The Money Store" }, "id" : "7y8X0Z04gJCKtfrnSAMywJ", "name" : "Hacker" } }, { "track" : { "album" : { "name" : "No Love Deep Web" }, "id" : "5fDj1YVNR04RtQNP4iYapO", "name" : "No Love" } }, { "track" : { "album" : { "name" : "Infinity Overhead" }, "id" : "1sFMp92IOMEXvza2liF4DZ", "name" : "Cold Company" } }, { "track" : { "album" : { "name" : "October Rust (Special Edition)" }, "id" : "58RDwkonFMOkoytBtIQetc", "name" : "Love You to Death" } }, { "track" : { "album" : { "name" : "Viva La Vida or Death and All His Friends" }, "id" : "1mea3bSkSGXuIRvnydlB5b", "name" : "Viva La Vida" } }, { "track" : { "album" : { "name" : "Dreaming Out Loud" }, "id" : "1NrJYpdAi7uosDRPmSYrsG", "name" : "Apologize" } }, { "track" : { "album" : { "name" : "A Rush of Blood to the Head" }, "id" : "75JFxkI2RXiU7L9VXzMkle", "name" : "The Scientist" } }, { "track" : { "album" : { "name" : "Indiana" }, "id" : "5uNEaSgkkPw6vLCUh3KsdQ", "name" : "Beautiful Disaster" } }, { "track" : { "album" : { "name" : "Ocean Eyes" }, "id" : "3DamFFqW32WihKkTVlwTYQ", "name" : "Fireflies" } }, { "track" : { "album" : { "name" : "Overexposed" }, "id" : "1LmN9SSHISbtp9LoaR5ZVJ", "name" : "Payphone" } }, { "track" : { "album" : { "name" : "VHS" }, "id" : "7lGKEWMXVWWTt3X71Bv44I", "name" : "Unsteady" } }, { "track" : { "album" : { "name" : "All I Ever Wanted" }, "id" 
: "4Dm32oO01YpIubCHaAtKkN", "name" : "My Life Would Suck Without You" } }, { "track" : { "album" : { "name" : "Science & Faith" }, "id" : "49kjlZP49LMD1MrrcvXDET", "name" : "For the First Time" } }, { "track" : { "album" : { "name" : "Götterdämmerung" }, "id" : "2af26RNEV5okdhwPSet5b5", "name" : "Götterdämmerung" } }, { "track" : { "album" : { "name" : "Barren Cloth Mother" }, "id" : "1LtFsJIocrUsFXTzdilfNM", "name" : "Barren Cloth Mother" } } ] }
nilq/baby-python
python
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Searching on a business tasks.

Provides all the search and retrieval from the business filings datastore.
"""
from datetime import datetime
from http import HTTPStatus

import datedelta
from flask import jsonify
from flask_restplus import Resource, cors

from legal_api.models import Business, Filing
from legal_api.services.filings import validations
from legal_api.utils.util import cors_preflight

from .api_namespace import API


@cors_preflight('GET,')
@API.route('/<string:identifier>/tasks', methods=['GET', 'OPTIONS'])
class TaskListResource(Resource):
    """Business Tasks service - Lists all incomplete filings and to-dos."""

    @staticmethod
    @cors.crossdomain(origin='*')
    def get(identifier):
        """Return the task list for the business, or 404 if it is unknown."""
        business = Business.find_by_identifier(identifier)

        if not business:
            return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND

        rv = TaskListResource.construct_task_list(business)
        return jsonify(tasks=rv)

    @staticmethod
    def construct_task_list(business):
        """
        Return all current pending tasks to do.

        First retrieves filings that are either drafts, or incomplete,
        then populate AR filings that have not been started for years that are due.

        Rules for AR filings:
        - Co-ops must file one AR per year. The next AR date must be AFTER the most recent
          AGM date. The calendar year of the filing is the first contiguous year following
          the last AGM date
        - Corporations must file one AR per year, on or after the anniversary of the founding date
        """
        tasks = []
        order = 1  # display ordering of tasks; pending filings come first
        # Co-ops require an AGM, which changes how the next AR year is derived.
        check_agm = validations.annual_report.requires_agm(business)
        # If no filings exist in legal API db (set after this line), use the business' next anniversary date
        todo_start_date = business.next_anniversary.date()
        # Retrieve filings that are either incomplete, or drafts
        pending_filings = Filing.get_filings_by_status(business.id, [Filing.Status.DRAFT.value,
                                                                     Filing.Status.PENDING.value,
                                                                     Filing.Status.ERROR.value])
        # Create a todo item for each pending filing
        for filing in pending_filings:
            task = {'task': filing.json, 'order': order, 'enabled': True}
            tasks.append(task)
            order += 1

        last_ar_date = business.last_ar_date
        if last_ar_date and check_agm:
            # If this is a CO-OP, set the start date to the first day of the year, since an AR filing
            # is available as of Jan/01
            todo_start_date = (datetime(last_ar_date.year + 1, 1, 1)).date()

        # Retrieve all previous annual report filings. If there are existing AR filings, determine
        # the latest date of filing
        annual_report_filings = Filing.get_filings_by_type(business.id, 'annualReport')
        if annual_report_filings:
            # get last AR date from annualReportDate; if not present in json, try annualGeneralMeetingDate and
            # finally filing date
            last_ar_date = \
                annual_report_filings[0].filing_json['filing']['annualReport'].get('annualReportDate', None)
            if not last_ar_date:
                last_ar_date = annual_report_filings[0].filing_json['filing']['annualReport']\
                    .get('annualGeneralMeetingDate', None)
            if not last_ar_date:
                # NOTE(review): filing_date looks like a model attribute that may
                # already be a datetime, while fromisoformat() below expects an
                # ISO string -- confirm the attribute's type upstream.
                last_ar_date = annual_report_filings[0].filing_date
            last_ar_date = datetime.fromisoformat(last_ar_date)
            if check_agm:
                # Co-op: next AR opens Jan/01 of the year after the last AR.
                todo_start_date = (datetime(last_ar_date.year+1, 1, 1)).date()
            else:
                # Corporation: next AR opens one year after the last AR date.
                todo_start_date = (last_ar_date+datedelta.YEAR).date()

        start_year = todo_start_date.year
        # Emit one AR todo per outstanding year up to today; only the earliest
        # year is enabled, and only when nothing is already pending.
        while todo_start_date <= datetime.now().date():
            enabled = not pending_filings and todo_start_date.year == start_year
            tasks.append(TaskListResource.create_todo(business, todo_start_date.year, order, enabled))
            todo_start_date += datedelta.YEAR
            order += 1
        return tasks

    @staticmethod
    def create_todo(business, todo_year, order, enabled):
        """Return a to-do JSON object for an annualReport in *todo_year*."""
        todo = {
            'task': {
                'todo': {
                    'business': business.json(),
                    'header': {
                        'name': 'annualReport',
                        'ARFilingYear': todo_year,
                        'status': 'NEW'
                    }
                }
            },
            'order': order,
            'enabled': enabled
        }
        return todo
nilq/baby-python
python
#! /usr/bin/python
"""Threaded SQLite-backed user store for the FTP server.

An sqlite3 connection may only be used from the thread that created it, so
all statements are funnelled through a single daemon worker thread
(SQLiteThread).  Callers enqueue work items and block on a per-request
result queue, which makes the public API synchronous and thread-safe.
"""

import sys
import os
import sqlite3
import datetime
import threading

try:
    import Queue  # Python 2
except ImportError:
    import queue as Queue  # Python 3


class SQLiteThread(threading.Thread):
    """Worker thread that owns the sqlite3 connection.

    Work items on ``msgport`` are lists of ``[bound_method, arg1, ...]``;
    the last argument is always a Queue the result is put on.
    """

    def __init__(self, config, logger):
        threading.Thread.__init__(self)
        # daemon: don't keep the interpreter alive on shutdown
        # (self.daemon works on both Python 2.6+ and 3; setDaemon() is
        # deprecated in modern Python)
        self.daemon = True
        self.filename = config.get_setting("sqlite_filename", "")
        self.logger = logger
        self.msgport = Queue.Queue()

    def run(self):
        """Open the database and service work items forever."""
        self.logger.info("SQLite uses filename %r" % (self.filename, ))
        self.db = sqlite3.connect(self.filename)
        self.create_default_tables()
        while True:
            msg = self.msgport.get()
            msg[0](*msg[1:])

    def create_default_tables(self):
        # a user is a mailbox on the system. a system can have any number of mailboxes.
        self.db.execute("""CREATE TABLE IF NOT EXISTS users (
            name TEXT PRIMARY KEY,
            password TEXT,
            homedir TEXT,
            perm TEXT,
            msg_login TEXT,
            msg_quit TEXT);""")
        self.db.commit()

    def select(self, stmt, params, result):
        """Run a SELECT and put the full row list on *result*."""
        result.put([line for line in self.db.execute(stmt, params)])

    def commit(self, stmt, params, result):
        """Run a mutating statement, commit, and put the cursor on *result*."""
        t = self.db.execute(stmt, params)
        self.db.commit()
        result.put(t)

    def execute_many(self, stmt, params, result):
        """executemany() variant of commit()."""
        t = self.db.executemany(stmt, params)
        self.db.commit()
        result.put(t)

    def disconnect(self):
        self.db.close()
        self.db = None


class SQLite3Database(object):
    """Synchronous facade over SQLiteThread.

    Each call enqueues work for the worker thread and blocks until the
    result arrives, so instances are safe to share between threads.
    """

    def __init__(self, config, logger):
        self.sqlite_thread = SQLiteThread(config, logger)
        self.logger = logger
        self.sqlite_thread.start()

    def select(self, stmt, params):
        """Run a SELECT on the worker thread; return the list of rows."""
        result = Queue.Queue()
        self.sqlite_thread.msgport.put([self.sqlite_thread.select, stmt, params, result])
        return result.get()

    def commit(self, stmt, params):
        """Run a mutating statement on the worker thread; return its cursor."""
        result = Queue.Queue()
        self.sqlite_thread.msgport.put([self.sqlite_thread.commit, stmt, params, result])
        return result.get()

    def execute_many(self, stmt, params):
        """executemany() variant of commit()."""
        result = Queue.Queue()
        self.sqlite_thread.msgport.put([self.sqlite_thread.execute_many, stmt, params, result])
        return result.get()

    def add_user(self, username, password, homedir, perm, msg_login, msg_quit):
        """Create a mailbox/user row."""
        stmt = "INSERT INTO users (name, password, homedir, perm, msg_login, msg_quit) VALUES (?,?,?,?,?,?);"
        args = (username, password, homedir, perm, msg_login, msg_quit, )
        return self.commit(stmt, args)

    def remove_user(self, username):
        """Delete the user row, if any.

        (The original carried unreachable code after the return that
        referenced a nonexistent ``self.db`` attribute; removed.)
        """
        stmt = "DELETE FROM users WHERE name=?;"
        args = (username, )
        return self.commit(stmt, args)

    def identify(self, username):
        """Return (password, homedir, perm, msg_login, msg_quit) or None."""
        stmt = "SELECT password,homedir,perm,msg_login,msg_quit FROM users WHERE name=?"
        args = (username, )
        for row in self.select(stmt, args):
            return tuple(row)
        return None

    def has_user(self, username):
        """Return True iff a user row with this name exists."""
        stmt = "SELECT password FROM users WHERE name=?"
        args = (username, )
        for row in self.select(stmt, args):
            return True
        return False

    def list_users(self):
        """Return all user rows, ordered by name."""
        return self.select("SELECT name,password,homedir,perm,msg_login,msg_quit FROM users ORDER BY name", [])


if __name__ == "__main__":
    import eftepede_server
    eftepede_server.main()
nilq/baby-python
python
""" Unit tests for our validators """ from dbas.database.discussion_model import ReviewDelete from dbas.tests.utils import TestCaseWithConfig, construct_dummy_request from dbas.validators.core import has_keywords_in_json_path, spec_keyword_in_json_body from dbas.validators.reviews import valid_not_executed_review class TestHasKeywords(TestCaseWithConfig): def test_has_one_keyword(self): request = construct_dummy_request(json_body={'string': 'foo'}) response = has_keywords_in_json_path(('string', str))(request) self.assertTrue(response) self.assertIn('string', request.validated) def test_has_multiple_keywords(self): request = construct_dummy_request(json_body={ 'string': 'foo', 'bool': True }) response = has_keywords_in_json_path(('string', str), ('bool', bool))(request) self.assertTrue(response) self.assertIn('string', request.validated) self.assertIn('bool', request.validated) def test_has_number_keywords(self): request = construct_dummy_request(json_body={ 'int': 4, 'float': 4.0 }) response = has_keywords_in_json_path(('int', int), ('float', float))(request) self.assertTrue(response) self.assertIn('int', request.validated) self.assertIn('float', request.validated) def test_has_list_keywords(self): request = construct_dummy_request(json_body={'list': ['<:)']}) response = has_keywords_in_json_path(('list', list))(request) self.assertTrue(response) self.assertIn('list', request.validated) def test_has_keywords_with_wrong_type(self): request = construct_dummy_request(json_body={'int': 4}) response = has_keywords_in_json_path(('int', float))(request) self.assertFalse(response) self.assertNotIn('int', request.validated) def test_has_keywords_without_keyword(self): request = construct_dummy_request(json_body={'foo': 42}) response = has_keywords_in_json_path(('bar', int))(request) self.assertFalse(response) self.assertNotIn('bar', request.validated) class TestExecutedReviews(TestCaseWithConfig): def test_valid_not_executed_review(self): request = 
construct_dummy_request(json_body={'ruid': 4}) response = valid_not_executed_review('ruid', ReviewDelete)(request) self.assertTrue(response) def test_valid_not_executed_review_error(self): request = construct_dummy_request(json_body={'ruid': 1}) response = valid_not_executed_review('ruid', ReviewDelete)(request) self.assertFalse(response) class TestSpecKeywords(TestCaseWithConfig): def test_empty_dummy_request_should_fail(self): request = construct_dummy_request() fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType))) response = fn(request) self.assertIsInstance(response, bool) self.assertFalse(response) def test_provided_string_expected_int_should_fail(self): request = construct_dummy_request(json_body={'foo': 'bar'}) fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType))) response = fn(request) self.assertIsInstance(response, bool) self.assertFalse(response) def test_provided_int_expected_int_should_succed(self): request = construct_dummy_request(json_body={'foo': 2}) fn = spec_keyword_in_json_body((int, 'foo', lambda foo, varType: isinstance(foo, varType))) response = fn(request) self.assertIsInstance(response, bool) self.assertTrue(response) def test_provided_empty_string_should_fail(self): request = construct_dummy_request(json_body={'foo': ''}) fn = spec_keyword_in_json_body((str, 'foo', lambda foo, varType: isinstance(foo, varType) and foo != '')) response = fn(request) self.assertIsInstance(response, bool) self.assertFalse(response) def test_provided_string_should_succed(self): request = construct_dummy_request(json_body={'foo': 'bar'}) fn = spec_keyword_in_json_body((str, 'foo', lambda foo, varType: isinstance(foo, varType) and foo != '')) response = fn(request) self.assertIsInstance(response, bool) self.assertTrue(response)
nilq/baby-python
python
#!/usr/bin/python
"""Feedback persistence helpers.

Both functions return the project's string status codes:
"11" on success, "0" on database error, and check_same_feedback()
additionally returns '1' when a duplicate is found.
"""
import pymysql
import config


def add_feedback(email, f_text):
    """Insert a feedback row for *email*; return "11" on success, "0" on error.

    SECURITY FIX: the original interpolated user input directly into the SQL
    string, which allowed SQL injection; this version uses parameterised
    placeholders so the driver escapes the values.
    """
    conn, cursor = config.connect_to_database()
    sql = "insert into feedbacks(email,f_text) values(%s,%s)"
    try:
        cursor.execute(sql, (email, f_text))
        conn.commit()
        return "11"
    except Exception:
        conn.rollback()
        return "0"


def check_same_feedback(email, f_text):
    """Check whether *email* already submitted identical feedback text.

    Returns '1' if a case-insensitive duplicate exists, "11" if none does,
    and "0" on database error.  Uses a parameterised query (see add_feedback).
    """
    conn, cursor = config.connect_to_database()
    sql = "select * from feedbacks where email=%s"
    try:
        cursor.execute(sql, (email,))
        results = cursor.fetchall()
        # Column 2 holds the feedback text; compare case-insensitively.
        wanted = str(f_text).lower()
        for row in results:
            if str(row[2]).lower() == wanted:
                return '1'
        return "11"
    except Exception:
        conn.rollback()
        return "0"
nilq/baby-python
python
import unittest

from point import Point


class PointTests(unittest.TestCase):
    """Tests for Point."""

    def test_attributes(self):
        # x/y/z are plain mutable attributes set by the constructor.
        point = Point(1, 2, 3)
        self.assertEqual((point.x, point.y, point.z), (1, 2, 3))
        point.x = 4
        self.assertEqual(point.x, 4)

    def test_string_representation(self):
        # str() and repr() agree and reflect attribute mutation.
        point = Point(1, 2, 3)
        self.assertEqual(str(point), 'Point(x=1, y=2, z=3)')
        self.assertEqual(repr(point), 'Point(x=1, y=2, z=3)')
        point.y = 4
        self.assertEqual(str(point), 'Point(x=1, y=4, z=3)')
        self.assertEqual(repr(point), 'Point(x=1, y=4, z=3)')

    def test_equality_and_inequality(self):
        # Equality is by coordinate values, not identity; swapping x and z
        # must make previously-equal points unequal.
        p1 = Point(1, 2, 3)
        p2 = Point(1, 2, 4)
        p3 = Point(1, 2, 3)
        self.assertNotEqual(Point(1, 2, 3), Point(1, 2, 4))
        self.assertEqual(Point(1, 2, 3), Point(1, 2, 3))
        self.assertFalse(Point(1, 2, 3) != Point(1, 2, 3))
        self.assertNotEqual(p1, p2)
        self.assertEqual(p1, p3)
        p3.x, p3.z = p3.z, p3.x
        self.assertNotEqual(p1, p3)
        self.assertTrue(p1 != p3)
        self.assertFalse(p1 == p3)

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_shifting(self):
        # + and - shift a point coordinate-wise by another point.
        p1 = Point(1, 2, 3)
        p2 = Point(4, 5, 6)
        p3 = p2 + p1
        p4 = p3 - p1
        self.assertEqual((p3.x, p3.y, p3.z), (5, 7, 9))
        self.assertEqual((p4.x, p4.y, p4.z), (p2.x, p2.y, p2.z))

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_scale(self):
        # Scalar multiplication works on both sides (__mul__ and __rmul__).
        p1 = Point(1, 2, 3)
        p2 = p1 * 2
        self.assertEqual((p2.x, p2.y, p2.z), (2, 4, 6))
        p3 = 3 * p1
        self.assertEqual((p3.x, p3.y, p3.z), (3, 6, 9))

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_iterable_point(self):
        # Unpacking requires Point to be iterable over (x, y, z).
        point = Point(x=1, y=2, z=3)
        x, y, z = point
        self.assertEqual((x, y, z), (1, 2, 3))


if __name__ == "__main__":
    unittest.main(verbosity=2)
nilq/baby-python
python
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
from time import sleep
from sys import exit


def asset_creation(farmer, tomatos, tomatos_metadata, bdb):
    """CREATE the asset on BigchainDB signed by *farmer*; return the sent tx."""
    prepare_cr_tx = bdb.transactions.prepare(
        operation='CREATE',
        signers=farmer.public_key,
        asset=tomatos,
        metadata=tomatos_metadata
    )
    fulfilled_cr_tx = bdb.transactions.fulfill(
        prepare_cr_tx,
        private_keys=farmer.private_key
    )
    sent_cr_tx = bdb.transactions.send_commit(fulfilled_cr_tx)
    print("Creation done (status): ", fulfilled_cr_tx == sent_cr_tx)
    return sent_cr_tx


def asset_transfer(fulfilled_cr_tx, buyer, farmer, bdb):
    """TRANSFER the created asset from *farmer* to *buyer*; return the sent tx."""
    transfer_asset = {'id': fulfilled_cr_tx['id']}
    output = fulfilled_cr_tx['outputs'][0]
    transfer_input = {
        'fulfillment': output['condition']['details'],
        'fulfills': {
            'output_index': 0,
            'transaction_id': fulfilled_cr_tx['id']
        },
        'owners_before': output['public_keys']
    }
    prepared_transfer_tx = bdb.transactions.prepare(
        operation='TRANSFER',
        asset=transfer_asset,
        inputs=transfer_input,
        recipients=buyer.public_key,
    )
    fulfilled_transfer_tx = bdb.transactions.fulfill(
        prepared_transfer_tx,
        private_keys=farmer.private_key,
    )
    sent_transfer_tx = bdb.transactions.send_commit(fulfilled_transfer_tx)
    print("Transfer done (status): ", fulfilled_transfer_tx == sent_transfer_tx)
    return sent_transfer_tx


def run(tomato, tomato_metadata, farmer, buyer, bdb):
    """Create the *tomato* asset and transfer it from *farmer* to *buyer*.

    BUG FIX: the original body referenced the module-level globals
    ``tomatos``/``tomatos_metadata`` instead of its own parameters, so the
    function only worked by accident when called from this script's
    ``__main__`` loop.  It now uses its parameters.
    """
    fulfilled_cr = asset_creation(farmer, tomato, tomato_metadata, bdb)
    asset_transfer(fulfilled_cr, buyer, farmer, bdb)


if __name__ == '__main__':
    bdb = BigchainDB("https://test.bigchaindb.com")
    farmer, trader, buyer = generate_keypair(), generate_keypair(), generate_keypair()
    while(input("Press q (quit) to stop ") != 'q'):
        tomatos = {
            'data': {
                input("Product name: "): {
                    'price_euros': float(input("Product price: ")),
                    'quantity_kilos': float(input("Product quantity: "))
                },
            },
        }
        tomatos_metadata = {'plant': 'farm'}
        run(tomatos, tomatos_metadata, farmer, buyer, bdb)
nilq/baby-python
python
from __future__ import print_function
from memorytestgame.lib.game import Game
import time
import unittest


class GameTestCase(unittest.TestCase):
    """Smoke tests for memorytestgame.lib.game.Game.

    Hardware-facing constructor arguments are stubbed with empty tuples and
    zeros so the tests exercise Game without real LEDs/switches attached.
    Most assertions only check that the method runs and returns None.
    """

    # Stub constructor arguments (no real hardware in unit tests).
    LEDS = ()
    SWITCHES = ()
    COUNTDOWN = 0
    GAME_TIME = 0
    SCORE_INCREMENT = 1

    game = None  # fresh Game instance created per-test in setUp()

    def setUp(self):
        self.game = Game(self.LEDS, self.SWITCHES, self.COUNTDOWN, self.GAME_TIME, self.SCORE_INCREMENT)

    def test__init__(self):
        self.assertIsInstance(self.game, Game)

    def test_start(self):
        # Cap the run to 2 seconds so the test terminates quickly.
        # NOTE(review): start(True) presumably selects a non-interactive /
        # simulated mode -- confirm against Game.start's signature.
        self.game.GAME_TIME = 2
        self.assertIs(self.game.start(True), None)

    def test_get_score(self):
        # A new game starts with score 0.
        self.assertIs(self.game.get_score(), 0)

    def test_print_information(self):
        # print_information() reads FINISH_TIME, so seed it first.
        self.game.FINISH_TIME = time.time()
        self.assertIs(self.game.print_information(), None)

    def test_print_score(self):
        # Exercise both the default and the flagged variant.
        self.assertIs(self.game.print_score(), None)
        self.assertIs(self.game.print_score(True), None)

    def test_flash_buttons(self):
        # Both flash animations should run without error.
        self.assertIs(self.game.flash_buttons(self.game.BUTTON_ACTION_ALL), None)
        self.assertIs(self.game.flash_buttons(self.game.BUTTON_ACTION_SNAKE), None)

    def test_finish(self):
        self.assertIs(self.game.finish(), None)

    def test_reset(self):
        self.assertIs(self.game.reset(), None)

    def test_cleanup(self):
        self.assertIs(self.game.cleanup(), None)

    def test__exit__(self):
        self.assertIs(self.game.__exit__(), None)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
""" Otrzymujesz liste liczb oraz liczbe n. Lista reprezentuje ceny sznurka o dlugosci rownej indeksowi powiekszonemu o 1. Zaleznosc miedzy cenami i dlugoscia sznurka jest przypadkowa. Przykladowo sznurek o dlugosci rownej 2 jednostkom moze kosztowac tyle samo co sznurek o dlugosci rownej 3 jednostkom i byc piec razy drozszy niz sznurek o dlugosci 1 jednostce. Liczba n to dlugosc sznurka jaki mamy szprzedac. Na ile czesci powinnismy podzielic nasz sznurek, aby zarobic jak najwiecej na sprzedazy. """ # Zlozonosc czasowa: O(n^n) # Zlozonosc pamieciowa: O(1) def podziel_sznurek_v1(ceny, n): if len(ceny) < n: raise ValueError( "Dlugosc sznurka do sprzedazy nie moze byc wieksza od liczby elementow listy ceny." ) if n == 0: return 0 maks = 0 for i in range(n): cena = ceny[i] + podziel_sznurek_v1(ceny, n - i - 1) if cena > maks: maks = cena return maks # Zlozonosc czasowa: O(n^2) # Zlozonosc pamieciowa: O(n) def podziel_sznurek_v2(ceny, n): if len(ceny) < n: raise ValueError( "Dlugosc sznurka do sprzedazy nie moze byc wieksza od liczby elementow listy ceny." ) pom = [0] * (n + 1) for i in range(n): for j in range(i + 1): pom[i + 1] = max(pom[i + 1], ceny[j] + pom[i - j]) return pom[n] # Testy Poprawnosci def test_1(): ceny = [1, 5, 8, 9, 10, 17, 17, 20] n = 4 wynik = 10 assert podziel_sznurek_v1(ceny, n) == wynik assert podziel_sznurek_v2(ceny, n) == wynik def test_2(): ceny = [3, 9, 10, 20] n = 5 for funkcja in [podziel_sznurek_v1, podziel_sznurek_v2]: try: funkcja(ceny, n)() assert False except ValueError: assert True def test_3(): ceny = [5] n = 1 wynik = 5 assert podziel_sznurek_v1(ceny, n) == wynik assert podziel_sznurek_v2(ceny, n) == wynik def main(): test_1() test_2() test_3() if __name__ == "__main__": main()
nilq/baby-python
python
# PhotoBot 0.8 beta - last updated for NodeBox 1rc4 # Author: Tom De Smedt <tomdesmedt@trapdoor.be> # Manual: http://nodebox.net/code/index.php/PhotoBot # Copyright (c) 2006 by Tom De Smedt. # Refer to the "Use" section on http://nodebox.net/code/index.php/Use from __future__ import print_function ALL = ['canvas', 'Layers', 'Layer', 'invertimage', 'cropimage', 'aspectRatio', 'normalizeOrientationImage', 'insetRect', 'cropImageToRatioHorizontal', 'scaleLayerToHeight', 'placeImage', 'resizeImage', 'hashFromString', 'makeunicode', 'datestring', 'label' ] import sys import os import random import math sqrt = math.sqrt pow = math.pow sin = math.sin cos = math.cos degrees = math.degrees radians = math.radians asin = math.asin import fractions Fraction = fractions.Fraction import datetime import time import hashlib import unicodedata import colorsys import io import PIL import PIL.ImageFilter as ImageFilter import PIL.Image as Image import PIL.ImageChops as ImageChops import PIL.ImageEnhance as ImageEnhance import PIL.ImageOps as ImageOps import PIL.ImageDraw as ImageDraw import PIL.ImageStat as ImageStat import PIL.ImageFont as ImageFont # disable large image warning old = Image.MAX_IMAGE_PIXELS Image.MAX_IMAGE_PIXELS = None # 200000000 # print( "MAX_IMAGE_PIXELS: %i" % old) import pdb import pprint pp = pprint.pprint kwdbg = 0 kwlog = 0 import traceback # py3 stuff py3 = False try: unicode('') punicode = unicode pstr = str punichr = unichr except NameError: punicode = str pstr = bytes py3 = True punichr = chr long = int xrange = range # PIL interpolation modes NEAREST = Image.NEAREST BILINEAR = Image.BILINEAR BICUBIC = Image.BICUBIC LANCZOS = Image.LANCZOS INTERPOLATION = Image.BICUBIC LAYERS = [] # blend modes NORMAL = "normal" MULTIPLY = "multiply" SCREEN = "screen" OVERLAY = "overlay" HUE = "hue" COLOR = "color" ADD = "add" SUBTRACT = "subtract" ADD_MODULO = "add_modulo" SUBTRACT_MODULO = "subtract_modulo" DIFFERENCE = "difference" HORIZONTAL = 1 VERTICAL = 2 SOLID = 
"solid" LINEAR = "linear" RADIAL = "radial" DIAMOND = "diamond" SCATTER = "scatter" COSINE = "cosine" SINE = "sine" ROUNDRECT = "roundrect" RADIALCOSINE = "radialcosine" QUAD = "quad" class Canvas: """Implements a canvas with layers. A canvas is an empty Photoshop document, where layers can be placed and manipulated. """ def __init__(self, w, h): """Creates a new canvas. Creates the working area on which to blend layers. The canvas background is transparent, but a background color could be set using the fill() function. """ self.interpolation = INTERPOLATION self.layers = Layers() self.w = w self.h = h img = Image.new("RGBA", (w,h), (255,255,255,0)) self.layer(img, name="_bg") del img def layer(self, img, x=0, y=0, name=""): """Creates a new layer from file, Layer, PIL Image. If img is an image file or PIL Image object, Creates a new layer with the given image file. The image is positioned on the canvas at x, y. If img is a Layer, uses that layer's x and y position and name. """ if isinstance(img, Image.Image): img = img.convert("RGBA") self.layers.append( Layer(self, img, x, y, name) ) return len(self.layers) - 1 if isinstance(img, Layer): img.canvas = self self.layers.append(img) return len(self.layers) - 1 if type(img) in (pstr, punicode): try: img = Image.open(img) img = img.convert("RGBA") self.layers.append( Layer(self, img, x, y, name) ) del img return len(self.layers) - 1 except Exception as err: print( "Canvas.layer( %s ) FAILED." %repr( img ) ) print(err) print() exc_type, exc_value, exc_tb = sys.exc_info() traceback.print_exception(exc_type, exc_value, exc_tb) print() return None def fill(self, rgb, x=0, y=0, w=None, h=None, name=""): """Creates a new fill layer. Creates a new layer filled with the given rgb color. For example, fill((255,0,0)) creates a red fill. The layers fills the entire canvas by default. 
""" if w == None: w = self.w - x if h == None: h = self.h - y img = Image.new("RGBA", (w,h), rgb) result = self.layer(img, x, y, name) del img return result def makegradientimage(self, style, w, h): """Creates the actual gradient image. This has been factored out of gradient() so complex gradients like ROUNDRECT which consist of multiple images can be composed. """ if type(w) == float: w *= self.w if type(h) == float: h *= self.h # prevent some div by 0 errors if w < 0: w = -w if h < 0: h = -h w = int( round( max(1,w) )) h = int( round( max(1,h) )) w2 = w // 2 h2 = h // 2 if kwlog: print( (style, self.w,self.h,w,h) ) if style in (RADIALCOSINE,): #, SCATTER): img = Image.new("L", (w, h), 0) elif style in (SCATTER, ): img = Image.new("L", (w, h), 0) # img = Image.new("RGBA", (w, h), (0,0,0,0)) else: img = Image.new("L", (w, h), 255) draw = ImageDraw.Draw(img) if style == SOLID: draw.rectangle((0, 0, w, h), fill=255) if style == LINEAR: for i in xrange( w ): k = int( round( 255.0 * i / w )) draw.rectangle((i, 0, i, h), fill=k) if style == RADIAL: r = min(w,h) / 2.0 r0 = int( round( r )) for i in xrange( r0 ): k = int( round( 255 - 255.0 * i/r )) draw.ellipse((w/2-r+i, h/2-r+i, w/2+r-i, h/2+r-i), fill=k) if style == RADIALCOSINE: r = max(w,h) / 2.0 rx = w / 2.0 ry = h / 2.0 r0 = int( round( r )) deg = 90 base = 90 - deg deltaxdeg = deg / rx deltaydeg = deg / ry deltadeg = deg / r step = min(deltaxdeg, deltaydeg) for i in xrange( r0 ): # k = 255.0 * i/r k = int( round( 256 * sin( radians( base + i * deltadeg ) ) )) ix = i * (rx / r) iy = i * (ry / r) draw.ellipse((0 + ix, 0 + iy, w - ix, h - iy), fill=k) if style == DIAMOND: maxwidthheight = int( round( max(w,h) )) widthradius = w * 0.5 heightradius = h * 0.5 for i in xrange( maxwidthheight ): ratio = i / float( maxwidthheight ) x = int( round( ratio * widthradius ) ) y = int( round( ratio * heightradius ) ) k = int( round( 256.0 * ratio )) draw.rectangle((x, y, w-x, h-y), outline=k) if style == SCATTER: # scatter 
should be some circles randomly across WxH # img, draw maxwidthheight = int( round( max(w,h) )) minwidthheight = int( round( min(w,h) )) def rnd( w, offset ): r = random.random() o2 = offset / 2.0 result = o2 + r * (w - (offset * 2)) return result # circles at 12.5% circleplacemin = int( round( minwidthheight / 9.0 ) ) circleplacemax = int( round( maxwidthheight / 9.0 ) ) c2 = 2 * circleplacemin for count in xrange( 511 ): tempimage = Image.new("L", (w, h), (0,) ) draw2 = ImageDraw.Draw( tempimage ) x = int( round( rnd( w, circleplacemin ) )) y = int( round( rnd( h, circleplacemin ) )) k = min(255, int( round( 33 + random.random() * 127)) ) r = (circleplacemin / 4.0) + random.random() * (circleplacemin / 4.0) bottom = int(round(y + r)) right = int(round(x + r)) draw2.ellipse( (x, y, right, bottom), fill=( k ) ) if 0: print( (x, y, bottom, right) ) # merge img = ImageChops.add(img, tempimage) del draw2 img = img.convert("L") if style in (SINE, COSINE): # sin/cos 0...180 left to right action = sin deg = 180.0 base = 0 if style == COSINE: action = cos deg = 90.0 base = 90.0 - deg deltadeg = deg / w for i in xrange( w ): k = int( round( 256.0 * action( radians( base + i * deltadeg ) ) )) draw.line( (i,0,i, h), fill=k, width=1) result = img.convert("RGBA") del img del draw return result def gradient(self, style=LINEAR, w=1.0, h=1.0, name="", radius=0, radius2=0): """Creates a gradient layer. Creates a gradient layer, that is usually used together with the mask() function. All the image functions work on gradients, so they can easily be flipped, rotated, scaled, inverted, made brighter or darker, ... 
Styles for gradients are LINEAR, RADIAL, DIAMOND, SCATTER, SINE, COSINE and ROUNDRECT """ w0 = self.w h0 = self.h if type(w) == float: w = int( round( w * w0 )) if type(h) == float: h = int( round( h * h0 )) img = None if style in (SOLID, LINEAR, RADIAL, DIAMOND, SCATTER, SINE, COSINE, RADIALCOSINE): img = self.makegradientimage(style, w, h) img = img.convert("RGBA") return self.layer(img, 0, 0, name=name) if style == QUAD: # make a rectangle with softened edges result = Image.new("L", ( w, h ), 255) mask = Image.new("L", ( w, h ), 255) draw = ImageDraw.Draw(mask) if radius == 0 and radius2 == 0: radius = w / 4.0 radius2 = w / 10.0 r1 = int(round( radius )) r2 = int(round( radius2 )) if r1 == 0: r1 = 1 if r2 == 0: r2 = 1 d1 = 2 * r1 d2 = 2 * r2 # create the base rect baserect = self.makegradientimage(SOLID, w-d1, h-d2) # create the vertical gradients verleft = self.makegradientimage(COSINE, r1, h) verleft = verleft.transpose(Image.FLIP_LEFT_RIGHT) vertright = verleft.rotate( 180 ) # create the horizontal gradients # since LINEAR goes from left to right, horup = self.makegradientimage(COSINE, r2, w) horup = horup.transpose(Image.FLIP_LEFT_RIGHT) hordown = horup.rotate( -90, expand=1 ) horup = hordown.rotate( 180 ) # assemble result.paste( baserect, box=( r1, 0) ) result.paste( verleft, box=( 0, 0) ) result.paste( vertright,box=( w-r1, 0) ) mask.paste( hordown, box=( 0, 0) ) mask.paste( horup, box=( 0, h-r2) ) result = ImageChops.darker(result, mask) result = result.convert("RGBA") del mask, horup, hordown del baserect, verleft, vertright return self.layer(result, 0, 0, name=name) if style == ROUNDRECT: result = Image.new("L", ( w, h ), 255) r1 = int( round( radius )) r2 = int( round( radius2 )) if r1 == 0: r1 = 1 if r2 == 0: r2 = 1 d1 = 2 * r1 d2 = 2 * r2 # take 1 radial grad for the 4 corners corners = self.makegradientimage(RADIALCOSINE, d1, d2) # top left b = corners.copy() tl = b.crop( box=(0,0,r1,r2) ) # top right b = corners.copy() tr = b.crop( 
box=(r1,0,d1,r2) ) # bottom left b = corners.copy() bl = b.crop( box=(0,r2,r1,d2) ) # bottom right b = corners.copy() br = b.crop( box=(r1,r2,d1,d2) ) # create the base rect brw = w - d1 brh = h - d2 baserect = self.makegradientimage(SOLID, brw, brh) # create the vertical gradients verleft = self.makegradientimage(COSINE, r1, brh) verleft = verleft.transpose(Image.FLIP_LEFT_RIGHT) vertright = verleft.rotate( 180 ) # create the horizontal gradients # since LINEAR goes from left to right, horup = self.makegradientimage(COSINE, r2, brw) horup = horup.transpose(Image.FLIP_LEFT_RIGHT) hordown = horup.rotate( -90, expand=1 ) horup = hordown.rotate( 180 ) # assemble result.paste( baserect, box=( r1, r2) ) result.paste( hordown, box=( r1, 0) ) result.paste( horup, box=( r1, brh+r2) ) result.paste( verleft, box=( 0, r2) ) result.paste( vertright,box=( brw+r1, r2) ) result.paste( tl, box=( 0, 0) ) result.paste( tr, box=( brw+r1, 0) ) result.paste( bl, box=( 0, brh+r2) ) result.paste( br, box=( brw+r1, brh+r2) ) img = result.convert("RGBA") del corners, tl, tr, bl, br, b del horup, hordown del baserect del verleft, vertright return self.layer(img, 0, 0, name=name) def merge(self, layers): """Flattens the given layers on the canvas. Merges the given layers with the indices in the list on the bottom layer in the list. The other layers are discarded. """ layers.sort() if layers[0] == 0: del layers[0] self.flatten(layers) def flatten(self, layers=[]): """Flattens all layers according to their blend modes. Merges all layers to the canvas, using the blend mode and opacity defined for each layer. Once flattened, the stack of layers is emptied except for the transparent background (bottom layer). """ # When the layers argument is omitted, # flattens all the layers on the canvas. # When given, merges the indexed layers. 
# Layers that fall outside of the canvas are cropped: # this should be fixed by merging to a transparent background # large enough to hold all the given layers' data # (=time consuming). if kwlog: start = time.time() if layers == []: layers = xrange(1, len(self.layers)) background = self.layers._get_bg() background.name = "Background" for i in layers: layer = self.layers[i] # Determine which portion of the canvas # needs to be updated with the overlaying layer. x = max(0, layer.x) y = max(0, layer.y) w = min(background.w, layer.x+layer.w) h = min(background.h, layer.y+layer.h) baseimage = background.img.crop( (x, y, w, h) ) # Determine which piece of the layer # falls within the canvas. x = max(0, -layer.x) y = max(0, -layer.y) w -= layer.x h -= layer.y blendimage = layer.img.crop( (x, y, w, h) ) lblend = blendimage.convert("L") bwblend = lblend.convert("1") # Buffer layer blend modes: # the base below is a flattened version # of all the layers below this one, # on which to merge this blended layer. 
if layer.blend == NORMAL: buffimage = blendimage elif layer.blend == MULTIPLY: buffimage = ImageChops.multiply(baseimage, blendimage) elif layer.blend == SCREEN: buffimage = ImageChops.screen(baseimage, blendimage) elif layer.blend == OVERLAY: buffimage = Blend().overlay(baseimage, blendimage) elif layer.blend == HUE: buffimage = Blend().hue(baseimage, blendimage) elif layer.blend == COLOR: buffimage = Blend().color(baseimage, blendimage) elif layer.blend == ADD: buffimage = ImageChops.add(baseimage, blendimage) elif layer.blend == SUBTRACT: img1 = baseimage.convert("RGB") img2 = blendimage.convert("RGB") buffimage = ImageChops.subtract(img1, img2) buffimage = buffimage.convert("RGBA") del img1, img2 # buffimage = ImageChops.subtract(baseimage, blendimage) # buffimage = Blend().subtract(baseimage, blendimage) elif layer.blend == ADD_MODULO: buffimage = ImageChops.add_modulo(baseimage, blendimage) elif layer.blend == SUBTRACT_MODULO: buffimage = Blend().subtract_modulo(baseimage, blendimage) elif layer.blend == DIFFERENCE: # buffimage = ImageChops.difference(baseimage, blendimage) img1 = baseimage.convert("RGB") img2 = blendimage.convert("RGB") buffimage = ImageChops.difference(img1, img2) buffimage = buffimage.convert("RGBA") del img1, img2 # Buffer a merge between the base and blend # according to the blend's alpha channel: # the base shines through where the blend is less opaque. # Merging the first layer to the transparent canvas # works slightly different than the other layers. # buffalpha = buffimage.split()[3] buffalpha = buffimage.getchannel("A") basealpha = baseimage.getchannel("A") if i == 1: buffimage = Image.composite(baseimage, buffimage, basealpha) else: buffimage = Image.composite(buffimage, baseimage, buffalpha) # The alpha channel becomes a composite of this layer and the base: # the base's (optional) tranparent background # is retained in arrays where the blend layer # is transparent as well. 
buffalpha = ImageChops.lighter(buffalpha, basealpha) #baseimage.split()[3]) try: buffimage.putalpha(buffalpha) except Exception as err: if kwdbg: pdb.set_trace() # TBD This needs fixing print("PILLOW ERROR:", err) # Apply the layer's opacity, # merging the buff to the base with # the given layer opacity. baseimage = Image.blend(baseimage, buffimage, layer.alpha) # Merge the base to the flattened canvas. x = max(0, int( round( layer.x )) ) y = max(0, int( round( layer.y )) ) background.img.paste(baseimage, (x,y) ) del baseimage, buffimage, buffalpha, basealpha, blendimage layers = list(layers) layers.reverse() for i in layers: del self.layers[i].img del self.layers[i] img = Image.new("RGBA", (self.w,self.h), (255,255,255,0)) self.layers._set_bg( Layer(self, img, 0, 0, name="_bg") ) if len(self.layers) == 1: self.layers.append(background) else: self.layers.insert(layers[-1], background) del img if kwlog: stop = time.time() print("Canvas.flatten( %s ) in %.3fsec." % (repr(layers), stop-start)) def export(self, name, ext=".png", format="PNG", unique=False): """Exports the flattened canvas. Flattens the canvas. PNG retains the alpha channel information. Other possibilities are JPEG and GIF. 
""" start = time.time() if not name: name = "photobot_" + datestring() if os.sep in name: name = os.path.abspath( os.path.expanduser( name )) folder, name = os.path.split( name ) if not folder: folder = os.path.abspath( os.curdir ) folder = os.path.join( folder, "exports" ) folder = os.path.abspath( folder ) filename = name + ext if name.endswith( ext ): filename = name if not os.path.exists( folder ): try: os.makedirs( folder ) except: pass try: path = os.path.join( folder, filename ) path = os.path.abspath( path ) except: pass if unique or os.path.exists( path ): path = uniquepath(folder, name, ext, nfill=2, startindex=1, sep="_", always=unique) if kwdbg and 0: # if debugging is on export each layer separately basename = "photobot_" + datestring() + "_layer_%i_%s" + ext background = self.layers._get_bg() background.name = "Background" layers = xrange(1, len(self.layers) ) for i in layers: layer = self.layers[i] # Determine which portion of the canvas # needs to be updated with the overlaying layer. x = max(0, layer.x) y = max(0, layer.y) w = min(background.w, layer.x+layer.w) h = min(background.h, layer.y+layer.h) base = background.img.crop((0, 0, background.w, background.h)) # Determine which piece of the layer # falls within the canvas. x = max(0, -layer.x) y = max(0, -layer.y) w -= layer.x h -= layer.y blend = layer.img.crop((x, y, w, h)) # alpha = blend.split()[3] alpha = blend.getchannel("A") buffer = Image.composite(blend, base, alpha) n = basename % (i, layer.name) path = os.path.join( folder, n ) buffer.save( path, format=format, optimize=False) print( "export() DBG: '%s'" % path.encode("utf-8") ) self.flatten() if format in ("JPEG",): if self.layers[1].img.mode == "RGBA": self.layers[1].img = self.layers[1].img.convert("RGB") self.layers[1].img.save(path, format=format, optimize=False) if kwlog: print( "export() %s" % path.encode("utf-8") ) if kwlog: stop = time.time() print("Canvas.export(%s) in %.3f sec." 
% (name, stop-start)) return path def draw(self, x=0, y=0, name="", ext=".png", format='PNG'): """Places the flattened canvas in NodeBox. Exports to a temporary PNG file. # Draws the PNG in NodeBox using the image() command. # Removes the temporary file. """ #if not name: # name = "photobot_" + datestring() #if not ext: # ext = ".png" #folder = os.path.abspath( os.curdir ) #folder = os.path.join( folder, "exports" ) #if not os.path.exists( folder ): # try: # os.makedirs( folder ) # except: # pass try: #filename = os.path.join( folder, name + ext ) #filename = os.path.abspath(filename) # path = self.export(filename) path = self.export(name, ext, format) try: #if nodeboxlib: _ctx.image(path, x, y) except NameError as err: pass if 0: os.unlink( path ) return path except Exception as err: print(err) print() exc_type, exc_value, exc_tb = sys.exc_info() traceback.print_exception(exc_type, exc_value, exc_tb) print() def preferences(self, interpolation=INTERPOLATION): """Settings that influence image manipulation. Currently, only defines the image interpolation, which can be set to NEAREST, BICUBIC, BILINEAR or LANCZOS. """ self.interpolation = interpolation # # Some stack operations # # some inspiration from a forth wiki page # dup ( a -- a a ) # drop ( a -- ) # swap ( a b -- b a ) # over ( a b -- a b a ) # rot ( a b c -- b c a ) # nip ( a b -- b ) swap drop ; # tuck ( a b -- b a b ) swap over ; @property def top(self): """Interface to top layer. """ return self.layers[-1] @property def topindex(self): """get index of top layer. """ return len(self.layers)-1 @property def dup(self): """Duplicate top layer/stackelement. """ layer = self.top.copy() layer.canvas = self self.layers.append( layer ) return self.top def copy(self): """Returns a copy of the canvas. 
""" _canvas = canvas( self.w, self.h ) _canvas.interpolation = self.interpolation _canvas.layers = Layers() _canvas.w = self.w _canvas.h = self.h for layer in self.layers: layercopy = layer.copy() layercopy.canvas = self _canvas.layer( layercopy ) return _canvas def canvas(w, h): return Canvas(w, h) class Layers(list): """Extends the canvas.layers[] list so it indexes layers names. When the index is an integer, returns the layer at that index. When the index is a string, returns the first layer with that name. The first element, canvas.layers[0], is the transparent background and must remain untouched. """ def __getitem__(self, index): if type(index) in (int, long): return list.__getitem__(self, index) elif type(index) in (pstr, punicode): for layer in self: if layer.name == index: return layer return None def _get_bg(self): return list.__getitem__(self, 0) def _set_bg(self, layer): list.__setitem__(self, 0, layer) class Layer: """Implements a layer on the canvas. A canvas layer stores an image at a given position on the canvas, and all the Photoshop transformations possible for this layer: duplicate(), desature(), overlay(), rotate(), and so on. """ def __init__(self, canvas, img, x=0, y=0, name=""): self.canvas = canvas self.name = name self.img = img self.x = x self.y = y self.w = img.size[0] self.h = img.size[1] self.alpha = 1.0 self.blend = NORMAL self.pixels = Pixels(self.img, self) def prnt(self): # for debugging print("-" * 20) print( "name: '%s' " % self.name.encode("utf-8") ) print("xy: %i %i" % (self.x, self.y) ) print("wh: %i %i" % (self.w, self.h) ) print("alpha: %.2f" % self.alpha) print("blend: %.2f" % self.blend) print("-" * 20) def index(self): """Returns this layer's index in the canvas.layers[]. Searches the position of this layer in the canvas' layers list, return None when not found. 
""" for i in xrange(len(self.canvas.layers)): if self.canvas.layers[i] == self: break if self.canvas.layers[i] == self: return i else: return None def copy(self): """Returns a copy of the layer. This is different from the duplicate() method, which duplicates the layer as a new layer on the canvas. The copy() method returns a copy of the layer that can be added to a different canvas. """ layer = Layer(None, self.img.copy(), self.x, self.y, self.name) layer.w = self.w layer.h = self.h layer.alpha = self.alpha layer.blend = self.blend return layer def delete(self): """Removes this layer from the canvas. """ i = self.index() if i != None: del self.canvas.layers[i] def up(self): """Moves the layer up in the stacking order. """ i = self.index() if i != None: del self.canvas.layers[i] i = min(len(self.canvas.layers), i+1) self.canvas.layers.insert(i, self) def down(self): """Moves the layer down in the stacking order. """ i = self.index() if i != None: del self.canvas.layers[i] i = max(0, i-1) self.canvas.layers.insert(i, self) def bounds(self): """Returns the size of the layer. This is the width and height of the bounding box, the invisible rectangle around the layer. """ return self.img.size def select(self, path, feather=True): """Applies the polygonal lasso tool on a layer. The path paramater is a list of points, either [x1, y1, x2, y2, x3, y3, ...] or [(x1,y1), (x2,y2), (x3,y3), ...] The parts of the layer that fall outside this polygonal area are cut. The selection is not anti-aliased, but the feather parameter creates soft edges. """ w, h = self.img.size mask = Image.new("L", (w,h), 0) draw = ImageDraw.Draw(mask) draw.polygon(path, fill=255) if feather: mask = mask.filter(ImageFilter.SMOOTH_MORE) mask = mask.filter(ImageFilter.SMOOTH_MORE) mask = ImageChops.darker(mask, self.img.getchannel("A")) #self.img.split()[3]) self.img.putalpha(mask) def mask(self): """Masks the layer below with this layer. 
Commits the current layer to the alpha channel of the previous layer. Primarily, mask() is useful when using gradient layers as masks on images below. For example: canvas.layer("image.jpg") canvas.gradient() canvas.layer(2).flip() canvas.layer(2).mask() Adds a white-to-black linear gradient to the alpha channel of image.jpg, making it evolve from opaque on the left to transparent on the right. """ if len(self.canvas.layers) < 2: return i = self.index() if i == 0: return layer = self.canvas.layers[i-1] alpha = Image.new("L", layer.img.size, 0) # Make a composite of the mask layer in grayscale # and its own alpha channel. mask = self.canvas.layers[i] flat = ImageChops.darker(mask.img.convert("L"), mask.img.getchannel("A")) #mask.img.split()[3]) alpha.paste(flat, (mask.x,mask.y)) alpha = ImageChops.darker(alpha, layer.img.getchannel("A")) #layer.img.split()[3]) layer.img.putalpha(alpha) self.delete() def duplicate(self): """Creates a copy of the current layer. This copy becomes the top layer on the canvas. """ i = self.canvas.layer(self.img.copy(), self.x, self.y, self.name) clone = self.canvas.layers[i] clone.alpha = self.alpha clone.blend = self.blend def opacity(self, a=100): self.alpha = a * 0.01 def multiply(self): self.blend = MULTIPLY def add(self): self.blend = ADD def subtract(self): self.blend = SUBTRACT def add_modulo(self): self.blend = ADD_MODULO def subtract_modulo(self): self.blend = SUBTRACT_MODULO def difference(self): self.blend = DIFFERENCE def screen(self): self.blend = SCREEN def overlay(self): self.blend = OVERLAY def hue(self): self.blend = HUE def color(self): self.blend = COLOR def brightness(self, value=1.0): """Increases or decreases the brightness in the layer. The given value is a percentage to increase or decrease the image brightness, for example 0.8 means brightness at 80%. 
""" if value > 5: value = value * 0.01 b = ImageEnhance.Brightness(self.img) self.img = b.enhance(value) def contrast(self, value=1.0): """Increases or decreases the contrast in the layer. The given value is a percentage to increase or decrease the image contrast, for example 1.2 means contrast at 120%. """ # this crashes sometimes try: if value > 5: value = value * 0.01 c = ImageEnhance.Contrast(self.img) self.img = c.enhance(value) except: pass def desaturate(self): """Desaturates the layer, making it grayscale. Instantly removes all color information from the layer, while maintaing its alpha channel. """ # alpha = self.img.split()[3] alpha = self.img.getchannel("A") self.img = self.img.convert("L") self.img = self.img.convert("RGBA") self.img.putalpha(alpha) def colorize(self, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): """Use the ImageOps.colorize() on desaturated layer. """ # # alpha = self.img.split()[3] alpha = self.img.getchannel("A") img = self.img.convert("L") img = ImageOps.colorize(img, black, white, mid, blackpoint=0, whitepoint=255, midpoint=127) img = img.convert("RGBA") img.putalpha(alpha) self.img = img def posterize(self, bits=8): alpha = self.img.getchannel("A") img = self.img.convert("RGB") img = ImageOps.posterize(img, bits) img = img.convert("RGBA") img.putalpha(alpha) self.img = img def solarize(self, threshhold): # alpha = self.img.split()[3] alpha = self.img.getchannel("A") img = self.img.convert("RGB") img = ImageOps.solarize(img, threshhold) img = img.convert("RGBA") img.putalpha(alpha) self.img = img def autocontrast(self, cutoff=0, ignore=None): if 0: #not (1 <= bits <= 8): return # alpha = self.img.split()[3] alpha = self.img.getchannel("A") img = self.img.convert("RGB") img = ImageOps.autocontrast(img, cutoff, ignore) img = img.convert("RGBA") img.putalpha(alpha) self.img = img def deform( self, deformer, resample=BICUBIC ): self.img = ImageOps.deform(self.img, deformer, resample) def equalize(self, 
mask=None): alpha = self.img.getchannel("A") img = self.img.convert("RGB") img = ImageOps.equalize(img, mask) img = img.convert("RGBA") img.putalpha(alpha) self.img = img def invert(self): """Inverts the layer. """ self.img = invertimage( self.img ) def translate(self, x, y): """Positions the layer at the given coordinates. The x and y parameters define where to position the top left corner of the layer, measured from the top left of the canvas. """ self.x = int( round( x )) self.y = int( round( y )) def scale(self, w=1.0, h=1.0): """Resizes the layer to the given width and height. When width w or height h is a floating-point number, scales percentual, otherwise scales to the given size in pixels. """ w0, h0 = self.img.size if type(w) == float: w = int( round( w*w0 ) ) if type(h) == float: h = int( round( h*h0 ) ) self.img = self.img.resize((w,h), resample=LANCZOS) self.w = w self.h = h def rotate(self, angle): """Rotates the layer. Rotates the layer by given angle. Positive numbers rotate counter-clockwise, negative numbers rotate clockwise. Rotate commands are executed instantly, so many subsequent rotates will distort the image. """ # When a layer rotates, its corners will fall # outside of its defined width and height. # Thus, its bounding box needs to be expanded. # Calculate the diagonal width, and angle from # the layer center. This way we can use the # layers's corners to calculate the bounding box. 
def mid( t1, t2, makeint=True ): # calculate the middle between t1 and t2 return int( round( (t2-t1) / 2.0 )) w0, h0 = self.img.size diag0 = sqrt(pow(w0,2) + pow(h0,2)) d_angle = degrees(asin((w0*0.5) / (diag0*0.5))) angle = angle % 360 if ( angle > 90 and angle <= 270): d_angle += 180 w = sin(radians(d_angle + angle)) * diag0 w = max(w, sin(radians(d_angle - angle)) * diag0) w = int( round( abs(w) )) h = cos(radians(d_angle + angle)) * diag0 h = max(h, cos(radians(d_angle - angle)) * diag0) h = int( round( abs(h) )) diag1 = int( round( diag0 )) # The rotation box's background color # is the mean pixel value of the rotating image. # This is the best option to avoid borders around # the rotated image. bg = ImageStat.Stat(self.img).mean bg = (int(bg[0]), int(bg[1]), int(bg[2]), 0) box = Image.new("RGBA", (diag1,diag1), bg) dw02 = mid( w0, diag0 ) # (diag0-w0)/2 dh02 = mid( h0, diag0 ) # (diag0-h0)/2 box.paste(self.img, (dw02, dh02)) box = box.rotate(angle, Image.BICUBIC) dw2 = mid(w, diag0) # int( (diag0-w) / 2.0 ) dh2 = mid(h, diag0) #int( (diag0-h) / 2.0 ) box = box.crop(( dw2+2, dh2, diag1-dw2, diag1-dh2)) self.img = box # Since rotate changes the bounding box size, # update the layers' width, height, and position, # so it rotates from the center. self.x += mid( w, self.w ) # int( (self.w-w)/2.0 ) self.y += mid( h, self.h ) # int( (self.h-h)/2.0 ) self.w = w self.h = h def distort(self, x1=0,y1=0, x2=0,y2=0, x3=0,y3=0, x4=0,y4=0): """Distorts the layer. Distorts the layer by translating the four corners of its bounding box to the given coordinates: upper left (x1,y1), upper right(x2,y2), lower right (x3,y3) and lower left (x4,y4). """ w, h = self.img.size quad = (-x1,-y1, -x4,h-y4, w-x3,w-y3, w-x2,-y2) # quad = (x1,y1, x2,y2, x3,y3, x4,y4) #, LANCZOS) self.img = self.img.transform(self.img.size, Image.QUAD, quad) def flip(self, axis=HORIZONTAL): """Flips the layer, either HORIZONTAL or VERTICAL. 
""" if axis & HORIZONTAL: self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT) if axis & VERTICAL: self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM) def crop( self, bounds): """Crop a pillow image at bounds(left, top, right, bottom) """ w0, h0 = self.img.size x, y = self.x, self.y left, top, right, bottom = bounds left = max(x, left) top = max(y, top) right = min(right, w0) bottom = min(bottom, h0) self.img = self.img.crop( (left, top, right, bottom) ) self.w, self.h = self.img.size def blur(self): """Blurs the layer. """ self.img = self.img.filter(ImageFilter.BLUR) def boxblur(self, radius=2): """Blurs the layer. """ self.img = self.img.filter( ImageFilter.BoxBlur( radius ) ) # new def contour(self): """Contours the layer. """ self.img = self.img.filter(ImageFilter.CONTOUR) # new def detail(self): """Details the layer. """ self.img = self.img.filter(ImageFilter.DETAIL) # new def edge_enhance(self): """Edge enhances the layer. """ self.img = self.img.filter(ImageFilter.EDGE_ENHANCE) # new def edge_enhance_more(self): """Edge enhances more the layer. """ self.img = self.img.filter(ImageFilter.EDGE_ENHANCE_MORE) # new def emboss(self): """Embosses the layer. """ self.img = self.img.filter(ImageFilter.EMBOSS) # new def find_edges(self): """Find edges in the layer. """ alpha = self.img.getchannel("A") self.img = self.img.filter(ImageFilter.FIND_EDGES) self.img = self.img.convert("RGBA") self.img.putalpha(alpha) # new def smooth(self): """Smoothes the layer. """ self.img = self.img.filter(ImageFilter.SMOOTH) # new def smooth_more(self): """Smoothes the layer more. """ self.img = self.img.filter(ImageFilter.SMOOTH_MORE) def sharpen(self, value=1.0): """Increases or decreases the sharpness in the layer. The given value is a percentage to increase or decrease the image sharpness, for example 0.8 means sharpness at 80%. 
""" s = ImageEnhance.Sharpness(self.img) self.img = s.enhance(value) def convolute(self, kernel, scale=None, offset=0): """A (3,3) or (5,5) convolution kernel. The kernel argument is a list with either 9 or 25 elements, the weight for each surrounding pixels to convolute. """ if len(kernel) == 9: size = (3,3) elif len(kernel) == 25: size = (5,5) else: return if scale == None: scale = 0 for x in kernel: scale += x if scale == 0: scale = 1 f = ImageFilter.Kernel(size, kernel, scale=scale, offset=offset) # alpha = self.img.split()[3] alpha = self.img.getchannel("A") img = self.img.convert("RGB") # f = ImageFilter.BuiltinFilter() # f.filterargs = size, scale, offset, kernel img = img.filter(f) img = img.convert("RGBA") img.putalpha( alpha ) self.img = img def statistics(self): alpha = self.img.getchannel("A") return ImageStat.Stat(self.img, alpha) #self.img.split()[3]) def levels(self): """Returns a histogram for each RGBA channel. Returns a 4-tuple of lists, r, g, b, and a. Each list has 255 items, a count for each pixel value. """ h = self.img.histogram() r = h[0:255] g = h[256:511] b = h[512:767] a = h[768:1024] return r, g, b, a class Blend: """Layer blending modes. Implements additional blending modes to those present in PIL. These blending functions can not be used separately from the canvas.flatten() method, where the alpha compositing of two layers is handled. Since these blending are not part of a C library, but pure Python, they take forever to process. """ def subtract(self, img1, img2, scale=1.0, offset=0): base = img1.convert("RGB") blend = img2.convert("RGB") result = ImageChops.subtract(base, blend, scale=scale, offset=offset) result = result.convert("RGBA") return result def subtract_modulo(self, img1, img2): base = img1.convert("RGB") blend = img2.convert("RGB") result = ImageChops.subtract_modulo(base, blend) result = result.convert("RGBA") return result def overlay(self, img1, img2): """Applies the overlay blend mode. 
Overlays image img2 on image img1. The overlay pixel combines multiply and screen: it multiplies dark pixels values and screen light values. Returns a composite image with the alpha channel retained. """ p1 = list( img1.getdata() ) p2 = list( img2.getdata() ) for i in xrange(len(p1)): p3 = () for j in xrange(len(p1[i])): a = p1[i][j] / 255.0 b = p2[i][j] / 255.0 # When overlaying the alpha channels, # take the alpha of the most transparent layer. if j == 3: # d = (a+b) * 0.5 # d = a d = min(a,b) elif a > 0.5: d = 2 * (a+b - a*b)-1 else: d = 2*a*b p3 += ( int( round(d * 255.0)), ) p1[i] = p3 img = Image.new("RGBA", img1.size, 255) img.putdata(p1) return img def hue(self, img1, img2): """Applies the hue blend mode. Hues image img1 with image img2. The hue filter replaces the hues of pixels in img1 with the hues of pixels in img2. Returns a composite image with the alpha channel retained. """ p1 = list(img1.getdata()) p2 = list(img2.getdata()) for i in xrange(len(p1)): r1, g1, b1, a1 = p1[i] r1 = r1 / 255.0 g1 = g1 / 255.0 b1 = b1 / 255.0 h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1) r2, g2, b2, a2 = p2[i] r2 = r2 / 255.0 g2 = g2 / 255.0 b2 = b2 / 255.0 h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2) r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1) r3 = int( round( r3*255.0 )) g3 = int( round( g3*255.0 )) b3 = int( round( b3*255.0 )) p1[i] = (r3, g3, b3, a1) img = Image.new("RGBA", img1.size, 255) img.putdata(p1) return img def color(self, img1, img2): """Applies the color blend mode. Colorize image img1 with image img2. The color filter replaces the hue and saturation of pixels in img1 with the hue and saturation of pixels in img2. Returns a composite image with the alpha channel retained. 
""" p1 = list(img1.getdata()) p2 = list(img2.getdata()) for i in xrange(len(p1)): r1, g1, b1, a1 = p1[i] r1 = r1 / 255.0 g1 = g1 / 255.0 b1 = b1 / 255.0 h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1) r2, g2, b2, a2 = p2[i] r2 = r2 / 255.0 g2 = g2 / 255.0 b2 = b2 / 255.0 h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2) r3, g3, b3 = colorsys.hsv_to_rgb(h2, s2, v1) r3 = int( round( r3*255.0 )) g3 = int( round( g3*255.0 )) b3 = int( round( b3*255.0 )) p1[i] = (r3, g3, b3, a1) img = Image.new("RGBA", img1.size, 255) img.putdata(p1) return img class Pixels: """Provides direct access to a layer's pixels. The layer.pixels[] contains all pixel values in a 1-dimensional array. Each pixel is a tuple containing (r,g,b,a) values. After the array has been updated, layer.pixels.update() must be called for the changes to commit. """ def __init__(self, img, layer): self.layer = layer self.img = img self.data = None def __getitem__(self, i): w, h = self.img.size noofpixels = w * h if i >= noofpixels: i -= noofpixels if i < 0: i += noofpixels if self.data == None: self.data = list(self.img.getdata()) return self.data[i] def __setitem__(self, i, rgba): w, h = self.img.size noofpixels = w * h if i >= noofpixels: i -= noofpixels if i < 0: i += noofpixels if self.data == None: self.data = list(self.img.getdata()) self.data[i] = rgba def __iter__(self): for i in xrange(len(self)): yield self[i] def __len__(self): w, h = self.img.size return w * h def update(self): if self.data != None: self.img.putdata(self.data) self.data = None def convolute(self, kernel, scale=None, offset=0): """A (3,3) or (5,5) convolution kernel. The kernel argument is a list with either 9 or 25 elements, the weight for each surrounding pixels to convolute. 
""" if len(kernel) == 9: size = (3,3) elif len(kernel) == 25: size = (5,5) else: return if scale == None: scale = 0 for x in kernel: scale += x if scale == 0: scale = 1 # f = ImageFilter.BuiltinFilter() # f.filterargs = size, scale, offset, kernel f = ImageFilter.Kernel(size, kernel, scale=scale, offset=offset) self.layer.img = self.layer.img.filter(f) # # nodebox & standalone pillow tools # def makeunicode(s, srcencoding="utf-8", normalizer="NFC"): typ = type(s) # convert to str first; for number types etc. if typ not in (punicode,): if typ not in (pstr,): s = str(s) try: s = punicode( s, srcencoding ) except TypeError as err: # pdb.set_trace() print( "makeunicode(): %s" % repr(err) ) print( "%s - %s" % (type(s), repr(s)) ) return s if typ in (punicode,): s = unicodedata.normalize(normalizer, s) return s def uniquepath(folder, filenamebase, ext, nfill=1, startindex=1, sep="_", always=False): folder = os.path.abspath( folder ) if not always: path = os.path.join(folder, filenamebase + ext ) if not os.path.exists( path ): return path n = startindex while True: serialstring = str(n).rjust(nfill, "0") filename = filenamebase + sep + serialstring + ext fullpath = os.path.join(folder, filename) if n >= 10**nfill: nfill = nfill + 1 if not os.path.exists(fullpath): return fullpath n += 1 def hashFromString( s ): h = hashlib.sha1() h.update( s ) return h.hexdigest() def datestring(dt = None, dateonly=False, nospaces=True, nocolons=True): """Make an ISO datestring. The defaults are good for using the result of 'datestring()' in a filename. """ if not dt: now = str(datetime.datetime.now()) else: now = str(dt) if not dateonly: now = now[:19] else: now = now[:10] if nospaces: now = now.replace(" ", "_") if nocolons: now = now.replace(":", "") return now def grid(cols, rows, colSize=1, rowSize=1, shuffled=False): """Returns an iterator that contains coordinate tuples. The grid can be used to quickly create grid-like structures. 
A common way to use them is: for x, y in grid(10,10,12,12): rect(x,y, 10,10) """ # Prefer using generators. rowRange = range( int(rows) ) colRange = range( int(cols) ) # Shuffled needs a real list, though. if (shuffled): rowRange = list(rowRange) colRange = list(colRange) random.shuffle(rowRange) random.shuffle(colRange) for y in rowRange: for x in colRange: yield (x*colSize, y*rowSize) # # image tools section # def invertimage( img ): # alpha = img.split()[3] alpha = img.getchannel("A") img = img.convert("RGB") img = ImageOps.invert(img) img = img.convert("RGBA") img.putalpha(alpha) return img def cropimage( img, bounds): """Crop a pillow image at bounds(left, top, right, bottom) """ return img.crop( bounds ) def splitrect( left, top, right, bottom, hor=True, t=0.5 ): """Split a PIL image horizontally or vertically. A split is horizontal if the splitline is horizontal. Return a list with images. """ # w,h = img.size w = int( round( right-left )) h = int( round( bottom-top )) w2 = int( round( w * t )) h2 = int( round( h * t )) if hor: rects = [ (left, top, right, top+h2), (left, top+h2+1, right, bottom) ] else: rects = [ (left, top, l+w2, bottom), (left+w2+1, top, right, bottom) ] return rects def splitimage( img ): pass # gridsizeh = w // hor # remainderh = w % hor # noofmainchunks = noofrecords // chunksize # remainingrecords = noofrecords % chunksize """ with Image.open("hopper.jpg") as im: # The crop method from the Image module takes four coordinates as input. # The right can also be represented as (left+width) # and lower can be represented as (upper+height). (left, upper, right, lower) = (20, 20, 100, 100) # Here the image "im" is cropped and assigned to new variable im_crop im_crop = im.crop((left, upper, right, lower)) """ def aspectRatio(size, maxsize, height=False, width=False, assize=False): """Resize size=(w,h) to maxsize. 
use height == maxsize if height==True use width == maxsize if width==True use max(width,height) == maxsize if width==height==False """ w, h = size scale = 1.0 if width !=False: currmax = w elif height !=False: currmax = h else: currmax = max( (w,h) ) if width and height: currmax = min( (w,h) ) if currmax == maxsize: # return 1.0 pass elif maxsize == 0: #return 1.0 pass else: scale = float(maxsize) / currmax w = int( round( w*scale ) ) h = int( round( h*scale ) ) size = (w,h) if assize: return size return scale def innerRect( w0, h0, w1, h1): """Create an inner size crop rect (0,0,w1,h1) + translation """ pass def insetRect( rectangle, horInset, vertInset): """ """ x, y, w, h = rectangle dh = horInset / 2.0 dv = vertInset / 2.0 return x+dh, y+dv, w-horInset, h-vertInset def cropImageToRatioHorizontal( layer, ratio ): """ """ w, h = layer.bounds() newwidth = int( round( h*ratio )) d = int( newwidth / 2.0 ) x,y,w,h = insetRect( (0,0,w,h), d, 0 ) layer.img = layer.img.crop(box=(x,y,x+w,y+h)) return layer def scaleLayerToHeight( layer, newheight ): # get current image bounds w, h = layer.bounds() # calculate scale & apply s = aspectRatio( (w,h), newheight, height=True) layer.scale(s, s) return layer def placeImage(canv, path, x, y, maxsize=None, name="", width=True, height=False): """Create an image layer. """ if maxsize: img1 = resizeImage(path, maxsize, width=width, height=height) top = canv.layer(img1, name=name) else: top = canv.layer(path, name=name) canv.top.translate(x, y) w, h, = canv.top.bounds() return top, w, h def resizeImage( filepath, maxsize, orientation=True, width=True, height=True): """Get a downsampled image for use in layers. 
""" f = False try: img = Image.open(filepath) except Exception as err: print("\nresizeImage() Image.open() FAILED '%s'" % filepath.encode("utf-8")) print(err) return "" # downsample the image if maxsize: w,h = aspectRatio( (img.size), maxsize, height=height, width=height, assize=True) img = img.resize( (w,h), resample=Image.LANCZOS) # respect exif orientation if orientation: img = normalizeOrientationImage( img ) if f: f.close() return img.convert("RGBA") def normalizeOrientationImage( img ): """Rotate an image according to exif info. """ rotation = 0 try: info = img._getexif() if 274 in info: r = info[274] if r == 3: rotation = 180 elif r == 6: rotation = -90 elif r == 8: rotation = 90 except (Exception, IndexError) as err: pass if rotation != 0: return img.rotate( rotation ) return img # # text section # def label( canvas, string, x, y, fontsize=18, fontpath="" ): """Needs to be written... """ # search for a usable font systemarials = [ "C:\Windows\Fonts\arial.ttf", "/Library/Fonts/Arial.ttf"] systemarials.insert(0, fontpath) font = False for f in systemarials: if os.path.exists( f ): font = f break if not font: return False w,h = canvas.w, canvas.h mask = Image.new("L", (w, h), 0) blatt = Image.new("RGBA", (w, h), (0,0,0,0)) drawtext = ImageDraw.Draw( blatt ) drawmask = ImageDraw.Draw( mask ) # use a bitmap font font = PIL.ImageFont.truetype(font=font, size=fontsize, index=0, encoding='') drawtext.text((x, y), string, font=font, fill=(192,192,192,255)) drawmask.text((x, y), string, font=font, fill=192) drawtext.text((x-1, y-1), string, font=font, fill=(0,0,0,255)) drawmask.text((x-1, y-1), string, font=font, fill=255) canvas.layer( blatt ) canvas.layer( mask ) canvas.top.mask()
nilq/baby-python
python
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2019 Fortinet, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
---
author:
    - Miguel Angel Munoz (@magonzalez)
httpapi : fortios
short_description: HttpApi Plugin for Fortinet FortiOS Appliance or VM
description:
  - This HttpApi plugin provides methods to connect to
    Fortinet FortiOS Appliance or VM via REST API
version_added: "2.9"
"""

import json

from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
import re
from datetime import datetime


class HttpApi(HttpApiBase):
    """HttpApi connection plugin for FortiOS REST API access.

    Supports two authentication modes: username/password (cookie +
    CSRF-token based) and a pre-issued access token appended to every URL.
    """

    def __init__(self, connection):
        super(HttpApi, self).__init__(connection)

        self._conn = connection
        # CSRF token harvested from the 'ccsrftoken' cookie after login.
        self._ccsrftoken = ''
        # FortiOS version reported by the device; filled by
        # update_system_version() after login.
        self._system_version = None
        self._ansible_fos_version = 'v6.0.0'
        self._ansible_galaxy_version = '1.1.8'
        # NOTE(review): world-readable debug log in /tmp that records every
        # request URL and payload -- confirm this is acceptable.
        self._log = open("/tmp/fortios.ansible.log", "a")

    def log(self, msg):
        # Append a timestamped line to the plugin's debug log.
        log_message = str(datetime.now())
        log_message += ": " + str(msg) + '\n'
        self._log.write(log_message)
        self._log.flush()

    def get_access_token(self):
        '''this is only available after a module is initialized'''
        token = self._conn.get_option('access_token') if 'access_token' in self._conn._options else None

        return token

    def set_become(self, become_context):
        """
        Elevation is not required on Fortinet devices - Skipped
        :param become_context: Unused input.
        :return: None
        """
        return None

    def login(self, username, password):
        """Call a defined login endpoint to receive an authentication token.

        With no access token configured, performs a /logincheck POST with
        the credentials; otherwise validates the token against a schema
        endpoint. Either way the device's system version is fetched last.
        """
        if (username is None or password is None) and self.get_access_token() is None:
            raise Exception('Please provide access token or username/password to login')

        if self.get_access_token() is None:
            self.log('login with username and password')
            data = "username=" + urllib.parse.quote(username) + "&secretkey=" + urllib.parse.quote(password) + "&ajax=1"
            dummy, result_data = self.send_request(url='/logincheck', data=data, method='POST')
            self.log('login with user: %s %s' % (username, 'succeeds' if result_data[0] == '1' else 'fails'))
            # The ajax=1 response body starts with '1' on success.
            if result_data[0] != '1':
                raise Exception('Wrong credentials. Please check')
            # If we succeed to login, we retrieve the system status first
        else:
            self.log('login with access token')
            self.send_request(url='/logincheck')
            # A 401 on any API endpoint means the token is not accepted.
            status, _ = self.send_request(url='/api/v2/cmdb/system/interface?vdom=root&action=schema')
            if status == 401:
                raise Exception('Invalid access token. Please check')

        self.update_system_version()

    def logout(self):
        """ Call to implement session logout."""
        self.log('logout')
        self.send_request(url='/logout', method="POST")

    def update_auth(self, response, response_text):
        """
        Get cookies and obtain value for csrftoken that will be used on next requests
        :param response: Response given by the server.
        :param response_text: Unused input.
        :return: Dictionary containing headers
        """
        if self.get_access_token() is None:
            headers = {}

            for attr, val in response.getheaders():
                if attr == 'Set-Cookie' and 'APSCOOKIE_' in val:
                    headers['Cookie'] = val
                elif attr == 'Set-Cookie' and 'ccsrftoken=' in val:
                    # The token is quoted inside the cookie value.
                    csrftoken_search = re.search('\"(.*)\"', val)
                    if csrftoken_search:
                        self._ccsrftoken = csrftoken_search.group(1)
                        headers['x-csrftoken'] = self._ccsrftoken
                        self.log('update x-csrftoken: %s' % (self._ccsrftoken))
            return headers
        else:
            self.log('using access token - setting header')
            return {
                "Accept": "application/json"
            }

    def handle_httperror(self, exc):
        """
        propagate exceptions to users
        :param exc: Exception
        """
        self.log('Exception thrown from handling http: ' + to_text(exc))

        return exc

    def _concat_token(self, url):
        # Append the access token as a query parameter, if one is set.
        if self.get_access_token():
            token_pair = 'access_token=' + self.get_access_token()
            return url + '&' + token_pair if '?' in url else url + '?' + token_pair
        return url

    def _concat_params(self, url, params):
        # Append key=value pairs from params to the URL's query string.
        if not params or not len(params):
            return url
        url = url + '?' if '?' not in url else url
        for param_key in params:
            param_value = params[param_key]
            if url[-1] == '?':
                url += '%s=%s' % (param_key, param_value)
            else:
                url += '&%s=%s' % (param_key, param_value)
        return url

    def send_request(self, **message_kwargs):
        """
        Responsible for actual sending of data to the connection httpapi base plugin.
        :param message_kwargs: A formatted dictionary containing request info: url, data, method

        :return: Status code and response data.
        """
        url = message_kwargs.get('url', '/')
        if self.get_access_token() is not None:
            url = self._concat_token(message_kwargs.get('url', '/'))
        data = message_kwargs.get('data', '')
        method = message_kwargs.get('method', 'GET')
        params = message_kwargs.get('params', {})

        url = self._concat_params(url, params)
        self.log('send request: METHOD:%s URL:%s DATA:%s' % (method, url, data))

        try:
            response, response_data = self.connection.send(url, data, method=method)

            json_formatted = to_text(response_data.getvalue())

            return response.status, json_formatted
        except Exception as err:
            raise Exception(err)

    def update_system_version(self):
        """
        retrieve the system status of fortigate device
        """
        url = '/api/v2/cmdb/system/interface?vdom=root&action=schema'
        status, result = self.send_request(url=url)
        self.log('update sys ver: ' + str(status) + ' len=' + str(len(to_text(result))))
        result_json = json.loads(result)
        self._system_version = result_json.get('version', 'undefined')
        self.log('system version: %s' % (self._system_version))
        self.log('ansible version: %s' % (self._ansible_fos_version))

    def get_system_version(self):
        # Compare the device's major.minor version with the version this
        # collection targets; raises if called before login().
        if not self._system_version:
            raise Exception('Wrong calling stack, httpapi must login!')
        system_version_words = self._system_version.split('.')
        ansible_version_words = self._ansible_fos_version.split('.')
        result = dict()
        result['system_version'] = self._system_version
        result['ansible_collection_version'] = self._ansible_fos_version + ' (galaxy: %s)' % (self._ansible_galaxy_version)
        result['matched'] = system_version_words[0] == ansible_version_words[0] and system_version_words[1] == ansible_version_words[1]
        if not result['matched']:
            result['message'] = 'Please follow steps in FortiOS versioning notes: https://ansible-galaxy-fortios-docs.readthedocs.io/en/latest/version.html'
        else:
            result['message'] = 'versions match'
        return result
nilq/baby-python
python
# =============================================================================== # # # # This file has been generated automatically!! Do not change this manually! # # # # =============================================================================== # from __future__ import annotations from pydantic import Field from ..base_object import BaseObject class ReorderInstalledStickerSets(BaseObject): """ Changes the order of installed sticker sets :param is_masks: Pass true to change the order of mask sticker sets; pass false to change the order of ordinary sticker sets :type is_masks: :class:`bool` :param sticker_set_ids: Identifiers of installed sticker sets in the new correct order :type sticker_set_ids: :class:`list[int]` """ ID: str = Field("reorderInstalledStickerSets", alias="@type") is_masks: bool sticker_set_ids: list[int] @staticmethod def read(q: dict) -> ReorderInstalledStickerSets: return ReorderInstalledStickerSets.construct(**q)
nilq/baby-python
python
from ..container import container
from ..parallel import rank0_obj
import logging

# Module-wide logger resolved from the DI container, wrapped with
# rank0_obj -- presumably so only rank 0 emits records in distributed
# runs; confirm against ..parallel.rank0_obj.
logger: logging.Logger = rank0_obj(container.get(logging.Logger))
nilq/baby-python
python
from django.contrib import admin

from personal.models import ToDo

from .to_do import ToDoAdmin

# Register the ToDo model with its custom ModelAdmin so it appears in the
# Django admin site.
admin.site.register(ToDo, ToDoAdmin)
nilq/baby-python
python
from swagger_server.models.beacon_concept import BeaconConcept  # noqa: E501
from swagger_server.models.beacon_concept_with_details import BeaconConceptWithDetails  # noqa: E501
from swagger_server.models.beacon_concept_detail import BeaconConceptDetail
from swagger_server.models.exact_match_response import ExactMatchResponse  # noqa: E501

from beacon_controller import biolink_model as blm
from beacon_controller.providers import rhea
from beacon_controller.providers.xrefs import get_xrefs
from beacon_controller.const import Category, Predicate


def get_concept_details(concept_id):  # noqa: E501
    """get_concept_details

    Retrieves details for a specified concepts in the system, as specified by a (url-encoded) CURIE identifier of a concept known the given knowledge source.  # noqa: E501

    :param concept_id: (url-encoded) CURIE identifier of concept of interest
    :type concept_id: str

    :rtype: BeaconConceptWithDetails
    """
    concept_id = concept_id.upper()
    if concept_id.startswith('EC:'):
        # Enzyme: look up by EC number.
        concept = rhea.get_enzyme(concept_id)

        if concept is None:
            return None

        _, ec_number = concept_id.split(':', 1)

        synonyms = concept.get('Synonyms')
        if isinstance(synonyms, str):
            synonyms = synonyms.split(';')
        else:
            synonyms = []

        return BeaconConceptWithDetails(
            id=concept_id,
            uri=f'https://enzyme.expasy.org/EC/{ec_number}',
            name=concept.get('Name'),
            symbol=None,
            categories=[Category.protein.name],
            description=None,
            synonyms=synonyms,
            exact_matches=[],
            details=[]
        )
    elif concept_id.startswith('RHEA:'):
        # Reaction: fetch its equation from the RHEA SPARQL endpoint.
        records = rhea.get_records(f"""
        PREFIX rh:<http://rdf.rhea-db.org/>
        SELECT ?equation ?reaction WHERE {{
            ?reaction rh:accession "{concept_id}" .
            ?reaction rh:equation ?equation .
        }} LIMIT 1
        """)

        for record in records:
            return BeaconConceptWithDetails(
                id=concept_id,
                uri=record['reaction']['value'],
                name=record['equation']['value'],
                symbol=None,
                categories=[Category.molecular_activity.name],
                description=None,
                synonyms=[],
                exact_matches=[],
                details=[]
            )
    else:
        # Compound: fetch name, optional ChEBI xref and reaction count.
        records = rhea.get_records(f"""
        PREFIX rh:<http://rdf.rhea-db.org/>
        SELECT ?compoundAc ?chebi (count(distinct ?reaction) as ?reactionCount) ?compoundName WHERE {{
            ?reaction rdfs:subClassOf rh:Reaction .
            ?reaction rh:status rh:Approved .
            ?reaction rh:side ?reactionSide .
            ?reactionSide rh:contains ?participant .
            ?participant rh:compound ?compound .
            OPTIONAL {{ ?compound rh:chebi ?chebi . }}
            ?compound rh:name ?compoundName .
            ?compound rh:accession "{concept_id}" .
        }} LIMIT 1
        """)

        for record in records:
            # BUGFIX: the 'uri' extraction previously ran *before* this
            # loop, referencing 'record' before assignment (NameError
            # masked by a bare except). 'chebi' is OPTIONAL in the SPARQL
            # query, so it may be absent from the record.
            chebi = record.get('chebi')
            uri = chebi['value'] if chebi is not None else None
            return BeaconConceptWithDetails(
                id=concept_id,
                uri=uri,
                name=record['compoundName']['value'],
                symbol=None,
                categories=[Category.chemical_substance.name],
                description=None,
                synonyms=[],
                exact_matches=[],
                details=[BeaconConceptDetail(tag='reactionCount', value=record['reactionCount']['value'])]
            )


def get_concepts(keywords=None, categories=None, offset=None, size=None):  # noqa: E501
    """get_concepts

    Retrieves a list of whose concept in the beacon knowledge base with names and/or synonyms matching a set of keywords or substrings. The results returned should generally be returned in order of the quality of the match, that is, the highest ranked concepts should exactly match the most keywords, in the same order as the keywords were given. Lower quality hits with fewer keyword matches or out-of-order keyword matches, should be returned lower in the list.  # noqa: E501

    :param keywords: (Optional) array of keywords or substrings against which to match concept names and synonyms
    :type keywords: List[str]
    :param categories: (Optional) array set of concept categories - specified as Biolink name labels codes gene, pathway, etc. - to which to constrain concepts matched by the main keyword search (see [Biolink Model](https://biolink.github.io/biolink-model) for the full list of terms)
    :type categories: List[str]
    :param offset: offset (cursor position) to next batch of statements of amount 'size' to return.
    :type offset: int
    :param size: maximum number of concept entries requested by the client; if this argument is omitted, then the query is expected to returned all the available data for the query
    :type size: int

    :rtype: List[BeaconConcept]
    """
    if size is None:
        size = 10

    concepts = []

    # Enzymes (proteins) first, if the category filter allows them.
    if categories is None or any(a in categories for a in blm.ancestors(Category.protein.name)):
        enzymes, total_num_rows = rhea.find_enzymes(keywords, offset, size, metadata=True)

        for enzyme in enzymes:
            concepts.append(BeaconConcept(
                id=f'EC:{enzyme.get("ID")}',
                name=enzyme.get('Name'),
                categories=[Category.protein.name],
                description=None
            ))

        if size is not None and len(concepts) < size:
            # Shift the paging window into the compound search space.
            offset = max(0, offset - total_num_rows) if offset is not None else None
            size = size - len(concepts) if size is not None else None
        elif size is not None and len(concepts) >= size:
            return concepts

    # Then chemical substances, filling up to the remaining size.
    if categories is None or any(a in categories for a in blm.ancestors(Category.chemical_substance.name)):
        compounds = rhea.find_compounds(keywords, offset=offset, limit=size)
        for compound in compounds:
            concepts.append(BeaconConcept(
                id=compound.get('compoundAc').get('value'),
                name=compound.get('compoundName').get('value'),
                categories=[Category.chemical_substance.name],
                description=None
            ))

    return concepts


def get_exact_matches_to_concept_list(c):  # noqa: E501
    """get_exact_matches_to_concept_list

    Given an input array of [CURIE](https://www.w3.org/TR/curie/) identifiers of known exactly matched concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch), retrieves the list of [CURIE](https://www.w3.org/TR/curie/) identifiers of additional concepts that are deemed by the given knowledge source to be exact matches to one or more of the input concepts **plus** whichever concept identifiers from the input list were specifically matched to these additional concepts, thus giving the whole known set of equivalent concepts known to this particular knowledge source. If an empty set is returned, the it can be assumed that the given knowledge source does not know of any new equivalent concepts matching the input set. The caller of this endpoint can then decide whether or not to treat its input identifiers as its own equivalent set.  # noqa: E501

    :param c: an array set of [CURIE-encoded](https://www.w3.org/TR/curie/) identifiers of concepts thought to be exactly matching concepts, to be used in a search for additional exactly matching concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch).
    :type c: List[str]

    :rtype: List[ExactMatchResponse]
    """
    results = []
    for conceptId in c:
        # Skip anything that is not CURIE-shaped.
        if ':' not in conceptId:
            continue
        xrefs = get_xrefs(conceptId)
        if xrefs:
            results.append(ExactMatchResponse(
                id=conceptId,
                within_domain=True,
                has_exact_matches=xrefs
            ))
        else:
            results.append(ExactMatchResponse(
                id=conceptId,
                within_domain=False,
                has_exact_matches=[]
            ))
    return results
nilq/baby-python
python
import pyautogui as pt
import pyperclip as pc
from pynput.mouse import Controller, Button
from time import sleep
from whatsapp_responses import response

# Mouse click workaround for macOS
mouse = Controller()


# Instructions for our WhatsApp bot
class WhatsApp:
    """Screen-automation bot that answers new WhatsApp Web messages.

    Works by locating reference images (green unread dot, paperclip icon)
    on screen, so WhatsApp Web must be visible and unobstructed.
    """

    # define the starting values
    def __init__(self, speed=.5, click_speed=.3):
        self.speed = speed          # mouse-movement duration in seconds
        self.click_speed = click_speed  # delay between double-click presses
        self.message = ""
        self.last_message = ""

    # Navigate to the green dot marking a chat with new messages
    def nav_green_dot(self):
        try:
            position = pt.locateOnScreen('green_dot.png', confidence=.7)
            print(position)
            pt.moveTo(position[0:2], duration=self.speed)
            # Step left of the dot so the click lands on the chat row.
            pt.moveRel(-100, 0, duration=self.speed)
            pt.doubleClick(interval=self.click_speed)
        except Exception as e:
            print('Exception (nav_green_dot): ', e)

    # Navigate to our message input box (right of the paperclip icon)
    def nav_input_box(self):
        try:
            position = pt.locateOnScreen('paperclip.png', confidence=.7)
            pt.moveTo(position[0:2], duration=self.speed)
            pt.moveRel(100, 10, duration=self.speed)
            pt.doubleClick(interval=self.click_speed)
        except Exception as e:
            print('Exception (nav_input_box): ', e)

    # Navigate to the message we want to respond to (above the paperclip)
    def nav_message(self):
        try:
            position = pt.locateOnScreen('paperclip.png', confidence=.7)
            pt.moveTo(position[0:2], duration=self.speed)
            pt.moveRel(35, -50, duration=self.speed)
        except Exception as e:
            print('Exception (nav_message): ', e)

    # Copies the message that we want to process into self.message
    def get_message(self):
        # Triple-click selects the whole message bubble, then copy via the
        # right-click context menu and read it from the clipboard.
        mouse.click(Button.left, 3)
        sleep(self.speed)
        mouse.click(Button.right, 1)
        sleep(self.speed)
        pt.moveRel(10, 10, duration=self.speed)
        mouse.click(Button.left, 1)
        sleep(1)
        self.message = pc.paste()
        print('User says: ', self.message)

    # Send the reply to the user
    def send_message(self):
        try:
            # Only respond if this message differs from the last one handled.
            if self.message != self.last_message:
                bot_response = response(self.message)
                print('You say: ', bot_response)
                pt.typewrite(bot_response, interval=.1)
                # Sends the message (disable while testing).
                pt.typewrite('\n')
                # Remember the last message so we don't answer it twice.
                self.last_message = self.message
            else:
                print('No new message...')
        except Exception as e:
            print('Exception (send_message): ', e)

    # Close the reply/preview box via its X button
    def nav_x(self):
        try:
            # BUGFIX: a dead locateOnScreen('paperclip.png') call whose
            # result was immediately overwritten has been removed.
            position = pt.locateOnScreen('x.png', confidence=.7)
            pt.moveTo(position[0:2], duration=self.speed)
            pt.moveRel(3, 10, duration=self.speed)
            mouse.click(Button.left, 1)
        except Exception as e:
            print('Exception (nav_x): ', e)


wa_bot = WhatsApp(speed=.5, click_speed=.4)
sleep(2)

# Main polling loop: open the next unread chat, read it, reply, wait.
while True:
    wa_bot.nav_green_dot()
    wa_bot.nav_x()
    wa_bot.nav_message()
    wa_bot.get_message()
    wa_bot.nav_input_box()
    wa_bot.send_message()
    sleep(20)
nilq/baby-python
python
import json
import logging
import os
import uuid
from datetime import datetime, timedelta

import boto3
import telegram
from telegram import InlineKeyboardMarkup, InlineKeyboardButton

# Reset any handlers Lambda pre-installed so basicConfig takes effect.
logger = logging.getLogger()
if logger.handlers:
    for handler in logger.handlers:
        logger.removeHandler(handler)
logging.basicConfig(level=logging.INFO)

s3 = boto3.client("s3")

OK_RESPONSE = {
    "statusCode": 200,
    "headers": {"Content-Type": "application/json"},
    "body": json.dumps("ok"),
}
ERROR_RESPONSE = {"statusCode": 400, "body": json.dumps("Oops, something went wrong!")}

# NOTE(review): "USERMAME" is a typo, but it is also the deployed
# environment-variable name -- rename both together if ever fixed.
BOT_USERMAME = os.environ.get("BOT_USERMAME")


def configure_telegram():
    """
    Configures the bot with a Telegram Token.

    Returns a bot instance.
    """
    telegram_token = os.environ.get("TELEGRAM_TOKEN")
    if not telegram_token:
        logger.error("The TELEGRAM_TOKEN must be set")
        raise NotImplementedError

    return telegram.Bot(telegram_token)


bot = configure_telegram()


def handler(event, context):
    """Webhook entry point: handles /start and video attachments.

    Attachments are downloaded, uploaded to the input S3 bucket with
    conversion metadata, and converted asynchronously elsewhere.
    """
    logger.info(f"Event: {event}")

    if event.get("httpMethod") == "POST" and event.get("body"):
        update = telegram.Update.de_json(json.loads(event.get("body")), bot)
        # NOTE(review): effective_message may be None for some update
        # types; the .text access below would then raise -- confirm the
        # webhook only receives message updates.
        chat_id = update.effective_message.chat.id if update.effective_message else None
        text = update.effective_message.text
        attachment = update.effective_message.effective_attachment

        if text in ["/start", f"/start@{BOT_USERMAME}"]:
            bot.send_message(chat_id=chat_id, text="Beep boop I'm under construction!")
        elif attachment:
            bot.send_message(chat_id=chat_id, text="Processing...")
            file_name = uuid.uuid4()
            file_path = f"/tmp/{file_name}.mov"
            attachment_file = bot.get_file(attachment.file_id)
            attachment_file.download(file_path)
            with open(file_path, "rb") as reader:
                # Unlinking an open file is fine on Linux (Lambda): the
                # descriptor stays readable until closed.
                os.remove(file_path)
                s3.put_object(
                    Bucket=os.environ["INPUT_BUCKET_NAME"],
                    Key=f"{file_name}.mov",
                    Body=reader.read(),
                    Expires=datetime.now() + timedelta(hours=1),
                    Metadata={
                        "chat-id": str(chat_id),
                        "input-format": "mov",
                        "target-format": "mp4",
                    },
                )

        return OK_RESPONSE

    return ERROR_RESPONSE


def on_convert(event, context):
    """S3-notification entry point: sends the converted file's URL back
    to the originating chat (read from the object's metadata)."""
    logger.info(f"Event: {event}")

    if "Records" not in event:
        logger.info("Not a S3 invocation")
        return

    for record in event["Records"]:
        if "s3" not in record:
            logger.info("Not a S3 invocation")
            continue

        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]

        if bucket != os.environ["OUTPUT_BUCKET_NAME"]:
            logger.info("Not an output bucket invocation")
            continue

        s3_object = s3.get_object(Bucket=bucket, Key=key)
        chat_id = s3_object["Metadata"].get("chat-id")
        bot.send_message(
            chat_id=chat_id, text=f"https://{bucket}.s3.amazonaws.com/{key}"
        )


def set_webhook(event, context):
    """
    Sets the Telegram bot webhook.
    """
    host = event.get("headers").get("Host")
    stage = event.get("requestContext").get("stage")
    url = f"https://{host}/{stage}/"
    webhook = bot.set_webhook(url)

    if webhook:
        return OK_RESPONSE

    return ERROR_RESPONSE


def build_inline_keyboard(file_name: str, file_extension: str) -> InlineKeyboardMarkup:
    """Build a one-row inline keyboard offering target-format choices.

    NOTE(review): `formats` is currently empty, so the keyboard has no
    buttons yet; populate it with the supported target formats.
    """
    buttons = []
    formats = []

    for format_name in formats:
        callback_data = f'{file_name}-{file_extension}__{format_name}'
        buttons.append(InlineKeyboardButton(format_name, callback_data=callback_data))

    # BUGFIX: InlineKeyboardMarkup expects a list of button *rows*
    # (list[list[InlineKeyboardButton]]), not a flat list of buttons.
    return InlineKeyboardMarkup([buttons])
nilq/baby-python
python
import pandas as pd
from IPython import embed
import sys


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('please input params: <tnse.csv> file')
        exit(1)

    path = sys.argv[1]
    frame = pd.read_csv(path)

    # Keep at most the first 700 'askubuntu' rows; keep every other row.
    kept_rows = []
    askubuntu_seen = 0
    for _, row in frame.iterrows():
        if row['domain'] != 'askubuntu':
            kept_rows.append(row)
        elif askubuntu_seen < 700:
            askubuntu_seen += 1
            kept_rows.append(row)

    # Write the filtered rows next to the input file.
    pd.DataFrame(kept_rows).to_csv(path + "_filtered")
    # embed()
nilq/baby-python
python
class TimePattern(object):
    """Base class for objects that render themselves as a time string."""

    def __str__(self):
        raise NotImplementedError('Please implement __str__ function')


class SimpleTimer(TimePattern):
    """A duration built from hours, minutes and seconds.

    Renders as 'PThh:mm:ss' with overflow normalized (e.g. 90 minutes
    becomes 01:30:00).
    """

    def __init__(self, seconds=0, minutes=0, hours=0):
        self.seconds = seconds
        self.minutes = minutes
        self.hours = hours

    def __str__(self):
        # Collapse everything to seconds, then split back out with divmod
        # so overflowing fields carry into the next unit.
        total = (self.hours * 60 + self.minutes) * 60 + self.seconds
        remaining_minutes, secs = divmod(total, 60)
        hrs, mins = divmod(remaining_minutes, 60)
        return 'PT{0:0>2}:{1:0>2}:{2:0>2}'.format(hrs, mins, secs)
nilq/baby-python
python
from setuptools import setup, find_packages

# Packaging metadata for the `cascade` IMAP e-mail copier.
setup(
    name = "cascade",
    version = "0.1",
    packages = find_packages(),
    install_requires = ['progressbar', 'imaplib2'],
    author = "Oz Akan",
    author_email = "code@akan.me",
    description = "Cascade copies e-mails between IMAP servers",
    # Bug fix: license string read "Apache Version 2.o" (letter o, not zero).
    license = "Apache Version 2.0",
    url = "https://github.com/humankeyboard/cascade",
    classifiers = [
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7"
    ],
    entry_points = {
        'console_scripts' : [
            'cascade = cmd.app:app'
        ]
    }
)
nilq/baby-python
python
# Module wrapping the different clustering methods and the data I/O helpers.

try:
    # General imports
    import numpy as np
    import pylab
    import sys
    import platform
    import matplotlib.pyplot as plt
    import re
    # Local imports
    import kmeans
    import rkde
except ImportError:
    # Narrowed from a bare `except:` so that only missing dependencies abort.
    exit(1)

""" Clustering """

# Explicit dispatch table replacing the previous eval()-based lookup: only the
# two clustering modules imported above are legal method names.
_METHODS = {'kmeans': kmeans, 'rkde': rkde}

# Cluster the data with the requested method.
# Input:
#   - M       : distance matrix between the objects
#   - methode : method name ('kmeans' or 'rkde')
#   - params  : parameters of the requested method
#               - kmeans : params = [k, n_iter]
#               - rkde   : params = [bandwidth, prob]
# Output:
#   - assign      : array giving the cluster number of each object
#   - nb_clusters : number of clusters formed
def make_clusters(M, methode, params):
    # Safety/idiom fix: eval(methode + ".do") executed arbitrary expressions;
    # a dict lookup raises KeyError on unknown method names instead.
    assign, nb_clusters = _METHODS[methode].do(M, params[0], params[1])
    return assign, nb_clusters

""" Lecture et affichage de donnees """

# Read whitespace-separated floats from `file_name`.
# `nb_item` limits the number of lines read (-1 reads everything).
# Returns a list of lists of floats.
def read_data(file_name, nb_item = -1):
    data = []
    # `with` guarantees the file is closed even if float() raises.
    with open(file_name, 'r') as f:
        for cpt, line in enumerate(f):
            if 0 <= nb_item <= cpt:
                break
            fields = re.split(r'\s+', line)  # '\s' matches whitespace characters
            data.append([float(x) for x in fields if x != ''])
    return data

# Display a scatter plot of the 2-D points in `data`, colored by cluster.
# - data   : n*2 point matrix
# - assign : length-n assignment of each point to a cluster
def show(data, assign):
    colors = "bgrcmyk"
    symbols = ".ov18sp*h+xD_"
    nb_clusters = max(assign) + 1
    pylab.figure()
    # Bug fix: the old code computed min(data[:][0]) (i.e. over data[0] only)
    # and used an undefined loop variable `i` for the max; compute the true
    # extrema over both coordinates of all points.
    mini = min(min(p[0] for p in data), min(p[1] for p in data))
    maxi = max(max(p[0] for p in data), max(p[1] for p in data))
    pylab.xlim([mini, maxi])
    pylab.ylim([mini, maxi])
    if nb_clusters < 8:
        for i_k in range(nb_clusters):
            pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
                       [data[i][1] for i in range(len(data)) if assign[i] == i_k],
                       colors[i_k] + ".")
    else:
        for i_k in range(nb_clusters):
            # Bug fix: a misplaced parenthesis concatenated the symbol to the
            # *return value* of plot() instead of to the style string.
            pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
                       [data[i][1] for i in range(len(data)) if assign[i] == i_k],
                       colors[i_k % 7] + symbols[int(i_k / 7)])
    pylab.show()

""" Lecture et ecriture d'une assignation """

# Read an assignment (one integer cluster id per line) from `file_name`.
# Non-integer lines (e.g. the metadata header) are skipped.
# Returns a numpy array of ints.
def read_assign(file_name):
    assign_tmp = []
    with open(file_name, 'r') as f:
        for line in f:
            try:
                assign_tmp.append(int(line))
            except ValueError:
                continue
    return np.array(assign_tmp)

# Write an assignment plus its metadata to `file_name`.
# - assign  : the assignment to write
# - nb_iter : iterations performed by the algorithm (-1 if not iterative)
# - s       : the seed used for the clustering
def write_cluster(file_name, assign, nb_iter, s):
    nb_data = len(assign)
    nb_cluster = max(assign) + 1
    with open(file_name, 'w') as f:
        f.write('nb_cluster = ' + str(nb_cluster) + '\n')
        f.write('nb_iter = ' + str(nb_iter) + '\n')
        f.write('nb_data = ' + str(nb_data) + '\n')
        f.write('seed = ' + str(s) + '\n')
        for i in assign:
            f.write(str(i) + '\n')

""" Fonctions non encore retravaillees """

# Save a figure of a clustering:
# data_file   = file containing the data points
# assign_file = file produced by the clustering, holding the assignment table
# file_figure = name of the file the image is saved to
# format      = extension of the created file (pdf, svg, png, ...)
# example: save('cercles/cercles.txt', 'cercles_kmeans', 'figure_cercles_kmeans', 'pdf')
def save(data_file, assign_file, file_figure, format):
    """Plot the clustering of `data_file` per `assign_file` and save the
    figure as <file_figure>.<format> in the results directory."""
    data = read_data(data_file)
    # Bug fix: read_assign() takes a single file-name argument; the old call
    # passed `data` as the file name.
    assign = read_assign(assign_file)
    # Bug fix: the module imports numpy as `np`; `numpy.amax` raised NameError.
    nombre_clusters = np.amax(assign) + 1
    plt.ioff()
    fig = plt.figure()
    colors = "bgrcmyk"
    symbols = ".ov18sp*h+xD_"
    mini = min(min([data[i][0] for i in range(len(data))]),
               min([data[i][1] for i in range(len(data))]))
    maxi = max(max([data[i][0] for i in range(len(data))]),
               max([data[i][1] for i in range(len(data))]))
    plt.xlim([mini, maxi])
    plt.ylim([mini, maxi])
    if nombre_clusters < 8:
        # Few clusters: one solid color per cluster.
        for i_k in range(nombre_clusters):
            plt.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
                     [data[i][1] for i in range(len(data)) if assign[i] == i_k],
                     colors[i_k] + ".")
    elif nombre_clusters < 85:
        # Up to 7*12 clusters: combine color and marker symbol.
        for i_k in range(nombre_clusters):
            plt.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
                     [data[i][1] for i in range(len(data)) if assign[i] == i_k],
                     colors[i_k % 7] + symbols[int(i_k / 7)])
    else:
        print("too many clusters")
    # NOTE(review): machine-specific, hard-coded output directories; consider
    # making the destination a parameter.
    if platform.system() == "Windows":
        plt.savefig('C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/' + file_figure + '.' + format)
    else:
        plt.savefig('../data/Results/' + file_figure + '.' + format)
    plt.close(fig)
nilq/baby-python
python
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, PasswordField
from wtforms import HiddenField, TextAreaField, FileField, SubmitField
from wtforms.validators import DataRequired, NumberRange

# Keys used in request/response payloads.
ATTR_DATA = 'data'
ATTR_ACTION = 'action'
ATTR_MANAGER = 'manager'
ATTR_KEY_LEN = 'length'
ATTR_KEY_E = 'keyE'
ATTR_KEY_D = 'keyD'
ATTR_KEY_N = 'keyN'

# Action identifiers carried in each form's hidden `action` field so the view
# can tell the submitted forms apart.
ACTION_GEN_KEY = 'gen_key'
ACTION_SET_KEY = 'set_key'
ACTION_GO = 'go'
ACTION_POST = 'post'


class AuthForm(FlaskForm):
    # Login form; the labels are user-facing Russian strings.
    login = StringField('Логин', validators=[DataRequired()])
    passw = PasswordField('Пароль', validators=[DataRequired()])
    submit = SubmitField('Войти')


class KeyGenForm(FlaskForm):
    # Key generation: asks only for the desired key length.
    action = HiddenField(render_kw={'value': ACTION_GEN_KEY})
    length = IntegerField('Длина ключа', validators=[DataRequired()])
    submit = SubmitField('Сгенерировать')


class KeySetForm(FlaskForm):
    # Manually set an RSA key triple (E, D, N).
    # NOTE(review): NumberRange() without min/max bounds accepts any number.
    action = HiddenField(render_kw={'value': ACTION_SET_KEY})
    keyE = IntegerField('E', validators=[DataRequired(), NumberRange()])
    keyD = IntegerField('D', validators=[DataRequired(), NumberRange()])
    keyN = IntegerField('N', validators=[DataRequired(), NumberRange()])
    submit = SubmitField('Установить')


class MessageForm(FlaskForm):
    # Message entry: free text or an uploaded file to encrypt.
    action = HiddenField(render_kw={'value': ACTION_GO})
    message = TextAreaField()
    inFile = FileField()
    submit = SubmitField('Зашифровать')
nilq/baby-python
python
# coding=utf-8
import random  # kept for backward compatibility; code generation now uses `secrets`
import secrets

from common import constant
from common import errcode
from dao.sms.sms_dao import SmsDao
from handlers.base.base_handler import BaseHandler
from mycelery.tasks import send_sms_task


class SmsChangePhoneHandler(BaseHandler):
    """POST handler issuing a 4-digit verification code for a phone-number
    change: the code is persisted via SmsDao and delivered by SMS through an
    asynchronous celery task."""

    methods = ['POST']

    def __init__(self):
        # Expected and required request parameters.
        expect_request_para = {
            "phone": None,
            "common_param": None,
        }
        need_para = (
            "phone",
            "common_param",
        )
        super(SmsChangePhoneHandler, self).__init__(expect_request_para, need_para)

        # Special case: this endpoint may be called without a session id.
        self.sid_control_level = constant.SID_CAN_BE_NULL

    def _process_imp(self):
        # Security fix: verification codes must come from a CSPRNG —
        # random.randint() output is predictable. Range stays 1000..9999
        # inclusive, matching the previous behavior.
        code = 1000 + secrets.randbelow(9000)

        # Persist the code so it can be verified later.
        SmsDao.insert(self.para_map["phone"], code, constant.SMS_CHANGE_PHONE)

        # Send the SMS asynchronously.
        send_sms_task.send_change_phone_sms.delay(self.para_map["phone"], code)

        self.ret_code = errcode.NO_ERROR
        self.ret_msg = 'ok'
        return
nilq/baby-python
python
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution:
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Each list stores a number least-significant digit first. Decode both
        # numbers, add them, and return the digits of the sum in reversed
        # (least-significant-first) order.
        def as_number(node):
            # Collect digits head-to-tail, then flip so the head is the least
            # significant digit before parsing.
            digits = []
            while node is not None:
                digits.append(str(node.val))
                node = node.next
            return int(''.join(reversed(digits)))

        total = as_number(l1) + as_number(l2)
        return [int(ch) for ch in reversed(str(total))]
nilq/baby-python
python
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/labelbox/1_CocoExporter.ipynb (unless otherwise specified).

__all__ = ['UnknownFormatError', 'coco_from_json', 'make_coco_metadata', 'add_label', 'append_polygons_as_annotations',
           'label_to_polygons', 'LOGGER']

# Cell
"""
Module for converting labelbox.com JSON exports to MS COCO format.
"""
# https://raw.githubusercontent.com/Labelbox/Labelbox/master/exporters/coco-exporter/coco_exporter.py

# Cell
import datetime as dt
import json
import logging
from typing import Any, Dict

from PIL import Image
import requests
from shapely import wkt
from shapely.geometry import Polygon

# Cell
class UnknownFormatError(Exception):
    """Exception raised for unknown label_format"""

    def __init__(self, label_format):
        Exception.__init__(self)
        self.message = "Provided label_format '{}' is unsupported".format(label_format)

LOGGER = logging.getLogger(__name__)

def coco_from_json(labeled_data, coco_output, label_format='XY'):
    "Writes labelbox JSON export into MS COCO format."
    # read labelbox JSON output
    with open(labeled_data, 'r') as file_handle:
        label_data = json.loads(file_handle.read())

    # setup COCO dataset container and info
    # NOTE(review): project metadata is taken from the first entry only —
    # assumes all entries belong to the same project.
    coco = make_coco_metadata(label_data[0]['Project Name'], label_data[0]['Created By'],)

    for data in label_data:
        # Download and get image name; network failures skip the entry rather
        # than aborting the whole export.
        try:
            add_label(coco, data['ID'], data['Labeled Data'], data['Label'], label_format)
        except requests.exceptions.MissingSchema as exc:
            LOGGER.warning(exc)
            continue
        except requests.exceptions.ConnectionError:
            LOGGER.warning('Failed to fetch image from %s, skipping', data['Labeled Data'])
            continue

    with open(coco_output, 'w+') as file_handle:
        file_handle.write(json.dumps(coco))

def make_coco_metadata(project_name: str, created_by: str) -> Dict[str, Any]:
    """Initializes COCO export data structure.

    Args:
        project_name: name of the project
        created_by: email of the project creator

    Returns:
        The COCO export represented as a dictionary.
    """
    return {
        'info': {
            'year': dt.datetime.now(dt.timezone.utc).year,
            'version': None,
            'description': project_name,
            'contributor': created_by,
            'url': 'labelbox.com',
            'date_created': dt.datetime.now(dt.timezone.utc).isoformat()
        },
        'images': [],
        'annotations': [],
        'licenses': [],
        'categories': []
    }

def add_label(
        coco: Dict[str, Any], label_id: str, image_url: str,
        labels: Dict[str, Any], label_format: str):
    """Incrementally updates COCO export data structure with a new label.

    Args:
        coco: The current COCO export, will be incrementally updated by this method.
        label_id: ID for the instance to write
        image_url: URL to download image file from
        labels: Labelbox formatted labels to use for generating annotation
        label_format: Format of the labeled data. Valid options are: "WKT" and
                      "XY", default is "XY".

    Returns:
        The updated COCO export represented as a dictionary.
    """
    image = {
        "id": label_id,
        "file_name": image_url,
        "license": None,
        "flickr_url": image_url,
        "coco_url": image_url,
        "date_captured": None,
    }
    # Stream the image just to read its dimensions; decode_content handles
    # gzip/deflate transfer encodings on the raw stream.
    response = requests.get(image_url, stream=True, timeout=10.0)
    response.raw.decode_content = True
    image['width'], image['height'] = Image.open(response.raw).size

    coco['images'].append(image)

    # remove classification labels (Skip, etc...)
    # (a non-dict `labels` value means the entry carries no segmentations)
    if not callable(getattr(labels, 'keys', None)):
        return

    # convert label to COCO Polygon format
    for category_name, label_data in labels.items():
        try:
            # check if label category exists in 'categories' field
            # (categories are matched by 'supercategory', which is always set
            # equal to 'name' below)
            category_id = [c['id'] for c in coco['categories']
                           if c['supercategory'] == category_name][0]
        except IndexError:
            # First time this category is seen: register it with the next id.
            category_id = len(coco['categories']) + 1
            category = {
                'supercategory': category_name,
                'id': category_id,
                'name': category_name
            }
            coco['categories'].append(category)

        polygons = label_to_polygons(label_format, label_data)
        append_polygons_as_annotations(coco, image, category_id, polygons)

def append_polygons_as_annotations(coco, image, category_id, polygons):
    "Adds `polygons` as annotations in the `coco` export"
    for polygon in polygons:
        # COCO segmentation is a flat [x1, y1, x2, y2, ...] coordinate list.
        segmentation = []
        for x_val, y_val in polygon.exterior.coords:
            segmentation.extend([x_val, y_val])

        annotation = {
            "id": len(coco['annotations']) + 1,
            "image_id": image['id'],
            "category_id": category_id,
            "segmentation": [segmentation],
            "area": polygon.area,  # float
            # COCO bbox convention: [x, y, width, height].
            "bbox": [polygon.bounds[0], polygon.bounds[1],
                     polygon.bounds[2] - polygon.bounds[0],
                     polygon.bounds[3] - polygon.bounds[1]],
            "iscrowd": 0
        }

        coco['annotations'].append(annotation)

def label_to_polygons(label_format, label_data):
    "Converts segmentation `label: String!` into polygons"
    # Handles both Labelbox export schema V2 (bare geometry) and V3 (list of
    # {'geometry': ...} objects). Raises UnknownFormatError for anything else.
    if label_format == 'WKT':
        if isinstance(label_data, list):  # V3
            polygons = map(lambda x: wkt.loads(x['geometry']), label_data)
        else:  # V2
            polygons = wkt.loads(label_data)
    elif label_format == 'XY':
        polygons = []
        for xy_list in label_data:
            if 'geometry' in xy_list:  # V3
                xy_list = xy_list['geometry']

                # V2 and V3
                if not isinstance(xy_list, list):
                    LOGGER.warning('Could not get an point list to construct polygon, skipping')
                    continue
            else:  # V2, or non-list
                if not isinstance(xy_list, list) or not xy_list or 'x' not in xy_list[0]:
                    # skip non xy lists
                    LOGGER.warning('Could not get an point list to construct polygon, skipping')
                    continue

            if len(xy_list) > 2:  # need at least 3 points to make a polygon
                polygons.append(Polygon(map(lambda p: (p['x'], p['y']), xy_list)))
    else:
        exc = UnknownFormatError(label_format=label_format)
        LOGGER.exception(exc.message)
        raise exc

    return polygons
nilq/baby-python
python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Custom flag type definitions."""

import gflags
import numpy


def DEFINE_linspace(name, default, help_string, nonempty=False,
                    increasing=False,
                    flag_values=gflags.FLAGS, **kwargs):  # pylint: disable=invalid-name
  """Defines a 'linspace' flag.

  The flag value should be specified as <lower>,<upper>,<count>.  The
  components are used as arguments to numpy.linspace, so they must be parsable
  as float, float, and int, respectively.  The parsed flag value will be a
  1-dimensional numpy.ndarray.

  Args:
    name: Name of the flag.
    default: Default value (as unparsed string), or None if flag is unset by
        default.
    help_string: Helpful description of the flag.
    nonempty: Indicates whether the flag value is required to be nonempty.  If
        True, None is still an allowable default.  Use gflags.MarkFlagAsRequired
        to disallow None.
    increasing: Indicates whether the flag value should be an increasing array.
        This is only enforced if the parsed value has >=2 elements.
    flag_values: The gflags.FlagValues object in which to define the flag.
    **kwargs: See gflags.DEFINE.
  """
  gflags.DEFINE(_LinspaceParser(), name, default, help_string,
                flag_values=flag_values, **kwargs)

  if nonempty:
    # numpy.array can't be implicitly converted to a boolean.
    # pylint: disable=g-explicit-length-test
    gflags.RegisterValidator(name, lambda v: len(v) > 0,
                             '--%s must specify a nonempty range.' % name,
                             flag_values=flag_values)

  if increasing:
    # A linspace array is monotonic, so comparing the endpoints suffices.
    # Bug fix: the message previously lacked the `% name` interpolation, so
    # the reported error contained a literal '--%s' (inconsistent with the
    # `nonempty` validator above).
    gflags.RegisterValidator(name, lambda v: len(v) < 2 or v[-1] > v[0],
                             '--%s must specify an increasing range.' % name,
                             flag_values=flag_values)


class _LinspaceParser(gflags.ArgumentParser):
  """Parser for 'linspace' flag type."""

  def Parse(self, argument):
    """Parses '<lower>,<upper>,<count>' into a numpy.linspace array.

    Raises:
      ValueError: If the argument doesn't have exactly three components, or a
          component fails to parse as float/float/int.
    """
    parts = argument.split(',')
    if len(parts) != 3:
      raise ValueError('Wrong number of components. Must be of the form '
                       '<lower>,<upper>,<count>', argument)

    try:
      lower, upper, count = float(parts[0]), float(parts[1]), int(parts[2])
    except ValueError:
      raise ValueError('Bad value. Components must be parsable as float, '
                       'float, and int, respectively', argument)

    return numpy.linspace(lower, upper, count)

  def Type(self):
    return numpy.ndarray
nilq/baby-python
python
""" authentication/views.py Created on Oct. 23, 2017 by Jiayao """ from __future__ import (absolute_import) from django.shortcuts import render from django.http import HttpResponseRedirect from django.urls import reverse from django.views import generic from django.contrib.auth import (authenticate, login, logout) from django.contrib.auth.decorators import login_required from account.models import (User, Tutor) from .forms import (UserForm, TutorForm, UpdateUserForm, UpdateTutorForm) class SINGUP_STATUS: NONE = 0 SUCCESS = 1 EXISTED = 2 FAILED = 3 class ProfileView(generic.TemplateView): '""Models the profile view.""' model = User template_name = 'profile.html' login_required = True def get_context_data(self, **kwargs): context = super(ProfileView, self).get_context_data(**kwargs) context['user_form'] = None context['tutor_form'] = None context['tutor_type'] = None context['tutor'] = None return context def get(self, req, *args, **kwargs): context = self.get_context_data(**kwargs) user = User.objects.get(username=req.session['username']) context['user_form'] = UpdateUserForm(prefix='user_form', instance=user) if user.tutor is not None: if user.tutor.tutor_type == Tutor.PRIVATE_TUTOR: context['tutor_type'] = 'Private' else: context['tutor_type'] = 'Contracted' context['tutor_form'] = UpdateTutorForm( prefix='tutor_form', instance=user.tutor) context['tutor'] = user.tutor return self.render_to_response(context) def post(self, req, *args, **kwargs): user = User.objects.get(username=req.session['username']) user_form = UpdateUserForm(req.POST, prefix='user_form', instance=user) if user_form.is_valid(): user_form.save() else: return render(req, 'message.html', {'message_title': 'Profile Update Failure', 'message_content': 'Please enter valid information.'}) if user.tutor is not None: tutor_form = UpdateTutorForm( req.POST, prefix='tutor_form', instance=user.tutor) # print(tutor_form) if tutor_form.is_valid(): tutor_form.save() else: print(tutor_form.errors) return 
render(req, 'message.html', {'message_title': 'Profile Update Failure', 'message_content': 'Please enter valid information.'}) return render(req, 'message.html', {'message_title': 'Profile', 'message_content': 'Update Successful.'}) class IndexView(generic.TemplateView): '""Models the index view.""' template_name = 'signup.html' def get_context_data(self, **kwargs): context = super(IndexView, self).get_context_data(**kwargs) context['choice'] = True context['user_form'] = None context['tutor_form'] = None context['status'] = SINGUP_STATUS.NONE context['SIGNUP_STATUS'] = SINGUP_STATUS return context class LoginView(generic.TemplateView): '""Models the login view.""' template_name = 'login.html' def get_context_data(self, **kwargs): context = super(LoginView, self).get_context_data(**kwargs) context['status'] = 1 return context def post(self, req, *args, **kwargs): user = authenticate(username=req.POST['username'], password=req.POST['password']) if user is not None: login(req, user) req.session['username'] = req.POST['username'] if user.is_staff: return HttpResponseRedirect(reverse('admin:index')) return HttpResponseRedirect(reverse('homepage')) else: return render(req, self.template_name, {'status': 0}) @login_required def logout_view(req): logout(req) req.session['username'] = None return HttpResponseRedirect(reverse('homepage')) class StudentFormView(generic.edit.CreateView): '""Models the sign-up form.""' template_name = 'signup.html' form_class = UserForm class TutorFormView(generic.edit.CreateView): '""Models the sign-up form.""' template_name = 'signup.html' form_class = TutorForm class StudentView(IndexView): def get(self, req, *args, **kwargs): context = self.get_context_data(**kwargs) context['choice'] = False context['user_form'] = UserForm(prefix='user_form') return self.render_to_response(context) def post(self, req, *args, **kwargs): context = self.get_context_data(**kwargs) form = UserForm(req.POST, prefix='user_form') if form.is_valid(): username = 
form.cleaned_data['username'] password = form.cleaned_data['password'] try: user = User.objects.get(username=username) context['status'] = SINGUP_STATUS.EXISTED except User.DoesNotExist: user = form.save() user.set_password(password) user.save() context['status'] = SINGUP_STATUS.SUCCESS else: context['status'] = SINGUP_STATUS.FAILED return self.render_to_response(context) class TutorView(IndexView): def get(self, req, *args, **kwargs): context = self.get_context_data(**kwargs) context['choice'] = False context['user_form'] = UserForm(prefix='user_form') context['tutor_form'] = TutorForm(prefix='tutor_form') return self.render_to_response(context) def post(self, req, *args, **kwargs): context = self.get_context_data() form = UserForm(req.POST, prefix='user_form') tutor_form = TutorForm(req.POST, prefix='tutor_form') if form.is_valid() and tutor_form.is_valid(): username = form.cleaned_data['username'] password = form.cleaned_data['password'] try: user = User.objects.get(username=username) context['status'] = SINGUP_STATUS.EXISTED except User.DoesNotExist: user = form.save() user.set_password(password) user.save() tutor_form.cleaned_data['user'] = user tutor = tutor_form.save(commit=False) tutor.user = user tutor.save() context['status'] = SINGUP_STATUS.SUCCESS else: context['status'] = SINGUP_STATUS.FAILED return self.render_to_response(context) class BothView(IndexView): '""Models the sign-up form.""' def get(self, req, *args, **kwargs): context = self.get_context_data(**kwargs) context['choice'] = False context['user_form'] = UserForm(prefix='user_form') context['tutor_form'] = TutorForm(prefix='tutor_form') return self.render_to_response(context) def post(self, req, *args, **kwargs): context = self.get_context_data() form = UserForm(req.POST, prefix='user_form') tutor_form = TutorForm(req.POST, prefix='tutor_form') if form.is_valid() and tutor_form.is_valid(): username = form.cleaned_data['username'] password = form.cleaned_data['password'] try: user = 
User.objects.get(username=username) context['status'] = SINGUP_STATUS.EXISTED except User.DoesNotExist: user = form.save() user.set_password(password) user.save() tutor_form.cleaned_data['user'] = user tutor = tutor_form.save(commit=False) tutor.user = user tutor.save() context['status'] = SINGUP_STATUS.SUCCESS else: context['status'] = SINGUP_STATUS.FAILED return self.render_to_response(context) PASSWORD_EMAIL_SENDER = 'noreply@hola-inc.top' PASSWORD_RESET_TOKEN_REGEX = r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$' PASSWORD_RESET_DONE_MSG = r""" We've emailed you instructions for setting your password, if an account exists with the email you entered. You should receive them shortly. If you don't receive an email, please make sure you've entered the address you registered with," and check your spam folder. """ PASSWORD_RESET_EX_MSG = r""" The password reset link was invalid, possibly because it has already been used. Please request a new password reset. """ PASSWORD_RESET_COMPLETE = """ Your password has been set. You may go ahead and login now. """
nilq/baby-python
python
""" * * Author: Juarez Paulino(coderemite) * Email: juarez.paulino@gmail.com * """ a,b,c,d=[int(input())for _ in[0]*4] print('10'[a!=d or (a<1 and d<1 and c>0)])
nilq/baby-python
python
from flask import Blueprint
from flask import Response
from flask import abort
from flask import g
from flask import jsonify
from flask import request
from flask import current_app

from gleague.api import admin_required
from gleague.api import login_required
from gleague.core import db
from gleague.models import Match
from gleague.models import PlayerMatchRating
from gleague.match_import import create_match_from_replay

matches_bp = Blueprint("matches", __name__)


@matches_bp.route("/", methods=["POST"])
@admin_required
def create_match():
    """Create a match from an uploaded replay file (admin only).

    Returns 201 on success, 400 when no file was uploaded.
    """
    replay = request.files["file"]
    if replay:
        # Base points difference is configurable; defaults to 20.
        base_pts_diff = current_app.config.get("MATCH_BASE_PTS_DIFF", 20)
        create_match_from_replay(replay, base_pts_diff)
        return Response(status=201)
    return abort(400)


@matches_bp.route("/<int:match_id>/ratings/", methods=["GET"])
def get_rates(match_id):
    """Return all player ratings of a match; 404 for an unknown match.

    The current user's steam id (when logged in) lets the query mark which
    ratings were submitted by the requester.
    """
    if not Match.is_exists(match_id):
        return abort(404)
    steam_id = g.user.steam_id if g.user else None
    ratings = PlayerMatchRating.get_match_ratings(match_id, steam_id)
    return jsonify({"ratings": ratings}), 200


@matches_bp.route(
    "/<int:match_id>/ratings/<int:player_match_stats_id>", methods=["POST"]
)
@login_required
def rate_player(match_id, player_match_stats_id):
    """Rate a player's performance in a match (1-5).

    Responses: 400 for a non-integer rating, 404 for an unknown match,
    406 for an out-of-range rating, 403 when the rater did not play in the
    match, 200 on success.
    """
    rating = request.args.get("rating", None)
    try:
        rating = int(rating)
    except Exception:
        # NOTE(review): broad catch also covers rating=None (TypeError).
        return abort(400)
    match = Match.query.get(match_id)
    if not match:
        return abort(404)
    if rating not in range(1, 6):
        return abort(406)
    # Only participants of the match may rate its players.
    if not match.is_played(g.user.steam_id):
        return abort(403)
    db.session.add(
        PlayerMatchRating(
            player_match_stats_id=player_match_stats_id,
            rating=rating,
            rated_by_steam_id=g.user.steam_id,
        )
    )
    db.session.flush()
    return Response(status=200)
nilq/baby-python
python
# Notes: copied inspect.py, dis.py, and opcodes.py into Jython dir (replacing stub inspect.py) # Opcode will not work as using JVM, but required by dis.py, which was required by inspect.py # only want functionality of getting source lines. # Also copied textwrap.py? # support for using tk import java.io from java.lang import Class, Runnable, Thread import javax.swing.filechooser from javax.swing import SwingUtilities, SwingConstants, \ AbstractAction, BorderFactory, Box, BoxLayout, ImageIcon, \ JDialog, JFrame, JScrollPane, JPanel, JComponent, JSplitPane, JTabbedPane, \ JColorChooser, JOptionPane, JFileChooser, \ JTextArea, JTextField, JLabel, JPasswordField, JEditorPane, JTextPane, \ JButton, JCheckBox, \ JMenuItem, JCheckBoxMenuItem, JMenuBar, JMenu, JPopupMenu, KeyStroke, \ JTree, \ JComboBox, DefaultComboBoxModel, \ JTable, \ JList, ListSelectionModel, DefaultListCellRenderer, DefaultListModel, \ JSlider, \ TransferHandler from javax.swing.table import DefaultTableModel, DefaultTableCellRenderer from javax.swing.event import ChangeListener, TreeSelectionListener, ListSelectionListener, HyperlinkEvent, TableModelListener from java.awt.event import ActionListener, MouseAdapter, MouseMotionAdapter, MouseEvent, WindowFocusListener, MouseListener, KeyAdapter, KeyEvent from javax.swing.text.html import HTMLEditorKit, FormView, HTML from javax.swing.text import StyleConstants from javax.swing.tree import DefaultMutableTreeNode, DefaultTreeModel, DefaultTreeCellRenderer, TreePath from javax.swing.border import BevelBorder from java.awt import Color, Cursor, BorderLayout, FlowLayout, Font, Dimension, Rectangle, Component, Polygon, Point, GridLayout, GridBagLayout, BasicStroke, Toolkit from pawt import GridBag from java.awt.datatransfer import DataFlavor, Transferable from java.awt.dnd import DropTarget, DnDConstants, DropTargetAdapter, DragSourceListener, \ DragGestureListener, DragSource, DragSourceAdapter from java.awt.image import BufferedImage import os, os.path 
############# useful classes that are not Swing specific #########

class Point:
    """Simple mutable 2-D coordinate pair.

    NOTE(review): this shadows java.awt.Point imported above — presumably
    intentional, but worth confirming.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "Point(%s, %s)" % (self.x, self.y)


class MouseEvent:
    """Bundle of the positions involved in one mouse gesture."""

    def __init__(self, isMeta, eventName, downPosition, previousPosition,
                 currentPosition, upPosition):
        self.isMeta = isMeta
        self.eventName = eventName
        self.downPosition = downPosition
        self.previousPosition = previousPosition
        self.currentPosition = currentPosition
        self.upPosition = upPosition


# Late-bound callable handed to Swing widgets: the target method is looked up
# on `receiver` at *call* time, so methods edited in the system after the
# widget was created still have their changes invoked by the widget.
# Example:
#   Instead of: self.widget.bind("<ButtonRelease-1>", self.OnMouseEventButton1Up)
#   Use:        self.widget.bind("<ButtonRelease-1>", LateBindWrapper(self, "OnMouseEventButton1Up"))
class LateBindWrapper:
    def __init__(self, receiver, methodName, methodIsOptional=0, extraArg=None):
        self.receiver = receiver
        self.methodName = methodName
        self.methodIsOptional = methodIsOptional
        self.extraArg = extraArg

    def __call__(self, *args, **kwargs):
        # Guard clause: a missing method is an error unless marked optional.
        if not self.receiver.hasProperty(self.methodName):
            if self.methodIsOptional:
                return None
            raise AttributeError(self.methodName)
        target = getattr(self.receiver, self.methodName)
        # A truthy extraArg is prepended to the positional arguments.
        if self.extraArg:
            return target(self.extraArg, *args, **kwargs)
        return target(*args, **kwargs)
# Provides a "components" attribute in Morph for PythonCard compatibility:
# attribute access on this object is forwarded to a lookup method on the
# receiver (e.g. obj.components.foo -> receiver.method("foo")).
class IndirectAttributeAccessor:
    def __init__(self, receiver, methodName):
        self._receiver = receiver
        self._methodName = methodName

    def __getattr__(self, name):
        function = getattr(self._receiver, self._methodName)
        result = function(name)
        # A None lookup result is treated as a missing attribute.
        # NOTE(review): `== None` should be `is None`; kept byte-identical
        # here (Python 2 / Jython file — note the py2 raise syntax below).
        if result == None:
            raise AttributeError, name
        return result

# Support for window management

def WorldShouldNoLongerBeUsedAsInspector(root, world):
    # make sure no one still using this window as inspector
    for window in root.openWindows:
        if window.inspectorWorld == world:
            window.inspectorWorld = None

def WindowShouldNoLongerHaveInspector(root, windowToClose, otherWindowsToClose):
    # make sure no inspector is still hooked to this window; any inspector
    # found is detached and queued in otherWindowsToClose for closing too.
    for window in root.openWindows:
        #print window, window.inspectorForViewer # PDF FIX BROKEN
        if window.inspectorForViewer and window.inspectorForViewer == windowToClose:
            #print "found window"
            window.inspectorForViewer = None
            window._updateTitle()
            otherWindowsToClose.append(window)

def ExposeWindow(root, window):
    # keep exposed window at end of list (treated as front-most)
    root.openWindows.remove(window)
    root.openWindows.append(window)

# close a window and related inspector (and its inspector's inspector etc.)
def CloseWindow(root, window): otherWindowsToClose = [] WorldShouldNoLongerBeUsedAsInspector(root, window.world) WindowShouldNoLongerHaveInspector(root, window, otherWindowsToClose) window.world.removeMorph(window) if window in root.openWindows: root.openWindows.remove(window) if not root.openWindows: print "all windows closed -- PataPata application shutting down" root.quit() # close related inspectors if otherWindowsToClose: for otherWindow in otherWindowsToClose: CloseWindow(root, otherWindow) # debugging class WrappedOutput: def __init__(self, oldStream): self.oldStream = oldStream def write(self, text): raise "write %s" % text self.oldStream.write(text) if text == None or text == "None": raise "Stop" # for tree text compoarison # needs imporovements def MyCompare(a, b): """ ensure that things with brackets sort after text """ if type(a) in (str, unicode): aText = a else: aText = a.GetText() if type(b) in (str, unicode): bText = b else: bText = b.GetText() inheritedText = "[Inherited] " if not aText[0].isalnum() and not bText[0].isalnum(): if aText.find(inheritedText) == 0 and bText.find(inheritedText) == 0: return MyCompare(aText[len(inheritedText):], bText[len(inheritedText):]) return cmp(aText, bText) elif not aText[0].isalnum(): return 1 elif not bText[0].isalnum(): return -1 else: return cmp(aText, bText) #################################################### # utility function def GetNewText(parent, oldText="", prompt="Enter the new text", title="Text input"): # PDF FIX -- does not use title return JOptionPane.showInputDialog(parent, prompt, oldText) def ShowMessage(parent, messageText="Something happened", title="Message"): JOptionPane.showMessageDialog(parent, messageText, title, JOptionPane.PLAIN_MESSAGE) class OptionsCallbackPopupMenu: # options should be a list of (name, function, [arg1, [arg2]]) tuples def __init__(self, parent, x, y, options, world, extraOptions=None): self.world = world self.popupMenu = JPopupMenu() self.options = options 
self.addToMenuForOptions(options) if extraOptions: self.addToMenuForOptions(extraOptions) self.popupMenu.show(parent, x, y) def addToMenuForOptions(self, options, menu=None): if not menu: menu = self.popupMenu for option in options: if not option or not option[0]: menu.addSeparator() else: if type(option[1]) in [tuple, list]: # nested menu submenu = JMenu(option[0]) self.addToMenuForOptions(option[1], submenu) menu.add(submenu) else: menuItem = JMenuItem(option[0], actionPerformed=lambda event, option=option: self.OnChoice(option)) menu.add(menuItem) def OnChoice(self, option): print "OnChoice", option if len(option) == 2: option[1]() elif len(option) == 3: option[1](option[2]) elif len(option) == 4: option[1](option[2], option[3]) def BindCommonEvents(morph, subwidget=None): if subwidget: widget = subwidget else: widget = morph.widget # PDF FIX PORT """ widget.bind("<FocusIn>", LateBindWrapper(morph, "gainFocus")) widget.bind("<FocusOut>", LateBindWrapper(morph, "loseFocus")) widget.bind("<Enter>", LateBindWrapper(morph, "mouseEnter")) widget.bind("<Leave>", LateBindWrapper(morph, "mouseLeave")) $$widget.bind("<Motion>", LateBindWrapper(morph, "mouseMove")) $$widget.bind("<ButtonPress-1>", LateBindWrapper(morph, "mouseDown")) $$widget.bind("<ButtonRelease-1>", LateBindWrapper(morph, "mouseUp")) $$widget.bind("<B1-Motion>", LateBindWrapper(morph, "mouseDrag")) widget.bind("<Double-Button-1>", LateBindWrapper(morph, "mouseDoubleClick")) widget.bind("<ButtonPress-2>", LateBindWrapper(morph, "mouseMiddleDown")) widget.bind("<ButtonRelease-2>", LateBindWrapper(morph, "mouseMiddleUp")) widget.bind("<Double-Button-2>", LateBindWrapper(morph, "mouseMiddleDoubleClick")) # these three may depend on meaning of context -- maybe mouse plus another key on Mac? 
$$widget.bind("<ButtonPress-3>", LateBindWrapper(morph, "mouseContextDown")) $$widget.bind("<ButtonRelease-3>", LateBindWrapper(morph, "mouseContextUp")) widget.bind("<Double-Button-3>", LateBindWrapper(morph, "mouseContextDoubleClick")) """ widget.addMouseMotionListener(CallbackMouseMotionListener("", LateBindWrapper(morph, "mouseMove"), LateBindWrapper(morph, "mouseDrag"))) widget.addMouseListener(CallbackLeftMouseButtonListener("", LateBindWrapper(morph, "mouseDown"), LateBindWrapper(morph, "mouseUp"))) widget.addMouseListener(CallbackRightMouseButtonListener("", LateBindWrapper(morph, "mouseContextDown"), LateBindWrapper(morph, "mouseContextUp"))) widget.mouseEntered = LateBindWrapper(morph, "mouseEnter") widget.mouseExited = LateBindWrapper(morph, "mouseLeave") # PDF FIX UNFINISHED class MyTreeNodeWithItem(DefaultMutableTreeNode): def __init__(self, item): self.item = item self.userObject = item.GetText() self.areChildrenDefined = 0 def getChildCount(self): if not self.areChildrenDefined: self.defineChildNodes() return DefaultMutableTreeNode.getChildCount(self) def defineChildNodes(self): self.areChildrenDefined = 1 if self.item.IsExpandable(): childItems = self.item.GetSubList() for item in childItems: newNode = MyTreeNodeWithItem(item) #newNode.setParent(self) self.add(newNode) def collapsed(self, tree): self.removeAllChildren() self.areChildrenDefined = 0 tree.model.nodeStructureChanged(self) # Support for an inspector tree node class PrototypeInspectorTreeItem: #class PrototypeInspectorTreeItem(TreeWidget.TreeItem): def __init__(self, parentObject, key, childObject, inheritedFlag): self.parentObject = parentObject self.key = key self.childObject = childObject self.inheritedFlag = inheritedFlag def __str__(self): return self.GetText() def GetText(self): childObject = self.childObject extra = "" if not hasattr(childObject, "__class__"): extra = " : " + `childObject` elif not hasattr(childObject, "__dict__") and not type(childObject) in [dict, list]: extra = 
" : " + `childObject` elif isinstance(childObject, PrototypeClass): extra = " : <Prototype %s> %s" % (`id(childObject)`, childObject.traits) elif isinstance(childObject, PrototypeMethod): #extra = " : <PrototypeMethod %s>" % childObject.source.split("\n")[0] extra = " : <PrototypeMethod %s>" % id(childObject) else: name = "%s" % childObject.__class__ unwantedPrefix = "__main__." if name.find(unwantedPrefix) == 0: name = name[len(unwantedPrefix):] extra = " : %s" % name if len(extra) > 40: extra = extra[:40] + "..." result = "%s" % self.key + extra if self.inheritedFlag: result = "[Inherited] " + result return result def IsEditable(self): return 0 def SetText(self, text): pass def GetIconName(self): if not self.IsExpandable(): return "python" # XXX wish there was a "file" icon def IsExpandable(self): childObject = self.childObject result = (hasattr(childObject, "__dict__") and not isinstance(childObject, PrototypeMethod)) or (type(childObject) in [list, dict]) return result def GetSubList(self): result = [] nonInheritedNames = None itemObject = self.childObject if type(itemObject) == dict: names = itemObject.keys() names.sort() elif type(itemObject) == list: names = range(len(itemObject)) elif isinstance(itemObject, PrototypeClass): properties = itemObject.allProperties() names = properties.keys() names.sort() nonInheritedNames = itemObject._attributes.keys() nonInheritedNames.sort() else: names = itemObject.__dict__.keys() names.sort() for key in names: if type(itemObject) in [list, dict]: childObject = itemObject[key] else: # hide the world pointer in all objects, plus other clutter if key == "world": continue elif key in ["function", "prototypeHoldingTheFunction"] and isinstance(itemObject, PrototypeMethod): continue try: childObject = getattr(itemObject, key) except AttributeError: # case where property exists, but not local or inherited print "missing property definition for ", key continue inheritedFlag = 0 if nonInheritedNames: inheritedFlag = not (key in 
nonInheritedNames) store = PrototypeInspectorTreeItem(itemObject, key, childObject, inheritedFlag) result.append(store) result.sort(MyCompare) return result # support function to look through children of a tree node and find a match for the key def InspectorTree_FindChildNodeWithKey(treeMorph, parentNode, key): for index in range(0, parentNode.getChildCount()): childNode = parentNode.getChildAt(index) if childNode.item.key == key: return childNode return None def InspectorTree_ScrollToAndSelectChildNodeWithKey(treeMorph, parentNode, key, collapseAndExpandParent=1): if collapseAndExpandParent: path = TreePath(parentNode.getPath()) treeMorph._tree.collapsePath(path) parentNode.collapsed(treeMorph._tree) treeMorph._tree.expandPath(path) newNode = InspectorTree_FindChildNodeWithKey(treeMorph, parentNode, key) path = TreePath(newNode.getPath()) treeMorph._tree.makeVisible(path) treeMorph._tree.setSelectionPath(path) def InspectorTree_ScrollToAndSelectNode(treeMorph, node, collapseAndExpandNode=1): if collapseAndExpandNode: if collapseAndExpandNode != "expandOnly": treeMorph._tree.collapsePath(TreePath(node.getPath())) node.collapsed(treeMorph._tree) treeMorph._tree.expandPath(TreePath(node.getPath())) path = TreePath(node.getPath()) treeMorph._tree.makeVisible(path) treeMorph._tree.setSelectionPath(path) def InspectorTree_FindChildNodeWithValue(treeMorph, parentNode, value): for index in range(0, parentNode.getChildCount()): childNode = parentNode.getChildAt(index) if childNode.item.childObject == value: return childNode return None def InspectorTree_CollapseAndExpandNode(treeMorph, node): path = TreePath(node.getPath()) treeMorph._tree.collapsePath(path) node.collapsed(treeMorph._tree) treeMorph._tree.expandPath(path) # for CallbackRunnable to be able to get None parameters class NoParamSpecified: pass class CallbackRunnable(Runnable): def __init__(self, callback, param1=NoParamSpecified, param2=NoParamSpecified): self.callback = callback self.param1 = param1 
self.param2 = param2 def run(self): if self.param1 == NoParamSpecified: self.callback() else: if self.param2 == NoParamSpecified: self.callback(self.param1) else: self.callback(self.param1, self.param2) def invokeLater(self): SwingUtilities.invokeLater(self) ########## Newer def GetNativeFont(font): name = font[0] # PDF FINISH -- style not handled style = Font.PLAIN size = font[1] return Font(name, style, size) def GetWidthAndHeightForTextInFont(text, font): try: # idea from: http://today.java.net/pub/a/today/2004/04/22/images.html?page=last buffer = BufferedImage(1, 1, BufferedImage.TYPE_INT_RGB) g2 = buffer.createGraphics() # PDF IMPROVE the correspondance of hints to what is actually used #g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON) fc = g2.getFontRenderContext() bounds = font.getStringBounds(text, fc) # HAD FUNKY ERROR WITH COMMA AND getWidth: return int(bounds.geWidth()), int(bounds,getHeight()) return int(bounds.width), int(bounds.height) except: print "GetWidthAndHeightForTextInFont exception" raise def hexToColor(text): r = int(text[0:2], 16) g = int(text[2:4], 16) b = int(text[4:6], 16) return Color(r, g, b) colorsLookupDictionary = { #java colors 'white': Color.white, 'black': Color.black, 'blue': Color.blue, 'cyan': Color.cyan, 'dark gray': Color.darkGray, 'gray': Color.gray, 'grey': Color.gray, 'green': Color.green, 'light gray': Color.lightGray, 'light grey': Color.lightGray, 'magenta': Color.magenta, 'orange': Color.orange, 'pink': Color.pink, 'red': Color.red, 'yellow': Color.yellow, # other colors 'light blue': hexToColor("C0D9D9"), 'green yellow': hexToColor("93DB70"), 'medium violet red': hexToColor("DB7093"), 'medium goldenrod': hexToColor("EAEAAE"), 'plum': hexToColor("EAADEA"), 'tan': hexToColor("DB9370"), 'turquoise': hexToColor("ADEAEA"), 'spring green': hexToColor("00FF7F"), 'orange red': hexToColor("FF2400"), 'goldenrod': hexToColor("DBDB70"), 'purple': hexToColor("800080"), 'light purple': 
hexToColor("C000C0"), 'sienna': hexToColor("A0522D"), 'slate blue': hexToColor("007FFF"), 'sea green': hexToColor("238E68"), 'very light gray': hexToColor("CDCDCD"), 'gold': hexToColor("FFD700"), 'violet red': hexToColor("CC3299"), 'coral': hexToColor("FF7F00"), 'light steel blue': hexToColor("8F8FBD"), 'silver': hexToColor("E6E8FA"), 'dark turquoise': hexToColor("7093DB"), 'light wood': hexToColor("E9C2A6"), 'feldspar': hexToColor("D19275"), 'thistle': hexToColor("D8BFD8"), 'khaki': hexToColor("F0E68C"), 'cool copper': hexToColor("D98719"), 'firebrick': hexToColor("B22222"), 'forest green': hexToColor("238E23"), 'steel blue': hexToColor("236B8E"), } def colorFromName(name): name = name.lower() return colorsLookupDictionary[name] def colorName(color): for colorName in colorsLookupDictionary.keys(): if colorsLookupDictionary[colorName] == color: return colorName return "" def GetNativeColor(nameOrTuple): if type(nameOrTuple) == tuple: return Color(nameOrTuple[0], nameOrTuple[1], nameOrTuple[2]) else: if nameOrTuple and nameOrTuple[0] == '#': return hexToColor(nameOrTuple[1:7]) try: return colorFromName(nameOrTuple) except KeyError: # try to return a system color return Color.getColor(nameOrTuple) ########### # PDF IMPORVE __ WHEN DRAG THIS< OBJECTS DISAPEAR __ NEED TO BE KEPT COPIED AT MORPH? class MyImageCanvas(JComponent): def __init__(self, backdropImage, cursorImage): # PDF RESOLVE NAME images are actually expected to be icons... 
self.clearImages() self.backdropImage = backdropImage self.cursorImage = cursorImage self.doubleBuffered = 1 def paintComponent(self, g): #JComponent.paintComponent(self, g) #draw entire component with background g.setColor(self.backgroundColor) g.fillRect(0, 0, self.getWidth(), self.getHeight()) if self.backdropImage: self.backdropImage.paintIcon(self, g, 0, 0) #g.drawImage(self.backdropImage, 0, 0, self) for image, position in self.otherImagesWithPosition: image.paintIcon(self, g, position[0], position[1]) #g.drawImage(image, position[0], position[1], self) if self.cursorImage: x = self.cursorImagePosition[0] - self.cursorOriginOffset[0] y = self.cursorImagePosition[1] - self.cursorOriginOffset[1] self.cursorImage.paintIcon(self, g, x, y) #g.drawImage(self.cursorImage, x, y, self) def getPreferredSize(self): if self.backdropImage: try: return (self.backdropImage.iconWidth, self.backdropImage.iconHeight) except: print "problem" return (100, 100) def getMinimumSize(self): return self.getPreferredSize() def clearImages(self): self.backdropImage = None self.cursorImage = None self.cursorImagePosition = (0, 0) self.cursorOriginOffset = (0, 0) self.backgroundColor = Color.white # list of tuples as (image, position) self.otherImagesWithPosition = [] def addOtherImage(self, image, position): self.otherImagesWithPosition.append((image, position)) self.repaint() def clearOtherImages(self): self.otherImagesWithPosition = [] self.repaint() ############### # callbacks that check for the metaKey def IsEventMatchForFilter(event, filter): #print "IsEventMatchForFilter", filter, event modifiers = event.getModifiersExText(event.getModifiersEx()) items = modifiers.split("+") if filter == "": if "Alt" in items: return 0 if "Ctrl" in items: return 0 if "Shift" in items: return 0 return 1 elif filter == "Alt": if "Ctrl" in items: return 0 if "Shift" in items: return 0 if "Alt" in items: return 1 return 0 elif filter == "Control": if "Shift" in items: return 0 if "Alt" in items: return 
0 if "Ctrl" in items: return 1 return 0 elif filter == "Shift": if "Alt" in items: return 0 if "Ctrl" in items: return 0 if "Shift" in items: return 1 return 0 elif filter == "Shift-Control": if "Alt" in items: return 0 if "Ctrl" in items and "Shift" in items: return 1 return 0 return 0 class CallbackLeftMouseButtonListener(MouseAdapter): def __init__(self, modifiersFilter, callbackOnDown, callbackOnUp): self.modifiersFilter = modifiersFilter self.callbackOnDown = callbackOnDown self.callbackOnUp = callbackOnUp def mousePressed(self, event): if self.callbackOnDown and IsEventMatchForFilter(event, self.modifiersFilter): if SwingUtilities.isLeftMouseButton(event): self.callbackOnDown(event) def mouseReleased(self, event): if self.callbackOnUp and IsEventMatchForFilter(event, self.modifiersFilter): if SwingUtilities.isLeftMouseButton(event): self.callbackOnUp(event) class CallbackRightMouseButtonListener(MouseAdapter): def __init__(self, modifiersFilter, callbackOnDown, callbackOnUp): self.modifiersFilter = modifiersFilter self.callbackOnDown = callbackOnDown self.callbackOnUp = callbackOnUp def mousePressed(self, event): if self.callbackOnDown and IsEventMatchForFilter(event, self.modifiersFilter): if SwingUtilities.isRightMouseButton(event): self.callbackOnDown(event) def mouseReleased(self, event): if self.callbackOnUp and IsEventMatchForFilter(event, self.modifiersFilter): if SwingUtilities.isRightMouseButton(event): self.callbackOnUp(event) class CallbackMouseMotionListener(MouseMotionAdapter): def __init__(self, modifiersFilter, callback, draggedCallback=None): self.modifiersFilter = modifiersFilter self.callback = callback self.draggedCallback = draggedCallback def mouseMoved(self, event): if self.callback and IsEventMatchForFilter(event, self.modifiersFilter): self.callback(event) def mouseDragged(self, event): if IsEventMatchForFilter(event, self.modifiersFilter): if self.draggedCallback: self.draggedCallback(event) else: self.callback(event) class 
CallbackKeyListener(KeyAdapter): def __init__(self, pressedCallback, releasedCallback): self.pressedCallback = pressedCallback self.releasedCallback = releasedCallback def keyPressed(self, event): print "CallbackKeyListener", event if self.pressedCallback: self.pressedCallback(event) def keyReleased(self, event): print "CallbackKeyListener", event if self.releasedCallback: self.releasedCallback(event) #### class FileDialog: def __init__(self, parent, title="Choose file", loadOrSave="load"): self.parent = parent self.title = title self.loadOrSave = loadOrSave def go(self, pattern="*.py", default=None): fileChooser = JFileChooser() if self.title: fileChooser.setDialogTitle(self.title) if default: fileChooser.setSelectedFile(java.io.File(default)) fileChooser.setCurrentDirectory(java.io.File(".")) if self.loadOrSave == "load": result = fileChooser.showOpenDialog(self.parent) else: result = fileChooser.showSaveDialog(self.parent) if (result == JFileChooser.APPROVE_OPTION): fileResult = None fileAndMaybeDir = fileChooser.getSelectedFile().getAbsoluteFile() if not fileAndMaybeDir.isDirectory(): fileResult = str(fileAndMaybeDir) return fileResult else: return None #### COMMON # Cursor def Common_GetCursor(widget): return widget.getCursor() def Common_SetCursor(widget, cursor): widget.setCursor(cursor) def Common_SetCursorByName(widget, cursorName): if cursorName == "normal": raise "unfinished" elif cursorName == "cross": newCursor = Cursor(Cursor.CROSSHAIR_CURSOR) else: raise "Unsupported cursor name" self.widget.setCursor(newCursor) # Image def Common_LoadImage(fileName): return ImageIcon(fileName) def Common_ImageWidth(image): return image.iconWidth def Common_ImageHeight(image): return image.iconHeight # Native Event def Common_NativeEventPositionInWindow(event): return event.x, event.y
nilq/baby-python
python
from selenium.webdriver.support.ui import Select


class ContactHelper:
    """Page-object style helper for contact operations (create / delete /
    count) of the addressbook web application under test."""

    def __init__(self, app):
        # app: application fixture owning the shared webdriver (app.wd)
        self.app = app

    def open_contact_page(self):
        """Navigate to the contact list ("home page") unless already there."""
        wd = self.app.wd
        # NOTE(review): find_element_by_* raises NoSuchElementException when
        # the element is absent instead of returning a falsy value, so this
        # guard only short-circuits when both elements exist -- presumably
        # intended as an "already open" check; confirm against the fixture.
        if not (wd.find_element_by_title("Search for any text")
                and wd.find_element_by_name("add")):
            wd.find_element_by_link_text("home page").click()

    def _fill(self, field, value):
        # Click, clear and re-type a plain text input identified by name.
        wd = self.app.wd
        wd.find_element_by_name(field).click()
        wd.find_element_by_name(field).clear()
        wd.find_element_by_name(field).send_keys(value)

    def _select(self, field, value):
        # Pick an option of a <select> drop-down by its visible text.
        wd = self.app.wd
        wd.find_element_by_name(field).click()
        Select(wd.find_element_by_name(field)).select_by_visible_text(value)

    def create_contact(self, contact):
        """Fill the "add new" contact form from the contact model object,
        submit it, then log out (matching the original flow)."""
        wd = self.app.wd
        self.open_contact_page()
        wd.find_element_by_link_text("add new").click()
        # plain text fields
        self._fill("firstname", contact.firstname)
        self._fill("middlename", contact.middlename)
        self._fill("address", contact.address)
        self._fill("mobile", contact.mobile)
        self._fill("lastname", contact.lastname)
        self._fill("nickname", contact.nickname)
        self._fill("title", contact.title)
        self._fill("company", contact.company)
        self._fill("home", contact.home)
        self._fill("work", contact.work)
        self._fill("fax", contact.fax)
        # e-mail addresses
        self._fill("email", contact.email_1)
        self._fill("email2", contact.email_2)
        self._fill("email3", contact.email_3)
        # birthday (day/month are drop-downs, year is a text input)
        self._select("bday", contact.bday)
        self._select("bmonth", contact.bmonth)
        self._fill("byear", contact.byear)
        # anniversary date
        self._select("aday", contact.aday)
        self._select("amonth", contact.amonth)
        self._fill("ayear", contact.ayear)
        # secondary address / phone and free-form notes
        self._fill("address2", contact.address2)
        self._fill("phone2", contact.phone2)
        self._fill("notes", contact.notes)
        # submit the form, then log out
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        wd.find_element_by_link_text("Logout").click()

    def delete_first_contact(self):
        """Delete the first contact in the list, confirming the JS alert."""
        wd = self.app.wd
        self.open_contact_page()
        # select the first contact's row checkbox
        wd.find_element_by_name("selected[]").click()
        # submit deletion
        wd.find_element_by_name("DeleteSel()").click()
        wd.switch_to_alert().accept()
        wd.find_element_by_link_text("home").click()

    def count(self):
        """Return the number of contacts currently in the list.

        Bug fix: the original called find_element (singular), whose return
        value is a single WebElement -- len() of it raises TypeError.  Count
        the per-row "selected[]" checkboxes (one per contact, the same
        locator delete_first_contact uses) instead of the "home page" link.
        """
        wd = self.app.wd
        self.open_contact_page()
        return len(wd.find_elements_by_name("selected[]"))
nilq/baby-python
python
def XXX(self, root: 'TreeNode') -> int:
    """Return the minimum depth of a binary tree.

    The minimum depth is the number of nodes on the shortest path from the
    root down to the nearest leaf (a node with no children).  An empty
    tree has depth 0.

    Improvements over the original: the helper named ``bfs`` was actually a
    recursive DFS that visited every node and relied on a 10**5 sentinel;
    this version does a true breadth-first traversal, so it stops at the
    first leaf encountered.  The annotation is quoted so the function can
    be defined even when TreeNode is not yet in scope.
    """
    if root is None:
        return 0
    # Level-order traversal: the first leaf reached is at minimum depth.
    queue = collections.deque([(root, 1)])
    while queue:
        node, depth = queue.popleft()
        if node.left is None and node.right is None:
            return depth
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
nilq/baby-python
python
# Copyright 2022 The Balsa Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from balsa.util import postgres class CardEst(object): """Base class for cardinality estimators.""" def __call__(self, node, join_conds): raise NotImplementedError() class PostgresCardEst(CardEst): def __init__(self): self._cache = {} def _HashKey(self, node): """Computes a hash key based on the logical contents of 'node'. Specifically, hash on the sorted sets of table IDs and their filters. NOTE: Postgres can produce slightly different cardinality estimates when all being equal but just the FROM list ordering tables differently. Here, we ignore this slight difference. """ sorted_filters = '\n'.join(sorted(node.GetFilters())) sorted_leaves = '\n'.join(sorted(node.leaf_ids())) return sorted_leaves + sorted_filters def __call__(self, node, join_conds): key = self._HashKey(node) card = self._cache.get(key) if card is None: sql_str = node.to_sql(join_conds) card = postgres.GetCardinalityEstimateFromPg(sql=sql_str) self._cache[key] = card return card
nilq/baby-python
python
#!/usr/bin/python # Copyright 2017 Telstra Open Source # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import requests import json import pprint #For the following mn topo #mn --controller=remote,ip=172.18.0.1,port=6653 --switch ovsk,protocols=OpenFlow13 --topo torus,3,3 #h1x1 ping h3x2 url = "http://localhost/api/v1/flow" headers = {'Content-Type': 'application/json'} j_data = {"src_switch":"00:00:00:00:00:00:01:01", "src_port":1, "src_vlan":0, "dst_switch":"00:00:00:00:00:00:03:02", "dst_port":1, "dst_vlan":0, "bandwidth": 2000} result = requests.post(url, json=j_data, headers=headers) print result.text
nilq/baby-python
python
# Copyright 2020 University of Groningen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test graph related functions """ import pytest import networkx as nx import polyply from .example_fixtures import example_meta_molecule @pytest.mark.parametrize('source, max_length, min_length, expected',( (4, 1, 1, [4, 1, 9, 10]), (4, 2, 1, [4, 1, 9, 10, 0, 3]), (4, 3, 3, [0, 3, 7, 8, 2]), (0, 1, 1, [0, 1, 2]) )) def test_neighbourhood(source, max_length, min_length, expected): graph = nx.balanced_tree(r=2, h=3) neighbours = polyply.src.graph_utils.neighborhood(graph, source, max_length, min_length=min_length) assert set(neighbours) == set(expected) @pytest.mark.parametrize('edges, expected',( # simple linear ([(0, 1), (1, 2), (2, 3)], False), # simple cyclic ([(0, 1), (1, 2), (2, 3), (3, 0)], False), # simple branched ([(0, 1), (1, 2), (1, 3), (3, 4)], True), # cyclic branched ([(0, 1), (1, 2), (2, 3), (3, 0), (0, 5)], True), # no nodes ([], False) )) def test_is_branched(edges, expected): graph = nx.Graph() graph.add_edges_from(edges) result = polyply.src.graph_utils.is_branched(graph) assert result == expected @pytest.mark.parametrize('nodes, expected',( ((0, 1), [(1, 4)]), # central residue ((1, 2), [(6, 9)]), )) def test_find_connecting_edges(example_meta_molecule, nodes, expected): result = polyply.src.graph_utils.find_connecting_edges(example_meta_molecule, example_meta_molecule.molecule, nodes) assert result == expected
nilq/baby-python
python
# Compare the stock code universes exposed by two Korean broker APIs
# (Kiwoom OpenAPI vs Cybos Plus) and write an outer-join report to Excel
# showing which codes each backend knows about.

import pandas as pd

from koapy import KiwoomOpenApiContext
from koapy.backend.cybos.CybosPlusComObject import CybosPlusComObject

kiwoom = KiwoomOpenApiContext()
cybos = CybosPlusComObject()

# Both calls block until the respective API session is connected/logged in.
kiwoom.EnsureConnected()
cybos.EnsureConnected()

kiwoom_codes = kiwoom.GetCommonCodeList()
cybos_codes = cybos.GetCommonCodeList()

# Cybos codes carry a one-character leading prefix -- presumably the market
# type marker (e.g. 'A') -- stripped here so the codes are comparable with
# Kiwoom's; TODO confirm against the Cybos code-format docs.
cybos_codes = [code[1:] for code in cybos_codes]

# Flag presence in each source with a literal 'TRUE' column.
kiwoom_codes = pd.DataFrame(kiwoom_codes, columns=['code'])
kiwoom_codes['kiwoom'] = 'TRUE'

cybos_codes = pd.DataFrame(cybos_codes, columns=['code'])
cybos_codes['cybos'] = 'TRUE'

# Outer join keeps codes known to either backend; NaN marks absence.
df = pd.merge(kiwoom_codes, cybos_codes, how='outer', on='code')
df.to_excel('output.xlsx')
nilq/baby-python
python
# Module-level sample list; nothing else is visible in this fragment.
xs = [1, 2]
nilq/baby-python
python
# Copyright 2010-2011 Josh Kearney # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Event-based IRC Class""" import random import re import time import urllib from pyhole import irclib from pyhole import plugin class IRC(irclib.SimpleIRCClient): """An IRC connection""" def __init__(self, config, network, log, version, conf_file): irclib.SimpleIRCClient.__init__(self) self.log = log self.version = version self.conf_file = conf_file self.admins = config.get("admins", type="list") self.command_prefix = config.get("command_prefix") self.reconnect_delay = config.get("reconnect_delay", type="int") self.rejoin_delay = config.get("rejoin_delay", type="int") self.plugin_dir = config.get("plugin_dir") self.server = network.get("server") self.password = network.get("password", default="") self.port = network.get("port", type="int", default=6667) self.ssl = network.get("ssl", type="bool", default=False) self.ipv6 = network.get("ipv6", type="bool", default=False) self.nick = network.get("nick") self.identify_password = network.get("identify_password", default="") self.channels = network.get("channels", type="list") self.addressed = False self.load_plugins() self.log.info("Connecting to %s:%d as %s" % (self.server, self.port, self.nick)) self.connect(self.server, self.port, self.nick, self.password, ssl=self.ssl, ipv6=self.ipv6) def load_plugins(self, reload_plugins=False): """Load plugins and their commands respectively""" if reload_plugins: plugin.reload_plugins(self.plugin_dir, irc=self, 
conf_file=self.conf_file) else: plugin.load_plugins(self.plugin_dir, irc=self, conf_file=self.conf_file) self.log.info("Loaded Plugins: %s" % active_plugins()) def run_hook_command(self, mod_name, f, arg, **kwargs): """Make a call to a plugin hook""" try: f(arg, **kwargs) if arg: self.log.debug("Calling: %s.%s(\"%s\")" % (mod_name, f.__name__, arg)) else: self.log.debug("Calling: %s.%s(None)" % (mod_name, f.__name__)) except Exception, e: self.log.error(e) def run_msg_regexp_hooks(self, message, private): """Run regexp hooks""" for mod_name, f, msg_regex in plugin.hook_get_msg_regexs(): m = re.search(msg_regex, message, re.I) if m: self.run_hook_command(mod_name, f, m, private=private, full_message=message) def run_keyword_hooks(self, message, private): """Run keyword hooks""" words = message.split(" ") for mod_name, f, kw in plugin.hook_get_keywords(): for word in words: m = re.search("^%s(.+)" % kw, word, re.I) if m: self.run_hook_command(mod_name, f, m.group(1), private=private, full_message=message) def run_command_hooks(self, message, private): """Run command hooks""" for mod_name, f, cmd in plugin.hook_get_commands(): self.addressed = False if private: m = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), message, re.I) if m: self.run_hook_command(mod_name, f, m.group(1), private=private, addressed=self.addressed, full_message=message) if message.startswith(self.command_prefix): # Strip off command prefix msg_rest = message[len(self.command_prefix):] else: # Check for command starting with nick being addressed msg_start_upper = message[:len(self.nick) + 1].upper() if msg_start_upper == self.nick.upper() + ":": # Get rest of string after "nick:" and white spaces msg_rest = re.sub("^\s+", "", message[len(self.nick) + 1:]) else: continue self.addressed = True m = re.search("^%s$|^%s\s(.*)$" % (cmd, cmd), msg_rest, re.I) if m: self.run_hook_command(mod_name, f, m.group(1), private=private, addressed=self.addressed, full_message=message) def poll_messages(self, message, 
private=False): """Watch for known commands""" self.addressed = False self.run_command_hooks(message, private) self.run_keyword_hooks(message, private) self.run_msg_regexp_hooks(message, private) def reply(self, msg): """Send a privmsg""" if not hasattr(msg, "encode"): try: msg = str(msg) except Exception: self.log.error("msg cannot be converted to string") return msg = msg.encode("utf-8").split("\n") # 10 is completely arbitrary for now if len(msg) > 10: msg = msg[0:8] msg.append("...") for line in msg: if self.addressed: source = self.source.split("!")[0] self.connection.privmsg(self.target, "%s: %s" % (source, line)) self.log.info("-%s- <%s> %s: %s" % (self.target, self.nick, source, line)) else: self.connection.privmsg(self.target, line) if irclib.is_channel(self.target): self.log.info("-%s- <%s> %s" % (self.target, self.nick, line)) else: self.log.info("<%s> %s" % (self.nick, line)) def privmsg(self, target, msg): """Send a privmsg""" self.connection.privmsg(target, msg) def op_user(self, params): """Op a user""" params = params.split(" ", 1) self.connection.mode(params[0], "+o %s" % params[1]) def deop_user(self, params): """De-op a user""" params = params.split(" ", 1) self.connection.mode(params[0], "-o %s" % params[1]) def set_nick(self, params): """Set IRC nick""" self.nick = params self.connection.nick(params) def join_channel(self, params): """Join a channel""" channel = params.split(" ", 1) self.reply("Joining %s" % channel[0]) if irclib.is_channel(channel[0]): self.channels.append(channel[0]) if len(channel) > 1: self.connection.join(channel[0], channel[1]) else: self.connection.join(channel[0]) def part_channel(self, params): """Part a channel""" self.channels.remove(params) self.reply("Parting %s" % params) self.connection.part(params) def fetch_url(self, url, name): """Fetch a URL""" class PyholeURLopener(urllib.FancyURLopener): version = self.version urllib._urlopener = PyholeURLopener() try: return urllib.urlopen(url) except IOError: 
self.reply("Unable to fetch %s data" % name) return None def on_nicknameinuse(self, connection, event): """Ensure the use of unique IRC nick""" random_int = random.randint(1, 100) self.log.info("IRC nick '%s' is currently in use" % self.nick) self.nick = "%s%d" % (self.nick, random_int) self.log.info("Setting IRC nick to '%s'" % self.nick) connection.nick("%s" % self.nick) # Try to prevent nick flooding time.sleep(1) def on_welcome(self, connection, event): """Join channels upon successful connection""" if self.identify_password: self.privmsg("NickServ", "IDENTIFY %s" % self.identify_password) for channel in self.channels: c = channel.split(" ", 1) if irclib.is_channel(c[0]): if len(c) > 1: connection.join(c[0], c[1]) else: connection.join(c[0]) def on_disconnect(self, connection, event): """Attempt to reconnect after disconnection""" self.log.info("Disconnected from %s:%d" % (self.server, self.port)) self.log.info("Reconnecting in %d seconds" % self.reconnect_delay) time.sleep(self.reconnect_delay) self.log.info("Connecting to %s:%d as %s" % (self.server, self.port, self.nick)) self.connect(self.server, self.port, self.nick, self.password, ssl=self.ssl) def on_kick(self, connection, event): """Automatically rejoin channel if kicked""" source = irclib.nm_to_n(event.source()) target = event.target() nick, reason = event.arguments() if nick == self.nick: self.log.info("-%s- kicked by %s: %s" % (target, source, reason)) self.log.info("-%s- rejoining in %d seconds" % (target, self.rejoin_delay)) time.sleep(self.rejoin_delay) connection.join(target) else: self.log.info("-%s- %s was kicked by %s: %s" % (target, nick, source, reason)) def on_invite(self, connection, event): """Join a channel upon invitation""" source = event.source().split("@", 1)[0] if source in self.admins: self.join_channel(event.arguments()[0]) def on_ctcp(self, connection, event): """Respond to CTCP events""" source = irclib.nm_to_n(event.source()) ctcp = event.arguments()[0] if ctcp == "VERSION": 
self.log.info("Received CTCP VERSION from %s" % source) connection.ctcp_reply(source, "VERSION %s" % self.version) elif ctcp == "PING": if len(event.arguments()) > 1: self.log.info("Received CTCP PING from %s" % source) connection.ctcp_reply(source, "PING %s" % event.arguments()[1]) def on_join(self, connection, event): """Handle joins""" target = event.target() source = irclib.nm_to_n(event.source()) self.log.info("-%s- %s joined" % (target, source)) def on_part(self, connection, event): """Handle parts""" target = event.target() source = irclib.nm_to_n(event.source()) self.log.info("-%s- %s left" % (target, source)) def on_quit(self, connection, event): """Handle quits""" source = irclib.nm_to_n(event.source()) self.log.info("%s quit" % source) def on_action(self, connection, event): """Handle IRC actions""" target = event.target() source = irclib.nm_to_n(event.source()) msg = event.arguments()[0] self.log.info(unicode("-%s- * %s %s" % (target, source, msg), "utf-8")) def on_privnotice(self, connection, event): """Handle private notices""" source = irclib.nm_to_n(event.source()) msg = event.arguments()[0] self.log.info(unicode("-%s- %s" % (source, msg), "utf-8")) def on_pubnotice(self, connection, event): """Handle public notices""" target = event.target() source = irclib.nm_to_n(event.source()) msg = event.arguments()[0] self.log.info(unicode("-%s- <%s> %s" % (target, source, msg), "utf-8")) def on_privmsg(self, connection, event): """Handle private messages""" self.source = event.source().split("@", 1)[0] self.target = irclib.nm_to_n(event.source()) msg = event.arguments()[0] if self.target != self.nick: self.log.info(unicode("<%s> %s" % (self.target, msg), "utf-8")) self.poll_messages(msg, private=True) def on_pubmsg(self, connection, event): """Handle public messages""" self.source = event.source().split("@", 1)[0] self.target = event.target() nick = irclib.nm_to_n(event.source()) msg = event.arguments()[0] self.log.info(unicode("-%s- <%s> %s" % (self.target, 
nick, msg), "utf-8")) self.poll_messages(msg) def active_plugins(): """List active plugins""" return ", ".join(sorted(plugin.active_plugins())) def active_commands(): """List active commands""" return ", ".join(sorted(plugin.active_commands())) def active_keywords(): """List active keywords""" return ", ".join(sorted(plugin.active_keywords()))
nilq/baby-python
python
from django import forms
from django.forms import ModelForm

from auctions.models import Listing, Comment, Bid, Category


def category_choices():
    """Return (slug_name, name) choice pairs for every Category.

    Passed as a callable so Django re-evaluates it each time the field's
    choices are needed: categories added after server start-up show up
    without a restart, and the database is not queried when the form
    class is defined at import time (which breaks e.g. fresh migrations).
    """
    return Category.objects.all().values_list('slug_name', 'name')


# Kept for backward compatibility with any external importers of this
# name; note this module-level queryset suffers the staleness problem
# described above and should not be used for new code.
categories = Category.objects.all().values_list('slug_name', 'name')


class CreateListing(ModelForm):
    """Form for creating a Listing, with an optional category choice."""

    # Callable choices are resolved lazily on every render (Django >= 1.8).
    name = forms.ChoiceField(choices=category_choices, required=False)

    class Meta:
        model = Listing
        fields = ['title', 'description', 'price', 'image']


class CreateComment(ModelForm):
    """Form for posting a Comment."""

    class Meta:
        model = Comment
        fields = ['comment']


class CreateBid(ModelForm):
    """Form for placing a Bid."""

    class Meta:
        model = Bid
        fields = ['price']
nilq/baby-python
python
from .utils import find_closest_equivalent, Snapshot from .find_init_weights import find_weights
nilq/baby-python
python
"""Unit tests for the ASHRAE 229 schema wrapper (schema229.A229Schema)."""

import os

import schema229

# Path to the built schema file; previously duplicated in every test.
SCHEMA_PATH = os.path.join(
    os.path.dirname(__file__), '..', 'build', 'schema', 'ASHRAE229.schema.json'
)


def _load_schema():
    """Construct a fresh A229Schema from the built schema file."""
    return schema229.A229Schema(SCHEMA_PATH)


def test_resolve_ref():
    """resolve_ref on the ASHRAE229 definition must not carry a 'title' key."""
    schema = _load_schema()
    node = schema.resolve_ref("ASHRAE229.schema.json#/definitions/ASHRAE229")
    assert 'title' not in node


def test_get_schema_node():
    """An empty path addresses the schema root, which carries 'version'."""
    schema = _load_schema()
    node = schema.get_schema_node([])
    assert 'version' in node
nilq/baby-python
python
#!/usr/bin/python2
"""Number-theory helpers for Project Euler style problems.

All integer divisions now use // so the helpers behave identically under
Python 2 and Python 3 (the originals either crashed or silently produced
floats on Python 3).
"""

from math import sqrt
from decimal import Decimal  # no longer used by fibn; kept for importers


def check_prime(num):
    """Return True when num is prime, by trial division up to sqrt(num).

    Fixes the original behaviour, which wrongly rejected 2 (the
    even-number shortcut fired first) and wrongly accepted 0 and 1.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for i in range(3, int(sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True


def reverse(num):
    """Return the decimal digits of a non-negative integer, reversed."""
    rev_num = 0
    while num:
        rev_num = 10 * rev_num + num % 10
        num //= 10  # // keeps this integer arithmetic on Python 3
    return rev_num


def reverse_s(s):
    """Return s (coerced to str) reversed, or None if coercion fails."""
    if not isinstance(s, str):
        try:
            s = str(s)
        except Exception:
            return None
    return s[::-1]


def check_palindrome(num):
    """Return True when num reads the same forwards and backwards."""
    return num == int(reverse_s(num))


def prime_sieve(limit):
    """Sieve of Eratosthenes: list of booleans, index i True iff i is prime.

    Requires limit >= 2, as in the original.
    """
    sieve = [True] * int(limit)
    sieve[0] = sieve[1] = False
    for i, is_prime in enumerate(sieve):
        if is_prime:
            # Strike multiples of i from i*i upward.  The count formula
            # (limit-1)//i - (i-1) matches the slice length exactly and
            # is <= 0 (empty) once i*i >= limit.
            sieve[i * i::i] = [False] * (((limit - 1) // i) - (i - 1))
    return sieve


def multiples(number, factor):
    """Divide factor out of number; return (remainder, times_divided)."""
    counter = 0
    while number % factor == 0:
        number //= factor
        counter += 1
    return (number, counter)


def factor_length(multiplicities):
    """Total number of prime factors counted with multiplicity.

    NOTE(review): this helper was referenced but never defined in the
    original module, so calling prime_factors with a limit raised
    NameError.  Summing the multiplicities is the most plausible
    intent -- TODO confirm against callers.
    """
    return sum(multiplicities)


def prime_factors(number, limit=None):
    """Return {prime: multiplicity} for number.

    If limit is given, stop early once factor_length(multiplicities)
    reaches it.  As in the original, a prime input returns {} (the final
    'number != original' guard deliberately skips it).
    """
    original = number
    factors, current = {}, 3
    if number % 2 == 0:
        number, factors[2] = multiples(number, 2)
        if limit and factor_length(factors.values()) >= limit:
            return factors
    max_factor = int(sqrt(number)) + 1
    while number > 1 and current <= max_factor:
        if number % current == 0:
            number, factors[current] = multiples(number, current)
            if limit and factor_length(factors.values()) >= limit:
                break
            # Shrink the search bound as the remaining cofactor shrinks.
            max_factor = int(sqrt(number)) + 1
        current += 2
    if number != 1 and number != original:
        factors[number] = 1
    return factors


def factors(number):
    """Return the set of all positive divisors of number."""
    divisors = set()
    for i in range(1, int(sqrt(number)) + 1):
        if number % i == 0:
            divisors.add(i)
            divisors.add(number // i)  # // avoids float divisors on Python 3
    return divisors


def fibn(n):
    """Return the n-th Fibonacci number (fibn(0) == 0, fibn(1) == 1).

    Computed by exact integer iteration; the original Binet-formula
    implementation built on Decimal(sqrt(5)) lost precision (off-by-one
    results) for larger n.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
python
# Space: O(n)
# Time:  O(n log k)

import collections


class Solution:
    """LeetCode 347 -- Top K Frequent Elements."""

    def topKFrequent(self, nums, k):
        """Return the k most frequent values in nums, most frequent first.

        Counter.most_common(k) keeps a heap of size k (heapq.nlargest),
        so this is O(n log k) instead of sorting every distinct value.
        nlargest is stable, so tie-breaking matches the original stable
        sort over counts.keys() (first-seen value wins).
        """
        counts = collections.Counter(nums)
        return [value for value, _ in counts.most_common(k)]
nilq/baby-python
python
import multiprocessing import os import signal import sys import time import WarBackend as War from blessings import Terminal def cleanexit(sig, frame): if os.system("clear") != 0: os.system("cls") print("\nStopping...") sys.exit() signal.signal(signal.SIGINT, cleanexit) # Catches ^c and stops term = Terminal() global needscreenclear needscreenclear = False os.system("clear") starttime = time.time() # Statement to allow time to be kept on the amount of time the program has been running. # Todo add some terminal configuring options options = { "avthreads": 0, "numberofgames": 5, "createouput": False, "outputfilename": "" } passed_arguments = sys.argv[1:] continuetorun = True if '-h' in passed_arguments: print('' '-h | prints this help thing :)\n' '-t | Number of threads\n' '-g | Number of games to play') continuetorun = False else: if '-c' in passed_arguments: threadarg = passed_arguments.index('-c') try: threadarg_perm = passed_arguments[threadarg + 1] options["avthreads"] = float(threadarg_perm) except IndexError or ValueError: print('Invalid perameter') continuetorun = False else: options["avthreads"] = multiprocessing.cpu_count() - 1 if '-g' in passed_arguments: gamesarg = passed_arguments.index('-g') try: gamesarg_perm = passed_arguments[gamesarg + 1] options["numberofgames"] = int(gamesarg_perm) except IndexError or ValueError: print('Invalid perameter') continuetorun = False else: options["numberofgames"] = 1000000 # Playing functions def warthread(numgames, threadnum, statlist): if os.path.isfile(os.path.join(".",str(threadnum)+"-drawreport.csv")): os.remove(os.path.join(".",str(threadnum)+"-drawreport.csv")) tmpfile = open(os.path.join(".",str(threadnum)+"-drawreport.csv"),'w') tmpfile.close() else: tmpfile = open(os.path.join(".", str(threadnum) + "-drawreport.csv"), 'w') tmpfile.close() for i in range(0, numgames): result = War.playwar(fileoutput=os.path.join(".",str(threadnum)+"-drawreport.csv")) if result == 1: statlist[threadnum][0] += 1 elif result == 
2: statlist[threadnum][1] += 1 elif result == 3: statlist[threadnum][2] += 1 statlist[threadnum][3] += 1 def totalup(statlist): ''' :param statlist: The current real time statistic list :return: A list of totaled data from this rt list ''' outputstlist = [] for i in range(0, 4): outputstlist.append(0) # Putting in values that way we can add to them for dive in statlist: for subdive in range(0, 4): outputstlist[subdive] += dive[subdive] return outputstlist # Main Event last_run = False if (options["numberofgames"] > 0) and(continuetorun): print("Playing %i games." % (options["numberofgames"])) rtstatlist = [] for loops in range(0, options["avthreads"]): stat = multiprocessing.Array('i', range(4)) # creating a statistic list for a thread to utalize for kount in range(0, 4): stat[kount] = 0 rtstatlist.append(stat) # Creating the thread list and spawning the threads threads = [] if options["avthreads"] == 1: wthread = multiprocessing.Process(target=warthread, args=(options["numberofgames"], 0, rtstatlist)) threads.append(wthread) else: tmpgames_playing = options["numberofgames"] for count in range(0, options["avthreads"] - 1): wthread = multiprocessing.Process(target=warthread, args=( options["numberofgames"] // options["avthreads"], count, rtstatlist)) tmpgames_playing -= options["numberofgames"] // options["avthreads"] threads.append(wthread) threads[count].start() wthread = multiprocessing.Process(target=warthread, args=((tmpgames_playing, count + 1, rtstatlist))) threads.append(wthread) threads[count + 1].start() while (totalup(rtstatlist))[3] != options["numberofgames"]: statlist = totalup(rtstatlist) # Minimizes a bug from occuring if a thread modified the rtstatlist before the print code finshed processing the first totalup if statlist[0] > 0: # Prevents divide by zero error if the display code was run before any of the threads had a chance to play a game if needscreenclear: os.system("clear") needscreenclear = False with term.location(0, 5): print("Press Esc to 
clear the screen (Just in case you accidentally typed garbage)") print("Player One has won %f percent of the time. " % float(statlist[0] * 100 / statlist[3])) print("Player Two has won %f percent of the time. " % float(statlist[1] * 100 / statlist[3])) print("There has been a draw %f percent of the time. \n" % float(statlist[2] / statlist[3])) print("Player One has won %i time(s)." % statlist[0]) print("Player Two has won %i time(s)." % statlist[1]) print("There have been %i draws" % statlist[2]) print("The game has been played %i time(s)." % statlist[3]) print("We are %f percent done." % (statlist[3] * 100 / options["numberofgames"])) elapsted_seconds = time.time() - starttime # elapsted_seconds = 602263 #Debug time amount. Should be 6 days, 23 hours, 17 minutes, and 43 seconds days = int(elapsted_seconds // 86400) hours = int(elapsted_seconds // 3600 - (days * 24)) minutes = int(elapsted_seconds // 60 - (hours * 60) - (days * 1440)) seconds = int(elapsted_seconds - (minutes * 60) - (hours * 3600) - (days * 86400)) print("Time Elapsed: ", days, " ", ":", hours, " ", ":", minutes, " ", ":", seconds, " ") adverage_games_per_second = statlist[3] / elapsted_seconds tremaining = (options["numberofgames"] - statlist[3]) / adverage_games_per_second advdays = int(tremaining // 86400) advhours = int(tremaining // 3600 - (advdays * 24)) advminutes = int(tremaining // 60 - (advhours * 60) - (advdays * 1440)) advseconds = int(tremaining - (advminutes * 60) - (advhours * 3600) - (advdays * 86400)) print("Time Remaining: ", advdays, " ", ":", advhours, " ", ":", advminutes, " ", ":", advseconds, " ") os.system("clear") statlist = totalup(rtstatlist) with term.location(0, 10): print("Player One has won %f percent of the time. " % float(statlist[0] * 100 / statlist[3])) print("Player Two has won %f percent of the time. " % float(statlist[1] * 100 / statlist[3])) print("There has been a draw %f percent of the time. 
\n" % float(statlist[2] / statlist[3])) print("Player One has won %i times." % statlist[0]) print("Player Two has won %i times." % statlist[1]) print("There have been %i draws" % statlist[2]) print("The game has been played %i time(s)" % statlist[3]) elapsted_seconds = time.time() - starttime # elapsted_seconds = 602263 #Debug time amount. Should be 6 days, 23 hours, 17 minutes, and 43 seconds days = int(elapsted_seconds // 86400) hours = int(elapsted_seconds // 3600 - (days * 24)) minutes = int(elapsted_seconds // 60 - (hours * 60) - (days * 1440)) seconds = int(elapsted_seconds - (minutes * 60) - (hours * 3600) - (days * 86400)) print("Time Elapsed: ", days, " ", ":", hours, " ", ":", minutes, " ", ":", seconds, " ")
nilq/baby-python
python
# -*- coding: utf-8 -*- # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Evaluation script for CMRC 2018 version: v5 Note: v5 formatted output, add usage description v4 fixed segmentation issues ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from collections import Counter, OrderedDict import string import re import argparse import json import sys import nltk import pdb # split Chinese with English def mixed_segmentation(in_str, rm_punc=False): in_str = in_str.lower().strip() segs_out = [] temp_str = "" sp_char = [ '-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(', ')', '-', '~', '『', '』' ] for char in in_str: if rm_punc and char in sp_char: continue if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char: if temp_str != "": ss = nltk.word_tokenize(temp_str) segs_out.extend(ss) temp_str = "" segs_out.append(char) else: temp_str += char #handling last part if temp_str != "": ss = nltk.word_tokenize(temp_str) segs_out.extend(ss) return segs_out # remove punctuation def remove_punctuation(in_str): in_str = in_str.lower().strip() sp_char = [ '-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(', ')', 
'-', '~', '『', '』' ] out_segs = [] for char in in_str: if char in sp_char: continue else: out_segs.append(char) return ''.join(out_segs) # find longest common string def find_lcs(s1, s2): m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)] mmax = 0 p = 0 for i in range(len(s1)): for j in range(len(s2)): if s1[i] == s2[j]: m[i + 1][j + 1] = m[i][j] + 1 if m[i + 1][j + 1] > mmax: mmax = m[i + 1][j + 1] p = i + 1 return s1[p - mmax:p], mmax # def evaluate(ground_truth_file, prediction_file): f1 = 0 em = 0 total_count = 0 skip_count = 0 for instances in ground_truth_file["data"]: for instance in instances["paragraphs"]: context_text = instance['context'].strip() for qas in instance['qas']: total_count += 1 query_id = qas['id'].strip() query_text = qas['question'].strip() answers = [ans["text"] for ans in qas["answers"]] if query_id not in prediction_file: sys.stderr.write('Unanswered question: {}\n'.format( query_id)) skip_count += 1 continue prediction = prediction_file[query_id] f1 += calc_f1_score(answers, prediction) em += calc_em_score(answers, prediction) f1_score = 100.0 * f1 / total_count em_score = 100.0 * em / total_count return f1_score, em_score, total_count, skip_count def calc_f1_score(answers, prediction): f1_scores = [] for ans in answers: ans_segs = mixed_segmentation(ans, rm_punc=True) prediction_segs = mixed_segmentation(prediction, rm_punc=True) lcs, lcs_len = find_lcs(ans_segs, prediction_segs) if lcs_len == 0: f1_scores.append(0) continue precision = 1.0 * lcs_len / len(prediction_segs) recall = 1.0 * lcs_len / len(ans_segs) f1 = (2 * precision * recall) / (precision + recall) f1_scores.append(f1) return max(f1_scores) def calc_em_score(answers, prediction): em = 0 for ans in answers: ans_ = remove_punctuation(ans) prediction_ = remove_punctuation(prediction) if ans_ == prediction_: em = 1 break return em def eval_file(dataset_file, prediction_file): ground_truth_file = json.load(open(dataset_file, 'r')) prediction_file = 
json.load(open(prediction_file, 'r')) F1, EM, TOTAL, SKIP = evaluate(ground_truth_file, prediction_file) AVG = (EM + F1) * 0.5 return EM, F1, AVG, TOTAL if __name__ == '__main__': EM, F1, AVG, TOTAL = eval_file(sys.argv[1], sys.argv[2]) print(EM) print(F1) print(TOTAL)
nilq/baby-python
python
#!/usr/bin/env python
"""
This is the base class to start the RESTful web service hosting the Blackboard API.

Module-level side effects: creates the Flask ``app``, configures rotating
file logging from ``logging.conf``, and registers the JSON 404 handler and
the per-request access log.  Run directly to serve the API.
"""
import logging.config
from logging.handlers import RotatingFileHandler
from time import strftime

from flask import Flask, Blueprint, request, jsonify

from blackboard_api import settings
from blackboard_api.api_1_0.blackboard import ns as blackboard
from blackboard_api.api_1_0.restplus import api
from blackboard_api.database import db

__author__ = 'Manfred von Teichman'
__version__ = '1.0'
__maintainer__ = 'Manfred von Teichman'
__email__ = 'vonteichman.m-tit14@it.dhbw-ravensburg.de'
__status__ = 'Development'

app = Flask(__name__)

# Setup the logging functionality: rotate app.log at ~1 MB keeping 3 backups;
# the base logger configuration is read from logging.conf at import time.
handler = RotatingFileHandler('app.log', maxBytes=1000000, backupCount=3)
logging.config.fileConfig('logging.conf')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(handler)


# Catch any 404 error and return it as a json response
@app.errorhandler(404)
def not_found(error):
    """Render 404s as JSON ({"error": ...}) instead of Flask's HTML page."""
    return jsonify(error=str(error)), 404


# Registers the logging functionality to run after each request.
@app.after_request
def after_request(response):
    """Write one access-log line: timestamp, addr, method, scheme, path, status."""
    timestamp = strftime('[%Y-%b-%d %H:%M]')
    log.info('%s %s %s %s %s %s', timestamp, request.remote_addr,
             request.method, request.scheme, request.full_path,
             response.status)
    return response


def configure_app(flask_app):
    """Copy the SQLAlchemy and Flask-RESTPlus options from settings onto the app."""
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
    flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
    flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION
    flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE
    flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER
    flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP


# Create the app using a factory, setup its dependencies and the base url given the set prefix.
def initialize_app(flask_app):
    """Configure the app, mount the API blueprint under /api/v1, and bind the DB."""
    configure_app(flask_app)
    blueprint = Blueprint('api', __name__, url_prefix='/api/v1')
    api.init_app(blueprint)
    api.add_namespace(blackboard)
    flask_app.register_blueprint(blueprint)
    db.init_app(flask_app)


# Initialize the app and run it on the pre-configured hostname and port.
def main():
    """Entry point: initialize the app and serve it with Flask's dev server."""
    initialize_app(app)
    app.run(debug=settings.FLASK_DEBUG, host=settings.FLASK_HOST, port=settings.FLASK_PORT)


if __name__ == '__main__':
    main()
nilq/baby-python
python
""" decoded AUTH_HEADER (newlines added for readability): { "identity": { "account_number": "1234", "internal": { "org_id": "5678" }, "type": "User", "user": { "email": "test@example.com", "first_name": "Firstname", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Lastname", "locale": "en_US", "username": "test_username" } } "entitlements": { "insights": { "is_entitled": true } } } """ AUTH_HEADER = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij" "EyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1Njc4In0sInR5cGUiOiJVc" "2VyIiwidXNlciI6eyJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz" "dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19pbnR" "lcm5hbCI6dHJ1ZSwiaXNfb3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIj" "oiTGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVzZXJuYW1lIjoidGVzd" "F91c2VybmFtZSJ9fSwiZW50aXRsZW1lbnRzIjp7Imluc2lnaHRzIjp7Imlz" "X2VudGl0bGVkIjp0cnVlfX19Cg==" } AUTH_HEADER_NO_ENTITLEMENTS = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij" "EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl" "cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi" "J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6" "IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW" "1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt" "aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG" "9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn" "X2lkIjoiNTY3OCJ9fX0KCg==" } AUTH_HEADER_SMART_MGMT_FALSE = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6" "IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi" "NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7" "ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp" "cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp" "c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi" "OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi" "dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl" "bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu" "dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg==" } # this can't happen in real life, adding test anyway AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = { "X-RH-IDENTITY": 
"eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf" "aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1" "c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv" "bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp" "c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0" "cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0" "X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu" "X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l" "In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu" "YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9" "fX0K" } """ decoded AUTH_HEADER_NO_ACCT (newlines added for readablity): { "identity": { "internal": { "org_id": "9999" }, "type": "User", "user": { "email": "nonumber@example.com", "first_name": "No", "is_active": true, "is_internal": true, "is_org_admin": false, "last_name": "Number", "locale": "en_US", "username": "nonumber" } } } """ AUTH_HEADER_NO_ACCT = { "X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO" "nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm" "9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo" "iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp" "dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX" "2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC" "JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo=" } FETCH_BASELINES_RESULT = [ { "id": "ff35596c-f98e-11e9-aea9-98fa9b07d419", "account": "1212729", "display_name": "baseline1", "fact_count": 1, "created": "2019-10-17T16:23:34.238952Z", "updated": "2019-10-17T16:25:34.041645Z", "baseline_facts": [{"name": "fqdn", "value": "test.example1.com"}], }, { "id": "89df6310-f98e-11e9-8a65-98fa9b07d419", "account": "1212729", "display_name": "baseline2", "fact_count": 1, "created": "2019-10-17T16:23:34.238952Z", "updated": "2019-10-17T16:25:34.041645Z", "baseline_facts": [{"name": "arch", "value": "golden"}], }, ] FETCH_SYSTEMS_WITH_PROFILES_CAPTURED_DATE_RESULT = [ { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa", "created": "2019-01-31T13:00:00.100010Z", "display_name": None, "fqdn": "fake_system_99.example.com", "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", "insights_id": 
"01791a58-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "captured_date": "2020-03-30T18:42:23+00:00", "salutation": "hello", "fqdn": "hostname_two", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], "cpu_flags": ["maryland"], "system_memory_bytes": 640, "yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}], "network_interfaces": [ { "name": "eth99", "mtu": 3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:02"], }, {"no_name": "foo"}, ], "system_profile_exists": True, "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", }, "tags": [], "updated": "2019-01-31T14:00:00.500000Z", }, { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": "hello", "fqdn": "fake_system_99.example.com", "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "captured_date": "2020-03-30T18:42:23+00:00", "salutation": "hi", "fqdn": "hostname_one", "system_profile_exists": True, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], "network_interfaces": [ { "name": "eth99", "mtu": 
3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:01"], }, {"no_name": "foo"}, ], }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }, { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": None, "fqdn": "hostname_one", "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "captured_date": "2020-03-30T18:42:23+00:00", "salutation": "hi", "fqdn": "hostname_one", "system_profile_exists": False, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], "network_interfaces": [ { "name": "eth99", "mtu": 3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:01"], }, {"no_name": "foo"}, ], }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }, ] FETCH_SYSTEMS_WITH_PROFILES_RESULT = [ { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa", "created": "2019-01-31T13:00:00.100010Z", "display_name": None, "fqdn": "fake_system_99.example.com", "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", "insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "salutation": "hello", "fqdn": "hostname_two", "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", 
"no_epoch-1.0-1.fc99.8088", ], "cpu_flags": ["maryland"], "system_memory_bytes": 640, "yum_repos": [{"name": "yummy", "enabled": False}, {"no_name": "bleh"}], "network_interfaces": [ { "name": "eth99", "mtu": 3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:02"], }, {"no_name": "foo"}, ], "enabled_services": ["insights_client"], "system_profile_exists": True, "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", }, "tags": [], "updated": "2019-01-31T14:00:00.500000Z", }, { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": "hello", "fqdn": "fake_system_99.example.com", "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "salutation": "hi", "fqdn": "hostname_one", "system_profile_exists": True, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], "network_interfaces": [ { "name": "eth99", "mtu": 3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:01"], }, {"no_name": "foo"}, ], }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }, { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": None, "fqdn": "hostname_one", "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", 
"00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "system_profile": { "salutation": "hi", "fqdn": "hostname_one", "system_profile_exists": False, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], "network_interfaces": [ { "name": "eth99", "mtu": 3, "ipv4_addresses": ["8.7.6.5"], "ipv6_addresses": ["00:00:01"], }, {"no_name": "foo"}, ], }, "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }, ] FETCH_SYSTEM_PROFILES_INV_SVC = """ { "count": 1, "total": 1, "page": 1, "per_page": 50, "results": [ { "id": "243926fa-262f-11e9-a632-c85b761454fa", "system_profile": { "arch": "x86_64", "bios_vendor": "SeaBIOS", "bios_version": "?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28", "cores_per_socket": 1, "cpu_flags": [ "fpu", "vme" ], "enabled_services": ["auditd", "chronyd", "crond" ], "infrastructure_type": "virtual", "infrastructure_vendor": "kvm", "installed_packages": ["0:bash-4.4.19-7.el8", "0:chrony-3.3-3.el8", "0:dnf-4.0.9.2-4.el8", "1:NetworkManager-1.14.0-14.el8"], "installed_services": [ "arp-ethers", "auditd", "autovt@", "chronyd", "cpupower"], "kernel_modules": [ "kvm", "pcspkr", "joydev", "xfs"], "last_boot_time": "2019-03-25T19:32:18", "network_interfaces": [ { "ipv4_addresses": ["127.0.0.1"], "ipv6_addresses": ["::1"], "mac_address": "00:00:00:00:00:00", "mtu": 65536, "name": "lo", "state": "UNKNOWN", "type": "loopback" }, { "ipv4_addresses": ["192.168.0.1"], "ipv6_addresses": ["fe80::5054:ff::0001"], "mac_address": "52:54:00:00:00:00", "mtu": 1500, "name": "eth0", "state": "UP", "type": "ether" } ], "number_of_cpus": 2, "number_of_sockets": 2, "os_kernel_version": "4.18.0", "running_processes": [ "watchdog/1", "systemd-logind", "md", "ksmd", "sshd" ], 
"system_memory_bytes": 1917988864, "yum_repos": [ { "base_url": "https://cdn.example.com/content/freedos/1.0/i386/os", "enabled": true, "gpgcheck": true, "name": "freedos 1.0 repo i386" }, { "base_url": "https://cdn.example.com/content/freedos/1.0/z80/os", "enabled": false, "gpgcheck": true, "name": "freedos 1.0 repo z80" } ] } } ], "total": 1 } """ FETCH_SYSTEMS_WITH_PROFILES_SAME_FACTS_RESULT = [ { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fa", "created": "2019-01-31T13:00:00.100010Z", "display_name": None, "system_profile": { "salutation": "howdy", "system_profile_exists": True, "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], }, "fqdn": "fake_system_99.example.com", "id": "fc1e497a-28ae-11e9-afd9-c85b761454fa", "insights_id": "01791a58-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], "mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "tags": [], "updated": "2019-01-31T14:00:00.500000Z", }, { "account": "9876543", "bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb", "created": "2018-01-31T13:00:00.100010Z", "display_name": None, "system_profile": { "salutation": "howdy", "system_profile_exists": True, "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z", "enabled_services": ["insights_client"], "installed_packages": [ "0:bash-4.4.23-6.fc29.x86_64", "this isn't parsable", "no_epoch-1.0-1.fc99.8088", ], }, "fqdn": "fake_system_99.example.com", "id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa", "insights_id": "00000000-28af-11e9-9ab0-c85b761454fa", "ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"], 
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"], "rhel_machine_id": None, "satellite_id": None, "subscription_manager_id": "RHN Classic and Red Hat Subscription Management", "tags": [], "updated": "2018-01-31T14:00:00.500000Z", }, ] FETCH_SYSTEM_TAGS = """ { "total": 1, "count": 1, "page": 1, "per_page": 50, "results": { "ec67f65c-2bc8-4ce8-82e2-6a27cada8d31": [ { "namespace": "insights-client", "key": "group", "value": "XmygroupX" } ] } } """ FETCH_SYSTEMS_INV_SVC = """ { "count": 2, "total": 2, "page": 1, "per_page": 50, "results": [ { "account": "1234567", "bios_uuid": "dc43976c263411e9bcf0c85b761454fa", "created": "2018-12-01T12:00:00.000000Z", "display_name": "system1.example.com", "fqdn": "system.example.com", "id": "243926fa-262f-11e9-a632-c85b761454fa", "insights_id": "TEST-ID00-0000-0000", "ip_addresses": [ "10.0.0.1", "10.0.0.2" ], "mac_addresses": [ "c2:00:d0:c8:00:01" ], "subscription_manager_id": "1234FAKE1234", "tags": [], "updated": "2018-12-31T12:00:00.000000Z", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z" }, { "account": "1234567", "bios_uuid": "ec43976c263411e9bcf0c85b761454fa", "created": "2018-12-01T12:00:00.000000Z", "display_name": "system2.example.com", "fqdn": "system2.example.com", "id": "264fb5b2-262f-11e9-9b12-c85b761454fa", "insights_id": "TEST-ID22-2222-2222", "ip_addresses": [ "10.0.0.3", "10.0.0.4" ], "mac_addresses": [ "ec2:00:d0:c8:00:01" ], "subscription_manager_id": "2222FAKE2222", "tags": [], "updated": "2018-12-31T12:00:00.000000Z", "stale_warning_timestamp": "2018-12-31T12:00:00.000000Z" } ]}""" SYSTEM_NOT_FOUND_TEMPLATE = """ { "count": 0, "page": 1, "per_page": 50, "results": [], "total": 0 } """
nilq/baby-python
python
""" Plot a traced WE trajectory onto 2D plots. # TODO: integrate into h5_plot """ import numpy as np import matplotlib.pyplot as plt import h5py def get_parents(walker_tuple, h5_file): it, wlk = walker_tuple parent = h5_file[f"iterations/iter_{it:08d}"]["seg_index"]["parent_id"][wlk] return it-1, parent def trace_walker(walker_tuple, h5_file): # Unroll the tuple into iteration/walker it, wlk = walker_tuple # Initialize our path path = [(it,wlk)] # And trace it while it > 1: it, wlk = get_parents((it, wlk), h5_file) path.append((it,wlk)) return np.array(sorted(path, key=lambda x: x[0])) def get_aux(path, h5_file, aux_name): # Initialize a list for the pcoords aux_coords = [] # Loop over the path and get the pcoords for each walker for it, wlk in path: # Here we are taking every 10 time points, feel free to adjust to see what that does aux_coords.append(h5_file[f'iterations/iter_{it:08d}/auxdata/{str(aux_name)}'][wlk][::10]) #pcoords.append(h5_file[f'iterations/iter_{it:08d}']['pcoord'][wlk][::10,:]) return np.array(aux_coords) def plot_trace(h5, walker_tuple, aux_x, aux_y=None, evolution=False, ax=None): """ Plot trace. 
""" if ax is None: fig, ax = plt.subplots(figsize=(7,5)) else: fig = plt.gcf() it, wlk = walker_tuple with h5py.File(h5, "r") as w: # adjustments for plothist evolution of only aux_x data if evolution: # split iterations up to provide y-values for each x-value (pcoord) iter_split = [i + (j/aux_x.shape[1]) for i in range(0, it) for j in range(0, aux_x.shape[1])] ax.plot(aux_x[:,0], iter_split, c="black", lw=2) ax.plot(aux_x[:,0], iter_split, c="white", lw=1) return path = trace_walker((it, wlk), w) # And pull aux_coords for the path calculated aux_x = get_aux(path, w, aux_x) aux_y = get_aux(path, w, aux_y) ax.plot(aux_x[:,0], aux_y[:,0], c="black", lw=2) ax.plot(aux_x[:,0], aux_y[:,0], c="cyan", lw=1) # from h5_plot_main import * # data_options = {"data_type" : "average", # "p_max" : 20, # "p_units" : "kcal", # "last_iter" : 200, # "bins" : 100 # } # h5 = "1a43_v02/wcrawl/west_i200_crawled.h5" # aux_x = "1_75_39_c2" # aux_y = "M2Oe_M1He1" # X, Y, Z = pdist_to_normhist(h5, aux_x, aux_y, **data_options) # levels = np.arange(0, data_options["p_max"] + 1, 1) # plt.contour(X, Y, Z, levels=levels, colors="black", linewidths=1) # plt.contourf(X, Y, Z, levels=levels, cmap="gnuplot_r") # plt.colorbar() # from search_aux import * # # for 1A43 V02: C2 and Dist M2-M1 - minima at val = 53° and 2.8A is alt minima = i173 s70 # iter, seg = search_aux_xy_nn(h5, aux_x, aux_y, 53, 2.8, data_options["last_iter"]) # plot_trace(h5, (iter,seg), aux_x, aux_y) # plt.show()
nilq/baby-python
python
import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin class Passthrough(BaseEstimator, TransformerMixin): """ Class for passing through features that require no preprocessing. https://stackoverflow.com/questions/54592115/appending-the-columntransformer-result-to-the-original-data-within-a-pipeline """ def fit(self, X, y=None): return self def transform(self, X): # Single-column data frames are Pandas series, which Scikit-learn doesn't know how to deal with. Make sure that # result is always a data frame. X = pd.DataFrame(X) return X
nilq/baby-python
python
from bs4 import BeautifulSoup as soup html = """ <html> <body> <ul> <li><a href="http://www.naver.com">NAVER</a></li> <li><a href="http://www.daum.net">DAUM</a></li> </ul> </body> </html> """ content = soup(html, "html.parser") links = content.find_all("a") for a in links: print(a.string, " > ", a.attrs["href"])
nilq/baby-python
python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
    tags_type,
    get_enum_type,
    resource_group_name_type,
    get_location_type
)


def load_arguments(self, _):
    """Register argument metadata for every `az internet-analyzer` command.

    Called by the azure-cli command loader; `self` is the command loader and
    the second (ignored) parameter is the command string being loaded.
    Each `argument_context` block below configures one command's parameters.
    """

    # ---- internet-analyzer profile commands --------------------------------
    with self.argument_context('internet-analyzer profile create') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be created')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', tags_type)
        c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')
        c.argument('etag', id_part=None, help='Gets a unique read-only string that changes whenever the resource is updated.')

    with self.argument_context('internet-analyzer profile update') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be updated')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', tags_type)
        c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')
        c.argument('etag', id_part=None, help='Gets a unique read-only string that changes whenever the resource is updated.')

    with self.argument_context('internet-analyzer profile delete') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to be deleted')

    with self.argument_context('internet-analyzer profile list') as c:
        c.argument('resource_group', resource_group_name_type)

    with self.argument_context('internet-analyzer profile show') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the Internet Analyzer profile to show')

    # ---- preconfigured endpoint commands -----------------------------------
    with self.argument_context('internet-analyzer preconfigured-endpoint list') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile for which to list preconfigured endpoints')

    # ---- internet-analyzer test commands -----------------------------------
    with self.argument_context('internet-analyzer test create') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the new test should be created')
        c.argument('name', id_part=None, help='The name of the Internet Analyzer test to be created')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', tags_type)
        c.argument('description', id_part=None, help='The description of the details or intents of the test')
        # Endpoint A is the control; endpoint B is the endpoint under comparison.
        c.argument('endpoint_a_name', id_part=None, help='The name of the control endpoint')
        c.argument('endpoint_a_endpoint', id_part=None, help='The URL of the control endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
        c.argument('endpoint_b_name', id_part=None, help='The name of the other endpoint')
        c.argument('endpoint_b_endpoint', id_part=None, help='The URL of the other endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
        c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The initial of the test')

    with self.argument_context('internet-analyzer test update') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
        c.argument('name', id_part=None, help='The name of the Internet Analyzer test to be updated')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', tags_type)
        c.argument('description', id_part=None, help='The description of the details or intents of the test')
        c.argument('endpoint_a_name', id_part=None, help='The name of the control endpoint')
        c.argument('endpoint_a_endpoint', id_part=None, help='The URL of the control endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
        c.argument('endpoint_b_name', id_part=None, help='The name of the other endpoint')
        c.argument('endpoint_b_endpoint', id_part=None, help='The URL of the other endpoint in <hostname>[/<custom-path>] format (e.g., www.contoso.com or www.contoso.com/some/path/to/trans.gif). Must support HTTPS. If an object path isn\'t specified explicitly, Internet Analyzer will use "/apc/trans.gif" as the object path by default, which is where the preconfigured endpoints are hosting the one-pixel image.')
        c.argument('enabled_state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The state of the Experiment')

    with self.argument_context('internet-analyzer test delete') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
        c.argument('name', id_part=None, help='The name of the Internet Analyzer test to delete')

    with self.argument_context('internet-analyzer test list') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile for which to list tests')

    with self.argument_context('internet-analyzer test show') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('profile_name', id_part=None, help='The name of the Internet Analyzer profile under which the test exists')
        c.argument('name', id_part=None, help='The name of the Internet Analyzer test to show')
nilq/baby-python
python
from .base import AttackMetric from ...tags import * from ...text_process.tokenizer import Tokenizer class JaccardWord(AttackMetric): NAME = "Jaccard Word Similarity" def __init__(self, tokenizer : Tokenizer): """ Args: tokenizer: A tokenizer that will be used in this metric. Must be an instance of :py:class:`.Tokenizer` """ self.tokenizer = tokenizer @property def TAGS(self): if hasattr(self.tokenizer, "TAGS"): return self.tokenizer.TAGS return set() def calc_score(self, sentA : str, sentB : str) -> float: """ Args: sentA: First sentence. sentB: Second sentence. Returns: Jaccard word similarity of two sentences. """ tokenA = self.tokenizer.tokenize(sentA, pos_tagging=False) tokenB = self.tokenizer.tokenize(sentB, pos_tagging=False) AS=set() BS=set() for i in range(len(tokenA)): AS.add(tokenA[i]) for i in range(len(tokenB)): BS.add(tokenB[i]) return len(AS&BS)/len(AS|BS) def after_attack(self, input, adversarial_sample): if adversarial_sample is not None: return self.calc_score( input["x"], adversarial_sample ) return None
nilq/baby-python
python
from flask import Flask, render_template, request app = Flask(__name__) @app.route("/") def main(): return render_template("main_better.html") # getting basic user data @app.route('/ask/', methods=['POST', 'GET']) def ask(): if request.method == 'GET': return render_template('ask.html') else: try: return render_template('ask.html', name=request.form['name'], student=request.form['student']) except: return render_template('ask.html') # @app.route('/profile/<name>/') def hello_name(name): return render_template('profile.html', name=name) app.run()
nilq/baby-python
python
from keras.layers import Layer
from keras_contrib.layers.normalization.instancenormalization import InputSpec
import numpy as np
import matplotlib.image as mpimg
from progress.bar import Bar
import datetime
import time
import json
import csv
import os

import keras.backend as K
import tensorflow as tf

from skimage.transform import resize


class ReflectionPadding2D(Layer):
    """2-D reflection-padding layer: pads height and width by mirroring the border.

    Expects NHWC input (4-D); `padding` is (width_pad, height_pad).
    """

    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        self.input_spec = [InputSpec(ndim=4)]
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, s):
        # Height and width each grow by twice their respective padding.
        return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])

    def call(self, x, mask=None):
        w_pad, h_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')


class ImagePool():
    """History buffer of previously generated (synthetic) images.

    Used to update the discriminators with a mix of current and past
    generator outputs, which stabilises CycleGAN training.
    """

    def __init__(self, pool_size):
        # pool_size == 0 disables the pool entirely (query() passes through).
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch drawn from the incoming images and/or the pool.

        While the pool is filling, images are stored and returned unchanged.
        Once full, each incoming image has a 50% chance of being swapped for
        a randomly chosen pooled image (the incoming one replaces it).
        """
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images:
            if len(image.shape) == 3:
                image = image[np.newaxis, :, :, :]
            if self.num_imgs < self.pool_size:
                # Fill up the image pool.
                self.num_imgs = self.num_imgs + 1
                if len(self.images) == 0:
                    self.images = image
                else:
                    self.images = np.vstack((self.images, image))

                if len(return_images) == 0:
                    return_images = image
                else:
                    return_images = np.vstack((return_images, image))
            else:
                # 50% chance that we replace an old synthetic image.
                p = np.random.rand()
                if p > 0.5:
                    random_id = np.random.randint(0, self.pool_size)
                    tmp = self.images[random_id, :, :, :]
                    tmp = tmp[np.newaxis, :, :, :]
                    self.images[random_id, :, :, :] = image[0, :, :, :]
                    if len(return_images) == 0:
                        return_images = tmp
                    else:
                        return_images = np.vstack((return_images, tmp))
                else:
                    if len(return_images) == 0:
                        return_images = image
                    else:
                        return_images = np.vstack((return_images, image))

        return return_images


def load_data(subfolder='', generator=False):
    """Load the trainA/trainB/testA/testB image folders under data/<subfolder>.

    Images are resized to 200x200 and rescaled from [0, 1] to [-1, 1].
    Returns a dict with the image arrays, their file names, and the
    original image size / channel count.
    """
    def create_image_array(image_list, image_path, image_size, nr_of_channels):
        bar = Bar('Loading...', max=len(image_list))

        # Pre-allocate the output array.
        # NOTE(review): rows for skipped non-image files are left
        # uninitialised (np.empty) — assumes the folders contain only
        # images; verify against the data layout.
        image_array = np.empty((len(image_list),) + (image_size) + (nr_of_channels,))
        i = 0
        for image_name in image_list:
            # If file is an image... (cheap extension check to avoid e.g. thumbs.db)
            if image_name[-1].lower() == 'g':
                # Load image and convert into np.array
                image = mpimg.imread(os.path.join(image_path, image_name))  # Normalized to [0,1]
                # image = np.array(Image.open(os.path.join(image_path, image_name)))
                image = resize(image, (200, 200))

                # Add third dimension if image is 2D (gray scale -> MR image).
                if nr_of_channels == 1:
                    image = image[:, :, np.newaxis]

                # Rescale [0, 1] -> [-1, 1] for tanh generator outputs.
                image = image * 2 - 1
                # image = image / 127.5 - 1

                image_array[i, :, :, :] = image
                i += 1
                bar.next()
        bar.finish()

        return image_array

    # Image paths
    trainA_path = os.path.join('data', subfolder, 'trainA')
    trainB_path = os.path.join('data', subfolder, 'trainB')
    testA_path = os.path.join('data', subfolder, 'testA')
    testB_path = os.path.join('data', subfolder, 'testB')

    # Image file names
    trainA_image_names = sorted(os.listdir(trainA_path))
    trainB_image_names = sorted(os.listdir(trainB_path))
    testA_image_names = sorted(os.listdir(testA_path))
    testB_image_names = sorted(os.listdir(testB_path))

    # Examine one image to get size and number of channels.
    im_test = mpimg.imread(os.path.join(trainA_path, trainA_image_names[0]))
    # im_test = np.array(Image.open(os.path.join(trainA_path, trainA_image_names[0])))
    if len(im_test.shape) == 2:
        image_size = im_test.shape
        nr_of_channels = 1
    else:
        image_size = im_test.shape[0:-1]
        nr_of_channels = im_test.shape[-1]

    trainA_images = create_image_array(trainA_image_names, trainA_path, (200, 200), nr_of_channels)
    trainB_images = create_image_array(trainB_image_names, trainB_path, (200, 200), nr_of_channels)
    testA_images = create_image_array(testA_image_names, testA_path, (200, 200), nr_of_channels)
    testB_images = create_image_array(testB_image_names, testB_path, (200, 200), nr_of_channels)

    return {"image_size": image_size,
            "nr_of_channels": nr_of_channels,
            "trainA_images": trainA_images,
            "trainB_images": trainB_images,
            "testA_images": testA_images,
            "testB_images": testB_images,
            "trainA_image_names": trainA_image_names,
            "trainB_image_names": trainB_image_names,
            "testA_image_names": testA_image_names,
            "testB_image_names": testB_image_names}


def write_metadata_to_JSON(model, opt):
    """Dump the run configuration (hyperparameters, dataset sizes) to meta_data.json."""
    data = {}
    data['meta_data'] = []
    data['meta_data'].append({
        'img shape: height,width,channels': opt['img_shape'],
        'batch size': opt['batch_size'],
        'save training img interval': opt['save_training_img_interval'],
        'normalization function': str(model['normalization']),
        'lambda_ABA': opt['lambda_ABA'],
        'lambda_BAB': opt['lambda_BAB'],
        'lambda_adversarial': opt['lambda_adversarial'],
        'learning_rate_D': opt['learning_rate_D'],
        'learning rate G': opt['learning_rate_G'],
        'epochs': opt['epochs'],
        'use linear decay on learning rates': opt['use_linear_decay'],
        'epoch where learning rate linear decay is initialized (if use_linear_decay)': opt['decay_epoch'],
        'generator iterations': opt['generator_iterations'],
        'discriminator iterations': opt['discriminator_iterations'],
        'use patchGan in discriminator': opt['use_patchgan'],
        'beta 1': opt['beta_1'],
        'beta 2': opt['beta_2'],
        'REAL_LABEL': opt['REAL_LABEL'],
        'number of A train examples': len(opt['A_train']),
        'number of B train examples': len(opt['B_train']),
        'number of A test examples': len(opt['A_test']),
        'number of B test examples': len(opt['B_test']),
        'discriminator sigmoid': opt['discriminator_sigmoid'],
        'resize convolution': opt['use_resize_convolution'],
    })

    with open('{}/meta_data.json'.format(opt['out_dir']), 'w') as outfile:
        json.dump(data, outfile, sort_keys=True)


def write_loss_data_to_file(opt, history):
    """Write the per-iteration loss history to a CSV, one column per loss key."""
    keys = sorted(history.keys())
    with open('images/{}/loss_output.csv'.format(opt['date_time']), 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(keys)
        # zip(*columns) transposes the per-key series into CSV rows.
        writer.writerows(zip(*[history[key] for key in keys]))


def join_and_save(opt, images, save_path):
    """Horizontally concatenate *images* and save the strip to *save_path*."""
    image = np.hstack(images)

    # Grayscale data is stored as (H, W, 1); drop the channel axis for imsave.
    if opt['channels'] == 1:
        image = image[:, :, 0]

    mpimg.imsave(save_path, image, vmin=-1, vmax=1, cmap='gray')


def save_epoch_images(model, opt, epoch, num_saved_images=1):
    """Save real/synthetic/reconstructed image strips for one random train pair and the first test pair."""
    # Save training images
    nr_train_im_A = opt['A_train'].shape[0]
    nr_train_im_B = opt['B_train'].shape[0]

    rand_ind_A = np.random.randint(nr_train_im_A)
    rand_ind_B = np.random.randint(nr_train_im_B)

    real_image_A = opt['A_train'][rand_ind_A]
    real_image_B = opt['B_train'][rand_ind_B]
    synthetic_image_B = model['G_A2B'].predict(real_image_A[np.newaxis])[0]
    synthetic_image_A = model['G_B2A'].predict(real_image_B[np.newaxis])[0]
    reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
    reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]

    save_path_A = '{}/train_A/epoch{}.png'.format(opt['out_dir'], epoch)
    save_path_B = '{}/train_B/epoch{}.png'.format(opt['out_dir'], epoch)

    if opt['paired_data']:
        # With paired data, also show the ground-truth counterpart.
        real_image_Ab = opt['B_train'][rand_ind_A]
        real_image_Ba = opt['A_train'][rand_ind_B]
        join_and_save(opt, (real_image_Ab, real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
        join_and_save(opt, (real_image_Ba, real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
    else:
        join_and_save(opt, (real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
        join_and_save(opt, (real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)

    # Save test images
    real_image_A = opt['A_test'][0]
    real_image_B = opt['B_test'][0]
    synthetic_image_B = model['G_A2B'].predict(real_image_A[np.newaxis])[0]
    synthetic_image_A = model['G_B2A'].predict(real_image_B[np.newaxis])[0]
    reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
    reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]

    save_path_A = '{}/test_A/epoch{}.png'.format(opt['out_dir'], epoch)
    save_path_B = '{}/test_B/epoch{}.png'.format(opt['out_dir'], epoch)

    if opt['paired_data']:
        real_image_Ab = opt['B_test'][0]
        real_image_Ba = opt['A_test'][0]
        join_and_save(opt, (real_image_Ab, real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
        join_and_save(opt, (real_image_Ba, real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)
    else:
        join_and_save(opt, (real_image_A, synthetic_image_B, reconstructed_image_A), save_path_A)
        join_and_save(opt, (real_image_B, synthetic_image_A, reconstructed_image_B), save_path_B)


def save_tmp_images(model, opt, real_image_A, real_image_B, synthetic_image_A, synthetic_image_B):
    """Best-effort save of a progress snapshot to <out_dir>/tmp.png during training."""
    try:
        reconstructed_image_A = model['G_B2A'].predict(synthetic_image_B[np.newaxis])[0]
        reconstructed_image_B = model['G_A2B'].predict(synthetic_image_A[np.newaxis])[0]

        real_images = np.vstack((real_image_A, real_image_B))
        synthetic_images = np.vstack((synthetic_image_B, synthetic_image_A))
        reconstructed_images = np.vstack((reconstructed_image_A, reconstructed_image_B))

        save_path = '{}/tmp.png'.format(opt['out_dir'])
        join_and_save(opt, (real_images, synthetic_images, reconstructed_images), save_path)
    except Exception:
        # Deliberate best-effort: ignore failures (e.g. the file is open
        # in a viewer) — this is only a progress snapshot.
        pass


def get_lr_linear_decay_rate(opt):
    """Compute per-update learning-rate decrements for a linear decay to zero at the last epoch.

    The discriminators are updated twice per batch (real + synthetic),
    hence the factor 2 on their updates per epoch.

    Returns:
        (decay_D, decay_G) per-update decrements.
    """
    # max_nr_images = max(len(opt['A_train']), len(opt['B_train']))
    nr_train_im_A = opt['A_train'].shape[0]
    nr_train_im_B = opt['B_train'].shape[0]
    nr_batches_per_epoch = int(np.ceil(np.max((nr_train_im_A, nr_train_im_B)) / opt['batch_size']))

    updates_per_epoch_D = 2 * nr_batches_per_epoch
    updates_per_epoch_G = nr_batches_per_epoch
    nr_decay_updates_D = (opt['epochs'] - opt['decay_epoch'] + 1) * updates_per_epoch_D
    nr_decay_updates_G = (opt['epochs'] - opt['decay_epoch'] + 1) * updates_per_epoch_G
    decay_D = opt['learning_rate_D'] / nr_decay_updates_D
    decay_G = opt['learning_rate_G'] / nr_decay_updates_G

    return decay_D, decay_G


def update_lr(model, decay):
    """Decrement *model*'s optimizer learning rate by *decay*, clamping at zero."""
    new_lr = K.get_value(model.optimizer.lr) - decay
    if new_lr < 0:
        new_lr = 0
    # print(K.get_value(model.optimizer.lr))
    K.set_value(model.optimizer.lr, new_lr)


def print_ETA(opt, start_time, epoch, nr_im_per_epoch, loop_index):
    """Print elapsed time and estimated time to completion for the run."""
    passed_time = time.time() - start_time

    iterations_so_far = ((epoch - 1) * nr_im_per_epoch + loop_index) / opt['batch_size']
    iterations_total = opt['epochs'] * nr_im_per_epoch / opt['batch_size']
    iterations_left = iterations_total - iterations_so_far
    # Small epsilon guards against division by zero on the very first call.
    eta = round(passed_time / (iterations_so_far + 1e-5) * iterations_left)

    passed_time_string = str(datetime.timedelta(seconds=round(passed_time)))
    eta_string = str(datetime.timedelta(seconds=eta))
    print('Elapsed time', passed_time_string, ': ETA in', eta_string)


def save_model(opt, model, epoch):
    """Save *model*'s weights (HDF5) and architecture (JSON) under saved_models/<date_time>/."""
    # Create folder to save model architecture and weights.
    directory = os.path.join('saved_models', opt['date_time'])
    if not os.path.exists(directory):
        os.makedirs(directory)

    weights_path = '{}/{}_weights_epoch_{}.hdf5'.format(directory, model.name, epoch)
    model.save_weights(weights_path)

    # Bug fix: the original also called model.save_weights(model_path),
    # writing an HDF5 blob to the .json path only for json.dump below to
    # truncate and overwrite it — wasted work and a transiently corrupt file.
    model_path = '{}/{}_model_epoch_{}.json'.format(directory, model.name, epoch)
    json_string = model.to_json()
    with open(model_path, 'w') as outfile:
        json.dump(json_string, outfile)
    print('{} has been saved in saved_models/{}/'.format(model.name, opt['date_time']))
nilq/baby-python
python
import time from umqtt.simple import MQTTClient def sub_cb(topic, msg): print((topic, msg)) c = MQTTClient("uqmtt_client", "localhost") c.connect() c.subscribe(b"foo_topic") c.publish(b"foo_topic", b"hello") while 1: c.wait_msg() c.disconnect()
nilq/baby-python
python
import math import os import pickle import sys import gym import numpy as np import quaternion import torch from torch.nn import functional as F from torchvision import transforms import skimage.morphology from PIL import Image import matplotlib if matplotlib.get_backend() == "agg": print("matplot backend is {}".format(matplotlib.get_backend())) # matplotlib.use('TkAgg') import matplotlib.pyplot as plt from .utils.map_builder import MapBuilder from .utils.fmm_planner import FMMPlanner from .utils.noisy_actions import CustomActionSpaceConfiguration from .utils.supervision import HabitatMaps from .utils.grid import get_grid, get_grid_full from .utils import pose as pu from .utils import visualizations as vu import habitat from habitat import logger from habitat.config.default import get_config as cfg_env from habitat.datasets.pointnav.pointnav_dataset import PointNavDatasetV1 from habitat_baselines.config.default import get_config as cfg_baseline import onpolicy def _preprocess_depth(depth): depth = depth[:, :, 0]*1 mask2 = depth > 0.99 depth[mask2] = 0. for i in range(depth.shape[1]): depth[:, i][depth[:, i] == 0.] = depth[:, i].max() mask1 = depth == 0 depth[mask1] = np.NaN depth = depth * 1000. 
return depth class Exploration_Env(habitat.RLEnv): def __init__(self, args, config_env, config_baseline, dataset, run_dir): self.args = args self.run_dir = run_dir self.num_agents = args.num_agents self.use_restrict_map = args.use_restrict_map self.use_complete_reward = args.use_complete_reward self.use_time_penalty = args.use_time_penalty self.use_repeat_penalty = args.use_repeat_penalty self.reward_decay = args.reward_decay self.use_render = args.use_render self.render_merge = args.render_merge self.save_gifs = args.save_gifs self.map_resolution = args.map_resolution self.map_size_cm = args.map_size_cm self.num_actions = 3 self.dt = 10 self.reward_gamma = 1 self.sensor_noise_fwd = \ pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_fwd.pkl", 'rb')) self.sensor_noise_right = \ pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_right.pkl", 'rb')) self.sensor_noise_left = \ pickle.load(open(onpolicy.__path__[0] + "/envs/habitat/model/noise_models/sensor_noise_left.pkl", 'rb')) habitat.SimulatorActions.extend_action_space("NOISY_FORWARD") habitat.SimulatorActions.extend_action_space("NOISY_RIGHT") habitat.SimulatorActions.extend_action_space("NOISY_LEFT") config_env.defrost() config_env.SIMULATOR.ACTION_SPACE_CONFIG = "CustomActionSpaceConfiguration" config_env.freeze() super().__init__(config_env, dataset) self.scene_name = self.habitat_env.sim.config.SCENE if "replica" in self.scene_name: self.scene_id = self.scene_name.split("/")[-3] else: self.scene_id = self.scene_name.split("/")[-1].split(".")[0] self.action_space = gym.spaces.Discrete(self.num_actions) self.observation_space = gym.spaces.Box(0, 255, (3, args.frame_height, args.frame_width), dtype='uint8') self.share_observation_space = gym.spaces.Box(0, 255, (3, args.frame_height, args.frame_width), dtype='uint8') self.mapper = [] for _ in range(self.num_agents): self.mapper.append(self.build_mapper()) self.curr_loc = [] self.last_loc = 
[] self.curr_loc_gt = [] self.last_loc_gt = [] self.last_sim_location = [] self.map = [] self.explored_map = [] self.episode_no = 0 self.res = transforms.Compose([transforms.ToPILImage(), transforms.Resize((args.frame_height, args.frame_width), interpolation=Image.NEAREST)]) self.maps_dict = [] for _ in range(self.num_agents): self.maps_dict.append({}) if self.use_render: plt.ion() self.figure, self.ax = plt.subplots(self.num_agents, 3, figsize=(6*16/9, 6), facecolor="whitesmoke", num="Scene {} Map".format(self.scene_id)) if args.render_merge: self.figure_m, self.ax_m = plt.subplots(1, 2, figsize=(6*16/9, 6), facecolor="whitesmoke", num="Scene {} Merge Map".format(self.scene_id)) def randomize_env(self): self._env._episode_iterator._shuffle_iterator() def save_trajectory_data(self): traj_dir = '{}/trajectory/{}/'.format(self.run_dir, self.scene_id) if not os.path.exists(traj_dir): os.makedirs(traj_dir) for agent_id in range(self.num_agents): filepath = traj_dir + 'episode' + str(self.episode_no) +'_agent' + str(agent_id) + ".txt" with open(filepath, "w+") as f: f.write(self.scene_name + "\n") for state in self.trajectory_states[i]: f.write(str(state)+"\n") f.flush() def save_position(self): self.agent_state = [] for agent_id in range(self.num_agents): self.agent_state.append(self._env.sim.get_agent_state()) self.trajectory_states[agent_id].append([self.agent_state[agent_id].position, self.agent_state[agent_id].rotation]) def reset(self): self.reward_gamma = 1 self.episode_no += 1 self.timestep = 0 self._previous_action = None self.trajectory_states = [[] for _ in range(self.num_agents)] self.explored_ratio_step = np.ones(self.num_agents) * (-1.0) self.merge_explored_ratio_step = -1.0 self.explored_ratio_threshold = 0.9 self.merge_ratio = 0 self.ratio = np.zeros(self.num_agents) if self.args.randomize_env_every > 0: if np.mod(self.episode_no, self.args.randomize_env_every) == 0: self.randomize_env() # Get Ground Truth Map self.explorable_map = [] self.n_rot = [] 
self.n_trans = [] self.init_theta = [] self.agent_n_rot = [[] for agent_id in range(self.num_agents)] self.agent_n_trans = [[] for agent_id in range(self.num_agents)] self.agent_st = [] obs = super().reset() full_map_size = self.map_size_cm//self.map_resolution # 480 for agent_id in range(self.num_agents): mapp, n_rot, n_trans, init_theta = self._get_gt_map(full_map_size, agent_id) self.explorable_map.append(mapp) self.n_rot.append(n_rot) self.n_trans.append(n_trans) self.init_theta.append(init_theta) for aa in range(self.num_agents): for a in range(self.num_agents): delta_st = self.agent_st[a] - self.agent_st[aa] delta_rot_mat, delta_trans_mat, delta_n_rot_mat, delta_n_trans_mat =\ get_grid_full(delta_st, (1, 1, self.grid_size, self.grid_size), (1, 1, full_map_size, full_map_size), torch.device("cpu")) self.agent_n_rot[aa].append(delta_n_rot_mat.numpy()) self.agent_n_trans[aa].append(delta_n_trans_mat.numpy()) self.merge_pred_map = np.zeros_like(self.explorable_map[0]) self.prev_merge_exlored_map = np.zeros_like(self.explorable_map[0]) self.prev_explored_area = [0. 
for _ in range(self.num_agents)] self.prev_merge_explored_area = 0 # Preprocess observations rgb = [obs[agent_id]['rgb'].astype(np.uint8) for agent_id in range(self.num_agents)] self.obs = rgb # For visualization if self.args.frame_width != self.args.env_frame_width: rgb = [np.asarray(self.res(rgb[agent_id])) for agent_id in range(self.num_agents)] state = [rgb[agent_id].transpose(2, 0, 1) for agent_id in range(self.num_agents)] depth = [_preprocess_depth(obs[agent_id]['depth']) for agent_id in range(self.num_agents)] # Initialize map and pose self.curr_loc = [] self.curr_loc_gt = [] self.last_loc_gt = [] self.last_loc = [] self.last_sim_location = [] for agent_id in range(self.num_agents): self.mapper[agent_id].reset_map(self.map_size_cm) self.curr_loc.append([self.map_size_cm/100.0/2.0, self.map_size_cm/100.0/2.0, 0.]) self.curr_loc_gt.append([self.map_size_cm/100.0/2.0, self.map_size_cm/100.0/2.0, 0.]) self.last_loc_gt.append([self.map_size_cm/100.0/2.0, self.map_size_cm/100.0/2.0, 0.]) self.last_loc.append(self.curr_loc[agent_id]) self.last_sim_location.append(self.get_sim_location(agent_id)) # Convert pose to cm and degrees for mapper mapper_gt_pose = [] for agent_id in range(self.num_agents): mapper_gt_pose.append( (self.curr_loc_gt[agent_id][0]*100.0, self.curr_loc_gt[agent_id][1]*100.0, np.deg2rad(self.curr_loc_gt[agent_id][2])) ) fp_proj = [] fp_explored = [] self.map = [] self.explored_map = [] self.current_explored_gt = [] # Update ground_truth map and explored area for agent_id in range(self.num_agents): fp_proj_t, map_t, fp_explored_t, explored_map_t, current_explored_gt = \ self.mapper[agent_id].update_map(depth[agent_id], mapper_gt_pose[agent_id]) fp_proj.append(fp_proj_t) self.map.append(map_t) fp_explored.append(fp_explored_t) self.explored_map.append(explored_map_t) self.current_explored_gt.append(current_explored_gt) # Initialize variables self.merge_pred_map = np.zeros_like(self.explorable_map[0]) self.scene_name = 
self.habitat_env.sim.config.SCENE self.visited = [np.zeros(self.map[0].shape) for _ in range(self.num_agents)] self.visited_vis = [np.zeros(self.map[0].shape) for _ in range(self.num_agents)] self.visited_gt = [np.zeros(self.map[0].shape) for _ in range(self.num_agents)] self.collison_map = [np.zeros(self.map[0].shape) for _ in range(self.num_agents)] self.col_width = [1 for _ in range(self.num_agents)] # Set info self.info = { 'time': [], 'fp_proj': [], 'fp_explored': [], 'sensor_pose': [], 'pose_err': [], } for agent_id in range(self.num_agents): self.info['time'].append(self.timestep) self.info['fp_proj'].append(fp_proj[agent_id]) self.info['fp_explored'].append(fp_explored[agent_id]) self.info['sensor_pose'].append([0., 0., 0.]) self.info['pose_err'].append([0., 0., 0.]) self.info['trans'] = self.n_trans self.info['rotation'] = self.n_rot self.info['theta'] = self.init_theta self.info['agent_trans'] = self.agent_n_trans self.info['agent_rotation'] = self.agent_n_rot self.info['explorable_map'] = self.explorable_map self.info['scene_id'] = self.scene_id self.save_position() return state, self.info def step(self, action): self.timestep += 1 noisy_action = [] # Action remapping for agent_id in range(self.num_agents): if action[agent_id] == 2: # Forward action[agent_id] = 1 noisy_action.append(habitat.SimulatorActions.NOISY_FORWARD) elif action[agent_id] == 1: # Right action[agent_id] = 3 noisy_action.append(habitat.SimulatorActions.NOISY_RIGHT) elif action[agent_id] == 0: # Left action[agent_id] = 2 noisy_action.append(habitat.SimulatorActions.NOISY_LEFT) for agent_id in range(self.num_agents): self.last_loc[agent_id] = np.copy(self.curr_loc[agent_id]) self.last_loc_gt[agent_id] = np.copy(self.curr_loc_gt[agent_id]) self._previous_action = action obs = [] rew = [] done = [] info = [] for agent_id in range(self.num_agents): if self.args.noisy_actions: obs_t, rew_t, done_t, info_t = super().step(noisy_action[agent_id], agent_id) else: obs_t, rew_t, done_t, info_t = 
super().step(action[agent_id], agent_id) obs.append(obs_t) rew.append(rew_t) done.append(done_t) info.append(info_t) # Preprocess observations rgb = [obs[agent_id]['rgb'].astype(np.uint8) for agent_id in range(self.num_agents)] self.obs = rgb # For visualization if self.args.frame_width != self.args.env_frame_width: rgb = [np.asarray(self.res(rgb[agent_id])) for agent_id in range(self.num_agents)] state = [rgb[agent_id].transpose(2, 0, 1) for agent_id in range(self.num_agents)] depth = [_preprocess_depth(obs[agent_id]['depth']) for agent_id in range(self.num_agents)] # Get base sensor and ground-truth pose dx_gt = [] dy_gt = [] do_gt = [] for agent_id in range(self.num_agents): dx_gt_t, dy_gt_t, do_gt_t = self.get_gt_pose_change(agent_id) dx_gt.append(dx_gt_t) dy_gt.append(dy_gt_t) do_gt.append(do_gt_t) dx_base = [] dy_base = [] do_base = [] for agent_id in range(self.num_agents): dx_base_t, dy_base_t, do_base_t = self.get_base_pose_change( action[agent_id], (dx_gt[agent_id], dy_gt[agent_id], do_gt[agent_id])) dx_base.append(dx_base_t) dy_base.append(dy_base_t) do_base.append(do_base_t) for agent_id in range(self.num_agents): self.curr_loc[agent_id] = pu.get_new_pose(self.curr_loc[agent_id], (dx_base[agent_id], dy_base[agent_id], do_base[agent_id])) for agent_id in range(self.num_agents): self.curr_loc_gt[agent_id] = pu.get_new_pose(self.curr_loc_gt[agent_id], (dx_gt[agent_id], dy_gt[agent_id], do_gt[agent_id])) if not self.args.noisy_odometry: self.curr_loc = self.curr_loc_gt dx_base, dy_base, do_base = dx_gt, dy_gt, do_gt # Convert pose to cm and degrees for mapper mapper_gt_pose = [] for agent_id in range(self.num_agents): mapper_gt_pose.append( (self.curr_loc_gt[agent_id][0] * 100.0, self.curr_loc_gt[agent_id][1] * 100.0, np.deg2rad(self.curr_loc_gt[agent_id][2])) ) fp_proj = [] fp_explored = [] self.map = [] self.explored_map = [] self.current_explored_gt = [] # Update ground_truth map and explored area for agent_id in range(self.num_agents): fp_proj_t, map_t, 
fp_explored_t, explored_map_t, current_explored_gt = \ self.mapper[agent_id].update_map(depth[agent_id], mapper_gt_pose[agent_id]) fp_proj.append(fp_proj_t) self.map.append(map_t) fp_explored.append(fp_explored_t) self.explored_map.append(explored_map_t) self.current_explored_gt.append(current_explored_gt) # Update collision map for agent_id in range(self.num_agents): if action[agent_id] == 1: x1, y1, t1 = self.last_loc[agent_id] x2, y2, t2 = self.curr_loc[agent_id] if abs(x1 - x2) < 0.05 and abs(y1 - y2) < 0.05: self.col_width[agent_id] += 2 self.col_width[agent_id] = min(self.col_width[agent_id], 9) else: self.col_width[agent_id] = 1 dist = pu.get_l2_distance(x1, x2, y1, y2) if dist < self.args.collision_threshold: # Collision length = 2 width = self.col_width[agent_id] buf = 3 for i in range(length): for j in range(width): wx = x1 + 0.05*((i+buf) * np.cos(np.deg2rad(t1)) + (j-width//2) * np.sin(np.deg2rad(t1))) wy = y1 + 0.05*((i+buf) * np.sin(np.deg2rad(t1)) - (j-width//2) * np.cos(np.deg2rad(t1))) r, c = wy, wx r, c = int(r*100/self.map_resolution), \ int(c*100/self.map_resolution) [r, c] = pu.threshold_poses([r, c], self.collison_map[agent_id].shape) self.collison_map[agent_id][r, c] = 1 # Set info self.info = { 'time': [], 'fp_proj': [], 'fp_explored': [], 'sensor_pose': [], 'pose_err': [], 'explored_reward': [], 'explored_ratio': [], 'merge_explored_reward': 0.0, 'merge_explored_ratio': 0.0, } for agent_id in range(self.num_agents): self.info['time'].append(self.timestep) self.info['fp_proj'].append(fp_proj[agent_id]) self.info['fp_explored'].append(fp_explored[agent_id]) self.info['sensor_pose'].append([dx_base[agent_id], dy_base[agent_id], do_base[agent_id]]) self.info['pose_err'].append([dx_gt[agent_id] - dx_base[agent_id], dy_gt[agent_id] - dy_base[agent_id], do_gt[agent_id] - do_base[agent_id]]) agent_explored_area, agent_explored_ratio, merge_explored_area, merge_explored_ratio, curr_merge_explored_map = self.get_global_reward() # log step 
self.merge_ratio += merge_explored_ratio if self.merge_ratio >= self.explored_ratio_threshold and self.merge_explored_ratio_step == -1.0: self.merge_explored_ratio_step = self.timestep self.info['merge_explored_ratio_step'] = self.timestep for agent_id in range(self.num_agents): self.ratio[agent_id] += agent_explored_ratio[agent_id] if self.ratio[agent_id] >= self.explored_ratio_threshold and self.explored_ratio_step[agent_id] == -1.0: self.explored_ratio_step[agent_id] = self.timestep self.info["agent{}_explored_ratio_step".format(agent_id)] = self.timestep agents_explored_map = np.zeros_like(self.explored_map[0]) self.info['merge_explored_reward'] = merge_explored_area self.info['merge_explored_ratio'] = merge_explored_ratio for agent_id in range(self.num_agents): self.info['explored_reward'].append(agent_explored_area[agent_id]) self.info['explored_ratio'].append(agent_explored_ratio[agent_id]) if self.timestep % self.args.num_local_steps == 0: agents_explored_map = np.maximum(agents_explored_map, self.transform(self.current_explored_gt[agent_id], agent_id)) if self.timestep % self.args.num_local_steps == 0 and self.merge_ratio < self.explored_ratio_threshold and self.use_repeat_penalty: self.info['merge_explored_reward'] -= (agents_explored_map[self.prev_merge_exlored_map == 1].sum() * (25./10000) * 0.02) self.prev_merge_exlored_map = curr_merge_explored_map self.save_position() if self.info['time'][0] >= self.args.max_episode_length: done = [True for _ in range(self.num_agents)] if self.merge_ratio >= self.explored_ratio_threshold and self.use_complete_reward: self.info['merge_explored_reward'] += 1.0 if self.args.save_trajectory_data: self.save_trajectory_data() else: done = [False for _ in range(self.num_agents)] return state, rew, done, self.info def get_reward_range(self): # This function is not used, Habitat-RLEnv requires this function return (0., 1.0) def get_reward(self, observations, agent_id): # This function is not used, Habitat-RLEnv requires this 
function return 0. def get_global_reward(self): agent_explored_rewards = [] agent_explored_ratios = [] # calculate individual reward curr_merge_explored_map = np.zeros_like(self.explored_map[0]) # global merge_explorable_map = np.zeros_like(self.explored_map[0]) # global for agent_id in range(self.num_agents): curr_agent_explored_map = self.explored_map[agent_id] * self.explorable_map[agent_id] curr_merge_explored_map = np.maximum(curr_merge_explored_map, self.transform(curr_agent_explored_map, agent_id)) merge_explorable_map = np.maximum(merge_explorable_map, self.transform(self.explorable_map[agent_id], agent_id)) curr_agent_explored_area = curr_agent_explored_map.sum() agent_explored_reward = (curr_agent_explored_area - self.prev_explored_area[agent_id]) * 1.0 self.prev_explored_area[agent_id] = curr_agent_explored_area # converting to m^2 * Reward Scaling 0.02 * reward time penalty agent_explored_rewards.append(agent_explored_reward * (25./10000) * 0.02 * self.reward_gamma) reward_scale = self.explorable_map[agent_id].sum() agent_explored_ratios.append(agent_explored_reward/reward_scale) # calculate merge reward curr_merge_explored_area = curr_merge_explored_map.sum() merge_explored_reward_scale = merge_explorable_map.sum() merge_explored_reward = (curr_merge_explored_area - self.prev_merge_explored_area) * 1.0 self.prev_merge_explored_area = curr_merge_explored_area merge_explored_ratio = merge_explored_reward / merge_explored_reward_scale merge_explored_reward = merge_explored_reward * (25./10000.) 
* 0.02 * self.reward_gamma if self.use_time_penalty: self.reward_gamma *= self.reward_decay return agent_explored_rewards, agent_explored_ratios, merge_explored_reward, merge_explored_ratio, curr_merge_explored_map def get_done(self, observations, agent_id): # This function is not used, Habitat-RLEnv requires this function return False def get_info(self, observations, agent_id): # This function is not used, Habitat-RLEnv requires this function info = {} return info def seed(self, seed): self._env.seed(seed) self.rng = np.random.RandomState(seed) def get_spaces(self): return self.observation_space, self.action_space def build_mapper(self): params = {} params['frame_width'] = self.args.env_frame_width params['frame_height'] = self.args.env_frame_height params['fov'] = self.args.hfov params['resolution'] = self.map_resolution params['map_size_cm'] = self.map_size_cm params['agent_min_z'] = 25 params['agent_max_z'] = 150 params['agent_height'] = self.args.camera_height * 100 params['agent_view_angle'] = 0 params['du_scale'] = self.args.du_scale params['vision_range'] = self.args.vision_range params['visualize'] = self.use_render params['obs_threshold'] = self.args.obs_threshold params['num_local_steps'] = self.args.num_local_steps self.selem = skimage.morphology.disk(self.args.obstacle_boundary / self.map_resolution) mapper = MapBuilder(params) return mapper def get_sim_location(self, agent_id): agent_state = super().habitat_env.sim.get_agent_state(agent_id) x = -agent_state.position[2] y = -agent_state.position[0] axis = quaternion.as_euler_angles(agent_state.rotation)[0] if (axis % (2*np.pi)) < 0.1 or (axis % (2*np.pi)) > 2*np.pi - 0.1: o = quaternion.as_euler_angles(agent_state.rotation)[1] else: o = 2*np.pi - quaternion.as_euler_angles(agent_state.rotation)[1] if o > np.pi: o -= 2 * np.pi return x, y, o def get_gt_pose_change(self, agent_id): curr_sim_pose = self.get_sim_location(agent_id) dx, dy, do = pu.get_rel_pose_change( curr_sim_pose, 
self.last_sim_location[agent_id]) self.last_sim_location[agent_id] = curr_sim_pose return dx, dy, do def get_base_pose_change(self, action, gt_pose_change): dx_gt, dy_gt, do_gt = gt_pose_change if action == 1: # Forward x_err, y_err, o_err = self.sensor_noise_fwd.sample()[0][0] elif action == 3: # Right x_err, y_err, o_err = self.sensor_noise_right.sample()[0][0] elif action == 2: # Left x_err, y_err, o_err = self.sensor_noise_left.sample()[0][0] else: # Stop x_err, y_err, o_err = 0., 0., 0. x_err = x_err * self.args.noise_level y_err = y_err * self.args.noise_level o_err = o_err * self.args.noise_level return dx_gt + x_err, dy_gt + y_err, do_gt + np.deg2rad(o_err) def transform(self, inputs, agent_id): inputs = torch.from_numpy(inputs) n_rotated = F.grid_sample(inputs.unsqueeze(0).unsqueeze( 0).float(), self.n_rot[agent_id].float(), align_corners=True) n_map = F.grid_sample( n_rotated.float(), self.n_trans[agent_id].float(), align_corners=True) n_map = n_map[0, 0, :, :].numpy() return n_map def get_short_term_goal(self, inputs): args = self.args self.extrinsic_rew = [] self.intrinsic_rew = [] self.relative_angle = [] def discretize(dist): dist_limits = [0.25, 3, 10] dist_bin_size = [0.05, 0.25, 1.] 
if dist < dist_limits[0]: ddist = int(dist/dist_bin_size[0]) elif dist < dist_limits[1]: ddist = int((dist - dist_limits[0])/dist_bin_size[1]) + \ int(dist_limits[0]/dist_bin_size[0]) elif dist < dist_limits[2]: ddist = int((dist - dist_limits[1])/dist_bin_size[2]) + \ int(dist_limits[0]/dist_bin_size[0]) + \ int((dist_limits[1] - dist_limits[0])/dist_bin_size[1]) else: ddist = int(dist_limits[0]/dist_bin_size[0]) + \ int((dist_limits[1] - dist_limits[0])/dist_bin_size[1]) + \ int((dist_limits[2] - dist_limits[1])/dist_bin_size[2]) return ddist # Get Map prediction map_pred = inputs['map_pred'] exp_pred = inputs['exp_pred'] output = [np.zeros((args.goals_size + 1)) for _ in range(self.num_agents)] for agent_id in range(self.num_agents): grid = np.rint(map_pred[agent_id]) explored = np.rint(exp_pred[agent_id]) # Get pose prediction and global policy planning window start_x, start_y, start_o, gx1, gx2, gy1, gy2 = inputs['pose_pred'][agent_id] gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2) planning_window = [gx1, gx2, gy1, gy2] # Get last loc last_start_x, last_start_y = self.last_loc[agent_id][0], self.last_loc[agent_id][1] r, c = last_start_y, last_start_x last_start = [int(r * 100.0/self.map_resolution - gx1), int(c * 100.0/self.map_resolution - gy1)] last_start = pu.threshold_poses(last_start, grid.shape) # Get curr loc self.curr_loc[agent_id] = [start_x, start_y, start_o] r, c = start_y, start_x start = [int(r * 100.0/self.map_resolution - gx1), int(c * 100.0/self.map_resolution - gy1)] start = pu.threshold_poses(start, grid.shape) # TODO: try reducing this self.visited[agent_id][gx1:gx2, gy1:gy2][start[0]-2:start[0]+3, start[1]-2:start[1]+3] = 1 steps = 25 # ! 
wrong for i in range(steps): x = int(last_start[0] + (start[0] - last_start[0]) * (i+1) / steps) y = int(last_start[1] + (start[1] - last_start[1]) * (i+1) / steps) self.visited_vis[agent_id][gx1:gx2, gy1:gy2][x, y] = 1 # Get last loc ground truth pose last_start_x, last_start_y = self.last_loc_gt[agent_id][0], self.last_loc_gt[agent_id][1] r, c = last_start_y, last_start_x last_start = [int(r * 100.0/self.map_resolution), int(c * 100.0/self.map_resolution)] last_start = pu.threshold_poses( last_start, self.visited_gt[agent_id].shape) # Get ground truth pose start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id] r, c = start_y_gt, start_x_gt start_gt = [int(r * 100.0/self.map_resolution), int(c * 100.0/self.map_resolution)] start_gt = pu.threshold_poses(start_gt, self.visited_gt[agent_id].shape) steps = 25 # ! wrong for i in range(steps): x = int(last_start[0] + (start_gt[0] - last_start[0]) * (i+1) / steps) y = int(last_start[1] + (start_gt[1] - last_start[1]) * (i+1) / steps) self.visited_gt[agent_id][x, y] = 1 # Get goal goal = inputs['goal'][agent_id] goal = pu.threshold_poses(goal, grid.shape) # Get intrinsic reward for global policy # Negative reward for exploring explored areas i.e. # for choosing explored cell as long-term goal self.extrinsic_rew.append(-pu.get_l2_distance(10, goal[0], 10, goal[1])) self.intrinsic_rew.append(-exp_pred[agent_id][goal[0], goal[1]]) # Get short-term goal stg = self._get_stg(grid, explored, start, np.copy(goal), planning_window, agent_id) # Find GT action if self.args.use_eval or self.args.use_render or not self.args.train_local: gt_action = 0 else: gt_action = self._get_gt_action(1 - self.explorable_map[agent_id], start, [int(stg[0]), int(stg[1])], planning_window, start_o, agent_id) (stg_x, stg_y) = stg relative_dist = pu.get_l2_distance(stg_x, start[0], stg_y, start[1]) relative_dist = relative_dist*5./100. 
angle_st_goal = math.degrees(math.atan2(stg_x - start[0], stg_y - start[1])) angle_agent = (start_o) % 360.0 if angle_agent > 180: angle_agent -= 360 relative_angle = (angle_agent - angle_st_goal) % 360.0 if relative_angle > 180: relative_angle -= 360 output[agent_id][0] = int((relative_angle % 360.)/5.) output[agent_id][1] = discretize(relative_dist) output[agent_id][2] = gt_action self.relative_angle.append(relative_angle) if self.use_render: gif_dir = '{}/gifs/{}/episode_{}/all/'.format(self.run_dir, self.scene_id, self.episode_no) if not os.path.exists(gif_dir): os.makedirs(gif_dir) self.render(inputs, grid, map_pred, gif_dir) if self.render_merge: gif_dir = '{}/gifs/{}/episode_{}/merge/'.format(self.run_dir, self.scene_id, self.episode_no) if not os.path.exists(gif_dir): os.makedirs(gif_dir) self.render_merged_map(inputs, grid, map_pred, gif_dir) return output def _get_gt_map(self, full_map_size, agent_id): self.scene_name = self.habitat_env.sim.config.SCENE # logger.error('Computing map for %s', self.scene_name) # Get map in habitat simulator coordinates self.map_obj = HabitatMaps(self.habitat_env) if self.map_obj.size[0] < 1 or self.map_obj.size[1] < 1: logger.error("Invalid map: {}/{}".format(self.scene_name, self.episode_no)) return None print(self._env.sim.get_agent_state(agent_id).position.tolist()) agent_y = self._env.sim.get_agent_state(agent_id).position.tolist()[1]*100. # cm if self.use_restrict_map: sim_map = self.map_obj.get_restrict_map(agent_y, -50., 50.0) else: sim_map = self.map_obj.get_map() sim_map[sim_map > 0] = 1. # Transform the map to align with the agent min_x, min_y = self.map_obj.origin/100.0 x, y, o = self.get_sim_location(agent_id) x, y = -x - min_x, -y - min_y range_x, range_y = self.map_obj.max/100. - self.map_obj.origin/100. map_size = sim_map.shape scale = 2. 
self.grid_size = int(scale*max(map_size)) grid_map = np.zeros((self.grid_size, self.grid_size)) grid_map[(self.grid_size - map_size[0])//2: (self.grid_size - map_size[0])//2 + map_size[0], (self.grid_size - map_size[1])//2: (self.grid_size - map_size[1])//2 + map_size[1]] = sim_map if map_size[0] > map_size[1]: self.agent_st.append(torch.tensor([[ (x - range_x/2.) * 2. / (range_x * scale) \ * map_size[1] * 1. / map_size[0], (y - range_y/2.) * 2. / (range_y * scale), 180.0 + np.rad2deg(o) ]])) else: self.agent_st.append(torch.tensor([[ (x - range_x/2.) * 2. / (range_x * scale), (y - range_y/2.) * 2. / (range_y * scale) * map_size[0] * 1. / map_size[1], 180.0 + np.rad2deg(o) ]])) rot_mat, trans_mat, n_rot_mat, n_trans_mat = get_grid_full(self.agent_st[agent_id], (1, 1, self.grid_size, self.grid_size), (1, 1, full_map_size, full_map_size), torch.device("cpu")) grid_map = torch.from_numpy(grid_map).float() grid_map = grid_map.unsqueeze(0).unsqueeze(0) translated = F.grid_sample(grid_map, trans_mat, align_corners=True) rotated = F.grid_sample(translated, rot_mat, align_corners=True) episode_map = torch.zeros((full_map_size, full_map_size)).float() if full_map_size > self.grid_size: episode_map[(full_map_size - self.grid_size)//2: (full_map_size - self.grid_size)//2 + self.grid_size, (full_map_size - self.grid_size)//2: (full_map_size - self.grid_size)//2 + self.grid_size] = \ rotated[0, 0] else: episode_map = rotated[0, 0, (self.grid_size - full_map_size)//2: (self.grid_size - full_map_size)//2 + full_map_size, (self.grid_size - full_map_size)//2: (self.grid_size - full_map_size)//2 + full_map_size] episode_map = episode_map.numpy() episode_map[episode_map > 0] = 1. 
        return episode_map, n_rot_mat, n_trans_mat, 180.0 + np.rad2deg(o)

    def _get_stg(self, grid, explored, start, goal, planning_window, agent_id):
        """Compute a short-term goal for `agent_id` on the local map.

        Crops a window around `start` and `goal`, builds a traversibility
        mask from the dilated obstacle `grid` plus this agent's collision
        and visited maps, then runs the FMM planner towards `goal`.

        Returns:
            (stg_x, stg_y): short-term goal in local map coordinates.
        """
        [gx1, gx2, gy1, gy2] = planning_window

        x1 = min(start[0], goal[0])
        x2 = max(start[0], goal[0])
        y1 = min(start[1], goal[1])
        y2 = max(start[1], goal[1])
        dist = pu.get_l2_distance(goal[0], start[0], goal[1], start[1])
        buf = max(20., dist)
        x1 = max(1, int(x1 - buf))
        x2 = min(grid.shape[0]-1, int(x2 + buf))
        y1 = max(1, int(y1 - buf))
        y2 = min(grid.shape[1]-1, int(y2 + buf))

        # Bounding box of the explored area: first/last non-empty row/col.
        rows = explored.sum(1)
        rows[rows > 0] = 1
        ex1 = np.argmax(rows)
        ex2 = len(rows) - np.argmax(np.flip(rows))

        cols = explored.sum(0)
        cols[cols > 0] = 1
        ey1 = np.argmax(cols)
        ey2 = len(cols) - np.argmax(np.flip(cols))

        # Always keep a small margin around the agent's own position.
        ex1 = min(int(start[0]) - 2, ex1)
        ex2 = max(int(start[0]) + 2, ex2)
        ey1 = min(int(start[1]) - 2, ey1)
        ey2 = max(int(start[1]) + 2, ey2)

        # Intersect the planning crop with the explored bounding box.
        x1 = max(x1, ex1)
        x2 = min(x2, ex2)
        y1 = max(y1, ey1)
        y2 = min(y2, ey2)

        # Free space = NOT (dilated obstacle); then overlay the collision
        # map (forced blocked) and visited cells (forced traversible).
        traversible = skimage.morphology.binary_dilation(
            grid[x1:x2, y1:y2],
            self.selem) != True
        traversible[self.collison_map[agent_id]
                    [gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 0
        traversible[self.visited[agent_id]
                    [gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 1

        # The agent's 3x3 neighbourhood is traversible by definition.
        traversible[int(start[0]-x1)-1:int(start[0]-x1)+2,
                    int(start[1]-y1)-1:int(start[1]-y1)+2] = 1

        if goal[0]-2 > x1 and goal[0]+3 < x2\
                and goal[1]-2 > y1 and goal[1]+3 < y2:
            # Clear a 5x5 patch around the goal so FMM can reach it.
            traversible[int(goal[0]-x1)-2:int(goal[0]-x1)+3,
                        int(goal[1]-y1)-2:int(goal[1]-y1)+3] = 1
        else:
            # Goal falls too close to the crop border: clamp it inside.
            goal[0] = min(max(x1, goal[0]), x2)
            goal[1] = min(max(y1, goal[1]), y2)

        def add_boundary(mat):
            # Pad with a one-cell border of ones around the crop.
            h, w = mat.shape
            new_mat = np.ones((h+2, w+2))
            new_mat[1:h+1, 1:w+1] = mat
            return new_mat

        traversible = add_boundary(traversible)

        planner = FMMPlanner(traversible, 360//self.dt)

        # +1 offsets compensate for the boundary added above.
        reachable = planner.set_goal([goal[1]-y1+1, goal[0]-x1+1])

        stg_x, stg_y = start[0] - x1 + 1, start[1] - y1 + 1
        for i in range(self.args.short_goal_dist):
            stg_x, stg_y, replan = planner.get_short_term_goal([stg_x, stg_y])
        if replan:
            # Planner could not make progress: stay in place.
            stg_x, stg_y = start[0], start[1]
        else:
            # Map back from crop coordinates to local map coordinates.
            stg_x, stg_y = stg_x + x1 - 1, stg_y + y1 - 1

        return (stg_x, stg_y)

    def _get_gt_action(self, grid, start, goal, planning_window, start_o,
                       agent_id):
        """Return the ground-truth discrete action towards `goal`.

        Plans on the GT map with progressively relaxed crops/goal radii
        until a path is found, then converts the heading difference into
        an action: 0 = turn left, 1 = turn right, 2 = move forward.
        """
        [gx1, gx2, gy1, gy2] = planning_window

        x1 = min(start[0], goal[0])
        x2 = max(start[0], goal[0])
        y1 = min(start[1], goal[1])
        y2 = max(start[1], goal[1])
        dist = pu.get_l2_distance(goal[0], start[0], goal[1], start[1])
        buf = max(5., dist)
        x1 = max(0, int(x1 - buf))
        x2 = min(grid.shape[0], int(x2 + buf))
        y1 = max(0, int(y1 - buf))
        y2 = min(grid.shape[1], int(y2 + buf))

        path_found = False
        goal_r = 0
        while not path_found:
            traversible = skimage.morphology.binary_dilation(
                grid[gx1:gx2, gy1:gy2][x1:x2, y1:y2],
                self.selem) != True
            traversible[self.visited[agent_id]
                        [gx1:gx2, gy1:gy2][x1:x2, y1:y2] == 1] = 1
            traversible[int(start[0]-x1)-1:int(start[0]-x1)+2,
                        int(start[1]-y1)-1:int(start[1]-y1)+2] = 1
            # Goal patch grows with goal_r on successive retries.
            traversible[int(goal[0]-x1)-goal_r:int(goal[0]-x1)+goal_r+1,
                        int(goal[1]-y1)-goal_r:int(goal[1]-y1)+goal_r+1] = 1
            scale = 1
            planner = FMMPlanner(traversible, 360//self.dt, scale)

            reachable = planner.set_goal([goal[1]-y1, goal[0]-x1])

            stg_x_gt, stg_y_gt = start[0] - x1, start[1] - y1
            for i in range(1):
                stg_x_gt, stg_y_gt, replan = \
                    planner.get_short_term_goal([stg_x_gt, stg_y_gt])

            if replan and buf < 100.:
                # Planning failed: widen the crop and retry.
                buf = 2*buf
                x1 = max(0, int(x1 - buf))
                x2 = min(grid.shape[0], int(x2 + buf))
                y1 = max(0, int(y1 - buf))
                y2 = min(grid.shape[1], int(y2 + buf))
            elif replan and goal_r < 50:
                # Still failing: relax the goal radius instead.
                goal_r += 1
            else:
                path_found = True

        stg_x_gt, stg_y_gt = stg_x_gt + x1, stg_y_gt + y1
        angle_st_goal = math.degrees(math.atan2(stg_x_gt - start[0],
                                                stg_y_gt - start[1]))
        angle_agent = (start_o) % 360.0
        if angle_agent > 180:
            angle_agent -= 360

        relative_angle = (angle_agent - angle_st_goal) % 360.0
        if relative_angle > 180:
            relative_angle -= 360

        # 15-degree dead band around "straight ahead".
        if relative_angle > 15.:
            gt_action = 1
        elif relative_angle < -15.:
            gt_action = 0
        else:
            gt_action = 2

        return gt_action

    def render(self, inputs, grid, map_pred, gif_dir):
        """Visualize every agent's predicted and GT maps and poses."""
        for agent_id in range(self.num_agents):
            goal = inputs['goal'][agent_id]
            goal = pu.threshold_poses(goal, grid.shape)
            start_x, start_y, start_o, gx1, gx2, gy1, gy2 = \
                inputs['pose_pred'][agent_id]
            gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)

            start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]

            # predicted map and pose
            vis_grid_local = vu.get_colored_map(
                np.rint(map_pred[agent_id]),
                self.collison_map[agent_id][gx1:gx2, gy1:gy2],
                self.visited_vis[agent_id][gx1:gx2, gy1:gy2],
                self.visited_gt[agent_id][gx1:gx2, gy1:gy2],
                [goal],
                self.explored_map[agent_id][gx1:gx2, gy1:gy2],
                self.explorable_map[agent_id][gx1:gx2, gy1:gy2],
                self.map[agent_id][gx1:gx2, gy1:gy2] *
                self.explored_map[agent_id][gx1:gx2, gy1:gy2])

            vis_grid_local = np.flipud(vis_grid_local)

            # Poses shifted into the local (cropped) frame.
            pos_local = (start_x - gy1 * self.map_resolution/100.0,
                         start_y - gx1 * self.map_resolution/100.0,
                         start_o)
            pos_gt_local = (start_x_gt - gy1 * self.map_resolution/100.0,
                            start_y_gt - gx1 * self.map_resolution/100.0,
                            start_o_gt)

            # ground truth map and pose
            vis_grid_gt = vu.get_colored_map(
                self.map[agent_id],
                self.collison_map[agent_id],
                self.visited_gt[agent_id],
                self.visited_gt[agent_id],
                [(goal[0] + gx1, goal[1] + gy1)],
                self.explored_map[agent_id],
                self.explorable_map[agent_id],
                self.map[agent_id]*self.explored_map[agent_id])

            vis_grid_gt = np.flipud(vis_grid_gt)

            pos = (start_x, start_y, start_o)
            pos_gt = (start_x_gt, start_y_gt, start_o_gt)

            # self.ax is a single axis when there is only one agent.
            ax = self.ax[agent_id] if self.num_agents > 1 else self.ax
            vu.visualize_all(agent_id, self.figure, ax, self.obs[agent_id],
                             vis_grid_local[:, :, ::-1],
                             vis_grid_gt[:, :, ::-1],
                             pos_local, pos_gt_local,
                             pos, pos_gt, gif_dir,
                             self.timestep, self.use_render, self.save_gifs)

    def render_merged_map(self, inputs, grid, map_pred, gif_dir):
        """Fuse all agents' maps into one global frame and visualize it."""
        merge_map = np.zeros_like(self.explored_map[0])
        merge_collision_map = np.zeros_like(self.explored_map[0])
        merge_visited_gt = np.zeros_like(self.explored_map[0])
        merge_visited_vis = np.zeros_like(self.explored_map[0])
        merge_explored_map = np.zeros_like(self.explored_map[0])
        merge_explorable_map = np.zeros_like(self.explored_map[0])
        merge_gt_explored = np.zeros_like(self.explored_map[0])

        all_pos = []
        all_pos_gt = []
        all_goals = []
        for agent_id in range(self.num_agents):
            start_x, start_y, start_o, gx1, gx2, gy1, gy2 = \
                inputs['pose_pred'][agent_id]
            gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)
            goal = inputs['goal'][agent_id]
            goal = pu.threshold_poses(goal, grid.shape)
            start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]

            # One-hot position maps so positions can be rotated/translated
            # into the common frame via self.transform.
            pos_map = np.zeros_like(self.explored_map[0])
            pos_gt_map = np.zeros_like(self.explored_map[0])
            goal_map = np.zeros_like(self.explored_map[0])
            pos_map[int(start_y * 100.0/5.0), int(start_x * 100.0/5.0)] = 1
            pos_gt_map[int(start_y_gt * 100.0/5.0),
                       int(start_x_gt * 100.0/5.0)] = 1
            goal_map[int(goal[0] + gx1), int(goal[1] + gy1)] = 1
            pos_map = self.transform(pos_map, agent_id)
            pos_gt_map = self.transform(pos_gt_map, agent_id)
            goal_map = self.transform(goal_map, agent_id)
            # Recover the transformed coordinates from the one-hot maps.
            (index_b, index_a) = np.unravel_index(
                np.argmax(pos_map, axis=None), pos_map.shape)
            (index_gt_b, index_gt_a) = np.unravel_index(
                np.argmax(pos_gt_map, axis=None), pos_gt_map.shape)
            (index_goal_a, index_goal_b) = np.unravel_index(
                np.argmax(goal_map, axis=None), goal_map.shape)
            pos = (index_a * 5.0/100.0, index_b * 5.0/100.0,
                   start_o + self.init_theta[agent_id])
            pos_gt = (index_gt_a * 5.0/100.0, index_gt_b * 5.0/100.0,
                      start_o_gt + self.init_theta[agent_id])
            goal = (index_goal_a, index_goal_b, 0)
            all_pos.append(pos)
            all_pos_gt.append(pos_gt)
            all_goals.append(goal)

            # Merge per-agent layers with element-wise maximum (binary maps).
            pred_map = np.zeros_like(self.explored_map[0])
            pred_map[gx1:gx2, gy1:gy2] = np.rint(map_pred[agent_id])
            self.merge_pred_map = np.maximum(
                self.merge_pred_map, self.transform(pred_map, agent_id))
            merge_map = np.maximum(
                merge_map, self.transform(self.map[agent_id], agent_id))
            merge_visited_gt = np.maximum(
                merge_visited_gt,
                self.transform(self.visited_gt[agent_id], agent_id))
            merge_visited_vis = np.maximum(
                merge_visited_vis,
                self.transform(self.visited_vis[agent_id], agent_id))
            merge_collision_map[
                self.transform(self.collison_map[agent_id], agent_id) == 1] = 1
            merge_explorable_map[
                self.transform(self.explorable_map[agent_id],
                               agent_id) == 1] = 1
            merge_explored_map = np.maximum(
                merge_explored_map,
                self.transform(self.explored_map[agent_id], agent_id))
            merge_gt_explored = np.maximum(
                merge_gt_explored,
                self.transform(
                    self.map[agent_id] * self.explored_map[agent_id],
                    agent_id))

        vis_grid_gt = vu.get_colored_map(merge_map,
                                         merge_collision_map,
                                         merge_visited_gt,
                                         merge_visited_gt,
                                         all_goals,
                                         merge_explored_map,
                                         merge_explorable_map,
                                         merge_gt_explored)
        vis_grid_pred = vu.get_colored_map(self.merge_pred_map,
                                           merge_collision_map,
                                           merge_visited_vis,
                                           merge_visited_gt,
                                           all_goals,
                                           merge_explored_map,
                                           merge_explorable_map,
                                           merge_gt_explored)
        vis_grid_gt = np.flipud(vis_grid_gt)
        vis_grid_pred = np.flipud(vis_grid_pred)

        vu.visualize_map(self.figure_m, self.ax_m,
                         vis_grid_gt[:, :, ::-1],
                         vis_grid_pred[:, :, ::-1],
                         all_pos_gt, all_pos,
                         gif_dir, self.timestep,
                         self.use_render, self.save_gifs)
nilq/baby-python
python
from django.apps import AppConfig


class StandardizingApiConfig(AppConfig):
    """Django application configuration for the ``standardizing_api`` app."""

    # Dotted path under which Django registers this application.
    name = 'standardizing_api'
nilq/baby-python
python
# Generated by Django 3.2.9 on 2021-11-28 04:44 from django.db import migrations, models import tinymce.models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Place', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=200, verbose_name='Заголовок')), ('description_short', models.TextField(verbose_name='Краткое описание')), ('description_long', tinymce.models.HTMLField(verbose_name='Полное описание')), ('lng', models.FloatField(verbose_name='Долгота')), ('lat', models.FloatField(verbose_name='Широта')), ], ), ]
nilq/baby-python
python
import torch
import torch.nn as nn

from graphgallery.nn.layers.pytorch import GCNConv, Sequential, activations, InnerProductDecoder


class GAE(nn.Module):
    """Graph Auto-Encoder: stacked-GCN encoder + inner-product decoder."""

    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        # NOTE(review): `hids`/`acts` are mutable default arguments; they are
        # only read here (never mutated), but consider `hids=(32,)` — confirm.
        super().__init__()
        # Encoder layout: Dropout -> (GCNConv -> act -> Dropout)* -> GCNConv.
        encoder = []
        encoder.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            encoder.append(GCNConv(in_features,
                                   hid,
                                   bias=bias))
            encoder.append(activations.get(act))
            encoder.append(nn.Dropout(dropout))
            in_features = hid
        encoder.append(GCNConv(in_features, out_features, bias=bias))
        encoder = Sequential(*encoder)

        self.encoder = encoder
        self.decoder = InnerProductDecoder()

    def forward(self, x, adj):
        """Encode node features `x` with adjacency `adj` into embeddings."""
        z = self.encoder(x, adj)
        return z


class VGAE(nn.Module):
    """Variational Graph Auto-Encoder using the reparameterization trick."""

    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        # NOTE(review): same mutable-default caveat as in GAE above.
        super().__init__()
        # Shared trunk; mu/logstd heads branch off the last hidden layer.
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        self.mu_conv = GCNConv(in_features, out_features, bias=bias)
        self.logstd_conv = GCNConv(in_features, out_features, bias=bias)
        self.conv = Sequential(*conv)
        self.decoder = InnerProductDecoder()

    def forward(self, x, adj):
        """Return (z, mu, logstd) in training mode, `mu` in eval mode."""
        h = self.conv(x, adj)
        mu = self.mu_conv(h, adj)
        if self.training:
            logstd = self.logstd_conv(h, adj)
            std = torch.exp(logstd)
            eps = torch.randn_like(std)
            # Reparameterization: z = mu + eps * std.
            z = eps.mul(std).add_(mu)
            return z, mu, logstd
        else:
            return mu
nilq/baby-python
python
""" TODAS AS QUESTÕES SENDO COMPUTADAS BEM COMO AS SUAS ALTERNATIVAS E A SUA DEVIDA RESPOSTA CORRETA. DICIONÁRIO EM PYTHON. """ questionsX = { 'Pergunta 1': { 'pergunta': 'Qual é o século que ocorreu o período chamado iluminismo, o século das luzes?', 'alternativas': {'a': 'XIX -> Século 19', 'b': 'XVI -> Século 16', 'c': 'XVIII -> Século 18', 'd': 'XV -> Século 15'}, 'resposta_correta': 'c', }, 'Pergunta 2': { 'pergunta': 'Quem é considerado o rei do futebol?', 'alternativas': {'a': 'Pelé', 'b': 'Zico', 'c': 'Cruijff', 'd': 'Beckenbauer'}, 'resposta_correta': 'a', }, 'Pergunta 3': { 'pergunta': 'Qual é o país que lembra o formato de uma bota no mapa?', 'alternativas': {'a': 'Espanha', 'b': 'Itália', 'c': 'Brasil', 'd': 'Portugal'}, 'resposta_correta': 'b', }, 'Pergunta 4': { 'pergunta': 'Onde é a capital do Canadá?', 'alternativas': {'a': 'Toronto', 'b': 'Vancouver', 'c': 'Alberta', 'd': 'Ottawa'}, 'resposta_correta': 'd', }, 'Pergunta 5': { 'pergunta': 'Quem é conhecido por ser o inventor da lâmpada?', 'alternativas': {'a': 'Albert Einstein', 'b': 'Thomas Edison', 'c': 'Isaac Newton', 'd': 'Charles Darwin'}, 'resposta_correta': 'b', }, } questionsY = { 'Pergunta 1': { 'pergunta': 'Quem é o grande nome na história da Microsoft?', 'alternativas': {'a': 'Bill Gates', 'b': 'Steve Jobs', 'c': 'Jeff Bezos', 'd': 'Elon Musk'}, 'resposta_correta': 'a', }, 'Pergunta 2': { 'pergunta': 'Na série The Office (USA), qual é o nome do personagem da área de Relações Humanas?', 'alternativas': {'a': 'Kevin Malone', 'b': 'Andy Bernard', 'c': 'Kelly Kapoor', 'd': 'Toby Flenderson'}, 'resposta_correta': 'd', }, 'Pergunta 3': { 'pergunta': 'A famosa grande barreira de coral fica situada próximo de qual região?', 'alternativas': {'a': 'Haiti', 'b': 'México', 'c': 'Austrália', 'd': 'Madagascar'}, 'resposta_correta': 'c', }, 'Pergunta 4': { 'pergunta': 'Quem foi o aluno que morreu dentro da literatura Harry Potter - Cálice de fogo, durante o torneio tribruxo?', 'alternativas': {'a': 
'Cedrico Diggory', 'b': 'Neville Longbottom', 'c': 'Rony Weasley', 'd': 'Cho Chang'}, 'resposta_correta': 'a', }, 'Pergunta 5': { 'pergunta': 'Quem é o grande líder da Amazon?', 'alternativas': {'a': 'Steve Ballmer', 'b': 'Jeff Bezos', 'c': 'Jack Dorsey', 'd': 'Mark Zuckerberg'}, 'resposta_correta': 'b', }, } questionsW = { 'Pergunta 1': { 'pergunta': 'Qual desses países não interliga alguma fronteira com o Brasil? Considerando a América do Sul.', 'alternativas': {'a': 'Peru', 'b': 'Bolívia', 'c': 'Chile', 'd': 'Uruguai'}, 'resposta_correta': 'c', }, 'Pergunta 2': { 'pergunta': 'Qual é o nome daquele clássico bicho verde em Star Wars?', 'alternativas': {'a': 'Capitão fantástico', 'b': 'Hulk', 'c': 'Barney', 'd': 'Yoda'}, 'resposta_correta': 'd', }, 'Pergunta 3': { 'pergunta': 'Qual é o país mais populoso do planeta?', 'alternativas': {'a': 'Estados Unidos', 'b': 'Índia', 'c': 'China', 'd': 'Rússia'}, 'resposta_correta': 'c', }, 'Pergunta 4': { 'pergunta': 'Roma fica em qual país?', 'alternativas': {'a': 'Itália', 'b': 'França', 'c': 'Suécia', 'd': 'Inglaterra'}, 'resposta_correta': 'a', }, 'Pergunta 5': { 'pergunta': 'Cristiano Ronaldo é um atleta profissional de qual esporte?', 'alternativas': {'a': 'Tênis', 'b': 'Futebol', 'c': 'Beisebol', 'd': 'Basquetebol'}, 'resposta_correta': 'b', }, } THEFINAL = { 'Pergunta 1': { 'pergunta': 'Qual é a empresa que está causando o maior impacto na educação do país?', 'alternativas': {'a': 'Latam', 'b': 'Razer', 'c': 'Jovens Gênios', 'd': 'Unilever'}, 'resposta_correta': 'c', }, }
nilq/baby-python
python
# flag = 'r2con{Sit down next to my friendLight matchStay}' var_60h = 0xDEADBEEFDEADBEEFCAFE1337CAFE13370102030405060708090A.to_bytes(26, 'big') var_40h = 0xDEADBEEFCAFE13371337CAFE133713370102030405060708090A.to_bytes(26, 'little') First_arr = [ 0x97, 0xCD, 0xD2, 0xD6, 0xC0, 0xC7, 0xCD, 0x84, 0xEC, 0x91, 0xAD, 0x62, 0xF5, 0xF1, 0x65, 0x22, 0x58, 0x82, 0xB1, 0x37, 0x61, 0x3E, 0x5D, 0x2B, 0x14, 0x4C ] Second_arr = [ 0x9C, 0xCD, 0xE1, 0x8E, 0xB0, 0x92, 0xD7, 0x91, 0xC0, 0x9E, 0xB2 ] Third_arr = [ 0x97, 0xE2, 0xE7, 0x9D ] print('r2con{', end='') for i in range(0, len(First_arr)): print(chr((((First_arr[i]-var_40h[i])^var_60h[i])) & 0xff), end='') for i in range(0, len(Second_arr)): print(chr((((Second_arr[i]-var_40h[i])^var_60h[i])) & 0xff), end='') for i in range(0, len(Third_arr)): print(chr((((Third_arr[i]-var_40h[i])^var_60h[i])) & 0xff), end='') print('}')
nilq/baby-python
python
"""CLI entry point: run YOLO inference over an image folder and report
COCO-style evaluation metrics against a ground-truth annotation file."""

import tensorflow as tf
from absl import flags, app

from libs.inference import YoloInf
from libs.evals.coco import GetCocoEval

FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt', default=None, help='Checkpoint file path')
flags.DEFINE_string('img_prefix', default=None, help='Image directory path to evaluate', short_name='i')
flags.DEFINE_string('coco_gt', default=None, help='COCO GT file path', short_name='g')
flags.DEFINE_float('conf_thr', default=0.05, help='Inference confidence threshold')
flags.DEFINE_list('img_exts', default=['.png', '.jpg', '.jpeg'], help='Image extensions')

flags.mark_flag_as_required('ckpt')
flags.mark_flag_as_required('img_prefix')
flags.mark_flag_as_required('coco_gt')
flags.mark_flag_as_required('conf_thr')
flags.mark_flag_as_required('img_exts')

# Save some gpu memories: grow GPU memory on demand instead of
# pre-allocating the whole device at startup.
physical_devices = tf.config.list_physical_devices('GPU')
for physical_device in physical_devices:
    tf.config.experimental.set_memory_growth(device=physical_device, enable=True)


def main(_argv):
    """Build the inference model from the checkpoint and run COCO eval."""
    yolo_inf = YoloInf(ckpt_path=FLAGS.ckpt)
    coco_eval = GetCocoEval(
        img_prefix=FLAGS.img_prefix,
        coco_gt_path=FLAGS.coco_gt,
        yolo_inf=yolo_inf,
        conf_thr=FLAGS.conf_thr,
        img_exts=FLAGS.img_exts,
    )
    coco_eval.get(verbose=True)


if __name__ == '__main__':
    app.run(main)
nilq/baby-python
python
import torch
import torch.nn as nn
import numpy as np

import sys
sys.path.append('..')
from networks import HSwish, HSigmoid, Swish, Sigmoid


def compute_memory(module, inp, out):
    """Dispatch to the per-layer-type memory-traffic estimator.

    Args:
        module: nn.Module instance being profiled.
        inp: input tensor passed to the module.
        out: output tensor produced by the module.

    Returns:
        (mread, mwrite): estimated element counts read/written; (0, 0)
        for unsupported module types (a message is printed).
    """
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, (Sigmoid, HSigmoid)):
        return compute_Sigmoid_memory(module, inp, out)
    elif isinstance(module, (Swish, HSwish)):
        return compute_Swish_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (
            nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
            nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
    pass


def num_params(module):
    # Counts only trainable parameters (frozen ones are excluded).
    # NOTE(review): whether frozen weights should count towards memory
    # reads is debatable — they are still fetched during forward.
    return sum(p.numel() for p in module.parameters() if p.requires_grad)
def compute_ReLU_memory(module, inp, out):
    """Memory traffic of element-wise ReLU-family activations."""
    assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU))

    batch = inp.size(0)
    features = inp.size()[1:].numel()
    # Reads the input once and writes an equally sized output.
    return (batch * features, batch * features)


def compute_PReLU_memory(module, inp, out):
    """Memory traffic of PReLU: input plus its learnable slope parameters."""
    assert isinstance(module, (nn.PReLU))

    batch = inp.size(0)
    features = inp.size()[1:].numel()
    reads = batch * (features + num_params(module))
    return (reads, batch * features)


def compute_Sigmoid_memory(module, inp, out):
    """Memory traffic of (hard-)sigmoid: one read, one write per element."""
    assert isinstance(module, (Sigmoid, HSigmoid))

    batch = inp.size(0)
    features = inp.size()[1:].numel()
    return (batch * features, batch * features)


def compute_Swish_memory(module, inp, out):
    """Memory traffic of (hard-)swish: input is read twice (x and gate)."""
    assert isinstance(module, (Swish, HSwish))

    batch = inp.size(0)
    features = inp.size()[1:].numel()
    reads = batch * (features + features)
    return (reads, batch * features)


def compute_Conv2d_memory(module, inp, out):
    """Memory traffic of a 2-D convolution (weights, and bias if present,
    are counted among the reads)."""
    assert isinstance(module, nn.Conv2d)
    assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())

    batch = inp.size(0)
    reads = batch * (inp.size()[1:].numel() + num_params(module))
    writes = batch * out.size()[1:].numel()
    return (reads, writes)


def compute_ConvTranspose2d_memory(module, inp, out):
    """Memory traffic of a 2-D transposed convolution (weights and bias
    included in the reads)."""
    assert isinstance(module, nn.ConvTranspose2d)
    assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())

    batch = inp.size(0)
    reads = batch * (inp.size()[1:].numel() + num_params(module))
    writes = batch * out.size()[1:].numel()
    return (reads, writes)


def compute_BatchNorm2d_memory(module, inp, out):
    """Memory traffic of BatchNorm2d: input plus the per-channel scale
    and shift vectors."""
    assert isinstance(module, nn.BatchNorm2d)
    assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())

    batch, channels = inp.size(0), inp.size(1)
    reads = batch * (inp.size()[1:].numel() + 2 * channels)
    # Writes cover the full (batch-inclusive) input tensor size.
    writes = inp.size().numel()
    return (reads, writes)


def compute_Linear_memory(module, inp, out):
    """Memory traffic of a fully-connected layer (weights and bias
    included in the reads)."""
    assert isinstance(module, nn.Linear)
    assert len(inp.size()) == 2 and len(out.size()) == 2

    batch = inp.size(0)
    reads = batch * (inp.size()[1:].numel() + num_params(module))
    writes = out.size().numel()
    return (reads, writes)


def compute_Pool2d_memory(module, inp, out):
    """Memory traffic of 2-D pooling: reads the input, writes the
    (smaller) pooled output."""
    assert isinstance(module, (
        nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d,
        nn.AdaptiveMaxPool2d))
    assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())

    batch = inp.size(0)
    reads = batch * inp.size()[1:].numel()
    writes = batch * out.size()[1:].numel()
    return (reads, writes)
nilq/baby-python
python
#!/usr/bin/env python

# =============================================================================
# MODULE DOCSTRING
# =============================================================================

"""
This script generates pickled molecular graphs from SMILES strings in an
xls file, for the D-GIN publication.
"""

# =============================================================================
# GLOBAL IMPORTS
# =============================================================================

import logging
log = logging.getLogger(__name__)

import random
import os
from itertools import repeat
from multiprocessing import Pool
from functools import partial
from pathlib import Path
import argparse
import datetime

import graph_networks
from graph_networks.AtomGraph import AtomGraph
from graph_networks.utilities import readChemblXls, CDPLmolFromSmiles, pickleGraphs, LOG_LEVELS

# =============================================================================
# GLOBAL FIELDS
# =============================================================================

PROJECT_PATH = Path(os.path.dirname(graph_networks.__file__)).parent.absolute()

# =============================================================================
# Methods
# =============================================================================

def multi_threading(data_combined, featurization):
    '''
    PRIVATE METHOD
    Worker used by the multiprocessing pool during graph generation. \n
    Input \n
    data_combined (tuple): tuple of two lists: first is a list of the data
        (name, smiles, properties), \n
        second is a list of property names. \n
    featurization (str): featurization type forwarded to AtomGraph. \n
    Returns: \n
    (AtomGraph): graph instance of the molecule, or None when the molecule
        could not be processed (logged at DEBUG level).
    '''
    try:
        # Last element carries the column/property names; the rest is data.
        property_names = data_combined[-1]
        data = data_combined[:-1]
        indices = [i for i, x in enumerate(data) if x == '']
        mol = CDPLmolFromSmiles(data[1], False)
        if mol is None:
            logging.debug("Could not process "+str(data[0])+" "+str(data[2])+" because of its multi comp!")
            return None
        graph = AtomGraph()
        graph(mol, featurization=featurization)
        graph.setName(data[0])
        graph.setSmiles(data[1])
        # Property columns start after name and smiles; each endpoint is
        # shifted into a positive range before being stored.
        for i, property_name in enumerate(property_names[2:]):
            if 'logs' in property_name.lower():
                # Reject logS values outside (-10, 0]; shift by +10.
                if float(data[2+i]) > 0.0 or float(data[2+i]) < (-10.0):
                    return None
                graph.setProperty(property_name.lower(), (float(data[2+i])+10.0))
            elif 'logp' in property_name.lower():
                graph.setProperty(property_name.lower(), (float(data[2+i])+3.0))
            elif 'logd' in property_name.lower():
                graph.setProperty(property_name.lower(), (float(data[2+i])+1.60))
            else:
                graph.setProperty('other', float(data[2+i]))
        # Endpoints not present in this file are explicitly set to False.
        if not 'logd' in graph.properties:
            graph.setProperty('logd', False)
        if not 'logp' in graph.properties:
            graph.setProperty('logp', False)
        if not 'logs' in graph.properties:
            graph.setProperty('logs', False)
        if not 'other' in graph.properties:
            graph.setProperty('other', False)
    except Exception as e:
        logging.debug("Could not process "+str(data[0])+" "+str(data[2])+" because of "+str(e))
        return None
    return graph

# =============================================================================
# Main Run Method
# =============================================================================

def run(args):
    '''
    The main method for the graph generation: reads the xls file, builds
    graphs in parallel, shuffles, splits into train/eval and test sets,
    and pickles both sets.
    '''
    try:
        if not os.path.isdir(args.output_path_train):
            raise FileExistsError("The output path does not exist - please create one with the corresponding name.")
        logging.debug("Start read FILE and generate data!")
        data = readChemblXls(path_to_xls=args.input_file_path, col_entries=args.columns, sheet_index=args.sheet_index, skip_rows=args.skip_rows, n_entries=args.n_entries)
        logging.debug("Finished FILE and data reading with overall nr of entries: "+str(len(data)))
        print("Finished FILE and data generation with overall nr of entries: "+str(len(data)))
        graph_list = []
        print("Start graph generation.")
        pool = Pool(processes=int(args.n_processes))
        logging.debug("Start muli threading and graph list generation!")
        graph_list = pool.starmap(partial(multi_threading), zip(data, repeat(args.featurization)))
        logging.debug("Finished muli threading and graph list generation!")
        pool.close()
        pool.join()
        # Drop molecules that failed to convert (workers returned None).
        graph_list = list(filter(None, graph_list))
        print("Finished graph generation with overall nr of entries: "+str(len(graph_list)))
        logging.info("Finished graph generation with overall nr of entries: "+str(len(graph_list)))
        split = int(len(graph_list)*args.train_test_split)
        # Fixed seed so the train/test split is reproducible across runs.
        random.seed(1)
        random.shuffle(graph_list)
        # (Removed dead commented-out legacy code that re-ordered graphs to
        # match old logD/logS train/test split name lists read from disk.)
        logD_graph_list_train_eval = graph_list[:split]
        logD_graph_list_test = graph_list[split:]
        logging.info("Train/Evaluation graph list length: "+str(len(logD_graph_list_train_eval)))
        logging.info("Test graph list length: "+str(len(logD_graph_list_test)))
        print("Start pickling...")
        logging.debug("Start pickling graph lists!")
        pickleGraphs(args.output_path_train, logD_graph_list_train_eval, args.pickle_split)
        logging.debug("Finished train/eval pickling!")
        pickleGraphs(args.output_path_test, logD_graph_list_test, args.pickle_split)
        logging.debug("Finished test pickling!")
    except Exception as e:
        logging.error("Could not finish the graph generation due to "+str(e))

# =============================================================================
# MAIN
# =============================================================================

if __name__ == "__main__":

    parser = argparse.ArgumentParser("Graph Generation Tool", description="Uses xls files with the names, smiles and different properties in each column to generate pickled graph representation for the D-GIN publication. The xls file needs to contain in the first row the name/description for eaach column. These names are taken for the property names.")

    parser.add_argument('--input_file_path', required=True, help='REQUIRED! The path to the xls file.', type=str)
    parser.add_argument('--output_path_train', required=True, help='REQUIRED! The path to the output folder FOR TRAINING.')
    parser.add_argument('--output_path_test', required=True, help='REQUIRED! The path to the output folder FOR TESTING.')
    parser.add_argument('--columns', required=True, nargs='+', type=int, help='REQUIRED! Select the column for the name, smiles and other properties. The first to entries here need to be the name and smiles! Other Property names are extraced from the first row. e.g. if names are in column 0, smiles in column 7 and logD/logS endpoints in column 8 and 3 then use --columns 0 7 8 3')
    parser.add_argument('--log_dir', help='REQUIRED! The log directory for the graph generation script.', required=True)
    parser.add_argument('--featurization', type=str, help="Define the featurization type of the graph. Allowed featurizations are: " +
                        "'DMPNN','DGIN', 'DGIN3', 'DGIN4', 'DGIN5', 'DGIN6', 'DGIN7', 'DGIN8', 'DGIN9' ")
    parser.add_argument('--skip_rows', type=int, help='How many rows should be skipped in addition to the first row of names/descriptions. So e.g. --skip_rows 2 skips one additional row. Default = 1', default=1)
    parser.add_argument('--sheet_index', type=int, help="Sheet_index (int): Which sheet should be adressed. Default: 0 ", default=0)
    parser.add_argument('--n_entries', type=int, help="Number of entries to be considered in the xls file. Default: 10000 ", default=10000)
    parser.add_argument('--n_processes', type=int, help="Number of processes used on your machine. Default: 3 ", default=3)
    parser.add_argument('--train_test_split', type=float, help="Split for training/testing. e.g. 0.9 means that 90 percent of the " +
                        "data is taken as training, the rest (10 percent) as testing data. Default: 0.9 ", default=0.9)
    parser.add_argument('--log_verbosity', default=2, type=int,
                        help="Verbosity (between 1-4 occurrences with more leading to more "
                             "verbose logging). CRITICAL=0, ERROR=1, WARN=2, INFO=3, "
                             "DEBUG=4 - Default=3")
    parser.add_argument('--pickle_split', type=int, help="Number of pickled data instances. Default: 5 ", default=5)

    args = parser.parse_args()
    #Path(args.log_dir).mkdir(parents=True, exist_ok=False)
    logging.basicConfig(filename=args.log_dir+'/gen_graph.log', level=LOG_LEVELS[args.log_verbosity])
    logging.info("NEW!!!! Start graph generation. "+ datetime.datetime.now().strftime('%D:%H.%f')[:-4])
    logging.info("input_file_path:"+str(args.input_file_path))
    logging.info("output_path_train:"+str(args.output_path_train))
    logging.info("output_path_test:"+str(args.output_path_test))
    logging.info("train_test_split:"+str(args.train_test_split))
    logging.info("featurization:"+str(args.featurization))

    print("Start graph generation - might take some time, depending on the amount of data!")
    run(args)
    print("Finished! For more details look into the log file: "+str(args.log_dir))
    logging.info("Finished graph generation. "+ datetime.datetime.now().strftime('%D:%H.%f')[:-4]+'\n')
nilq/baby-python
python
import math
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from dash_table.Format import Format, Scheme
from Bio import Phylo
from ete3 import Tree
from plotly.subplots import make_subplots

# -------------------------------------------------------------------------------------
# --------------------------------------- Classes -------------------------------------

class DrawTree():
    """Render a Bio.Phylo newick tree as a Plotly figure.

    Three layouts are offered: square (rectangular phylogram), angular, and
    circular. Branch lengths are taken from the tree unless ``branch_len`` is
    truthy, in which case unit branch lengths are used instead.

    Parameters
    ----------
    newicktree : path or handle readable by ``Phylo.read(..., "newick")``
    template : str
        Plotly template name; a name containing 'dark' switches text to white.
    topology : hashable
        Key into ``color_map`` selecting the branch color.
    color_map : dict
        Maps topology IDs to color strings.
    branch_len : bool
        If truthy, ignore real branch lengths and use unit lengths.
    font_family : str
        Font family applied to the figure.
    """

    def __init__(self, newicktree, template, topology, color_map, branch_len, font_family):
        self.newicktree = Phylo.read(newicktree, "newick")
        self.template = template
        self.topology = topology
        self.color_map = color_map
        self.branch_len = branch_len
        self.font_family = font_family

    def create_square_tree(self):
        """Return a go.Figure drawing the tree as a rectangular phylogram."""

        def get_x_coordinates(tree):
            """Associate each clade with an x-coordinate.

            Returns dict {clade: x-coord}, where x is the clade's depth
            (cumulative branch length from the root, or unit lengths when
            ``self.branch_len`` is set or the tree has no branch lengths).
            """
            if self.branch_len:
                xcoords = tree.depths(unit_branch_lengths=True)
            else:
                xcoords = tree.depths()
            # tree.depths() maps tree clades to depths (by branch length).
            # If there are no branch lengths, all depths are 0 — fall back to
            # unit branch lengths so the layout is still drawable.
            if not max(xcoords.values()):
                xcoords = tree.depths(unit_branch_lengths=True)
            return xcoords

        def get_y_coordinates(tree, dist=1.3):
            """Associate each clade with a y-coordinate.

            Leaf rows are multiples of ``dist``; each internal node is centered
            between its first and last child. Returns dict {clade: y-coord}.
            """
            maxheight = tree.count_terminals()  # number of tree leafs
            # Rows are defined by the tips/leafs
            ycoords = dict(
                (leaf, maxheight - i * dist)
                for i, leaf in enumerate(reversed(tree.get_terminals()))
            )

            def calc_row(clade):
                for subclade in clade:
                    if subclade not in ycoords:
                        calc_row(subclade)
                # Intermediate placement of internal nodes: midpoint of the
                # outermost children.
                ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2

            if tree.root.clades:
                calc_row(tree.root)
            return ycoords

        def get_clade_lines(
            orientation="horizontal",
            y_curr=0,
            x_start=0,
            x_curr=0,
            y_bot=0,
            y_top=0,
            line_color="white",
            line_width=2,
            root_clade=False,
        ):
            """Define a Plotly shape of type 'line' for one branch segment.

            ``root_clade=True`` produces a degenerate (zero-length) segment
            slightly left of the origin so the root is visually anchored.
            """
            branch_line = dict(
                type="line", layer="below", line=dict(color=line_color, width=line_width)
            )
            if root_clade:
                branch_line.update(x0=-0.01, y0=y_curr, x1=-0.01, y1=y_curr)
                return branch_line
            elif orientation == "horizontal":
                branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
            elif orientation == "vertical":
                branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
            else:
                raise ValueError("Line type can be 'horizontal' or 'vertical'")
            return branch_line

        def draw_clade(
            clade,
            x_start,
            line_shapes,
            line_color="white",
            line_width=2,
            x_coords=0,
            y_coords=0,
            init_clade=False,
        ):
            """Recursively draw the tree branches, down from the given clade."""
            x_curr = x_coords[clade]
            y_curr = y_coords[clade]

            # Horizontal line from the parent's x to this clade's x.
            # (The two original call sites differed only in root_clade, so the
            # flag is forwarded directly.)
            branch_line = get_clade_lines(
                orientation="horizontal",
                y_curr=y_curr,
                x_start=x_start,
                x_curr=x_curr,
                line_color=line_color,
                line_width=line_width,
                root_clade=init_clade,
            )
            line_shapes.append(branch_line)

            if clade.clades:
                # Vertical connector spanning all children
                y_top = y_coords[clade.clades[0]]
                y_bot = y_coords[clade.clades[-1]]
                line_shapes.append(
                    get_clade_lines(
                        orientation="vertical",
                        x_curr=x_curr,
                        y_bot=y_bot,
                        y_top=y_top,
                        line_color=line_color,
                        line_width=line_width,
                    )
                )
                # Recurse into descendants
                for child in clade:
                    draw_clade(child, x_curr, line_shapes,
                               x_coords=x_coords, y_coords=y_coords,
                               line_color=line_color)

        if 'dark' in self.template:
            text_color = 'white'
        else:
            text_color = 'black'
        line_color = self.color_map[self.topology]

        tree = self.newicktree
        tree.ladderize()

        x_coords = get_x_coordinates(tree)
        y_coords = get_y_coordinates(tree)
        line_shapes = []

        draw_clade(
            tree.root,
            0,
            line_shapes,
            line_color=line_color,
            line_width=2,
            x_coords=x_coords,
            y_coords=y_coords,
            init_clade=True,
        )
        my_tree_clades = x_coords.keys()
        X = []
        Y = []
        text = []

        for cl in my_tree_clades:
            X.append(x_coords[cl])
            Y.append(y_coords[cl])
            # Unnamed (internal) nodes get a blank label; leafs show their name.
            # (Original had a redundant doubled `if not cl.name` check whose
            # inner else-branch was unreachable — collapsed, same behavior.)
            if not cl.name:
                text.append(" ")
            else:
                text.append(cl.name)

        axis = dict(
            showline=False,
            visible=False,
            zeroline=False,
            showgrid=False,
            showticklabels=False,
            title="",  # y title
        )

        label_legend = ["Tree_1"]
        nodes = []

        for elt in label_legend:
            node = dict(
                type="scatter",
                x=X,
                y=Y,
                mode="markers+text",
                marker=dict(color=text_color, size=5),
                text=text,  # vignet information of each node
                textposition='middle right',
                textfont=dict(color=text_color, size=12),
                showlegend=False,
                name=elt,
            )
            nodes.append(node)

        # Set graph x-range; hide the axis when depths are unit/large.
        if self.branch_len:
            x_range = [-0.5, (max(x_coords.values())+2)]
            show_xaxis = False
        elif max(x_coords.values()) < 0.1:
            x_range = [0, (max(x_coords.values())+(max(x_coords.values())*1.25))]
            show_xaxis = True
        elif max(x_coords.values()) < 0.5:
            x_range = [0, 0.5]
            show_xaxis = True
        elif max(x_coords.values()) < 1:
            x_range = [0, 1]
            show_xaxis = True
        elif max(x_coords.values()) == 1:
            x_range = [0, max(x_coords.values())+2]
            show_xaxis = False
        else:
            x_range = [0, max(x_coords.values())+2]
            show_xaxis = False

        layout = dict(
            autosize=True,
            showlegend=False,
            template=self.template,
            dragmode="pan",
            margin=dict(t=20, b=10, r=20, l=10),
            xaxis=dict(
                showline=True,
                zeroline=False,
                visible=show_xaxis,
                showgrid=False,
                showticklabels=True,
                range=x_range,
            ),
            yaxis=axis,
            hovermode="closest",
            shapes=line_shapes,
            font=dict(family=self.font_family, size=14),
        )

        fig = go.Figure(data=nodes, layout=layout)
        return fig

    def create_angular_tree(self):
        """Return a figure dict drawing the tree with angled (slanted) branches.

        NOTE(review): unlike the other two layouts this returns a plain
        ``dict(data=..., layout=...)`` rather than a ``go.Figure`` — presumably
        consumed directly by Dash; confirm before changing.
        """

        def get_x_coordinates(tree):
            """Associate each clade with an x-coordinate.

            Terminals are numbered 0..n-1; internals get offset half-steps.
            Returns dict {clade: x-coord}.
            """
            terminal_nodes = tree.get_terminals()
            internal_nodes = tree.get_nonterminals()
            terminal_xcoords = dict((leaf, i) for i, leaf in enumerate(terminal_nodes))
            # NOTE(review): zip with range(1, len(internal_nodes)) is one item
            # shorter than internal_nodes, so the last internal node gets no
            # explicit entry — kept as-is to preserve behavior; verify intent.
            internal_xcoords = dict(
                (leaf, i+0.5) for leaf, i in zip(internal_nodes, range(1, len(internal_nodes)))
            )
            xcoords = {**terminal_xcoords, **internal_xcoords}
            # If there are no branch lengths, assign unit branch lengths
            if not max(xcoords.values()):
                xcoords = tree.depths(unit_branch_lengths=True)
            return xcoords

        def get_y_coordinates(tree, dist=1):
            """Associate each clade with a y-coordinate.

            Returns dict {clade: y-coord}; internal nodes are re-centered
            between their outermost children by calc_row.
            """
            maxheight = tree.count_terminals()  # number of tree leafs
            terminal_nodes = tree.get_terminals()
            internal_nodes = tree.get_nonterminals()
            terminal_ycoords = dict((leaf, 1) for _, leaf in enumerate(terminal_nodes))
            internal_ycoords = dict(
                (leaf, i) for leaf, i in zip(internal_nodes, reversed(range(1, len(internal_nodes))))
            )
            ycoords = {**terminal_ycoords, **internal_ycoords}

            def calc_row(clade):
                for subclade in clade:
                    if subclade not in ycoords:
                        calc_row(subclade)
                ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2

            if tree.root.clades:
                calc_row(tree.root)
            return ycoords

        def get_clade_lines(
            orientation="horizontal",
            y_curr=0,
            last_y_curr=0,
            x_start=0,
            x_curr=0,
            y_bot=0,
            y_top=0,
            line_color="rgb(25,25,25)",
            line_width=0.5,
            init_flag=False,
        ):
            """Define a Plotly shape of type 'line' for one branch segment.

            For the non-initial horizontal case the segment starts at the
            parent's y (``last_y_curr``), producing the slanted look.
            """
            branch_line = dict(
                type="line", layer="below", line=dict(color=line_color, width=line_width)
            )
            if orientation == "horizontal":
                if init_flag:
                    branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
                else:
                    branch_line.update(
                        x0=x_start, y0=last_y_curr, x1=x_curr, y1=y_curr)
            elif orientation == "vertical":
                branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
            else:
                raise ValueError("Line type can be 'horizontal' or 'vertical'")
            return branch_line

        def draw_clade(
            clade,
            x_start,
            line_shapes,
            line_color="rgb(15,15,15)",
            line_width=1,
            x_coords=0,
            y_coords=0,
            last_clade_y_coord=0,
            init_flag=True
        ):
            """Recursively draw the tree branches, down from the given clade."""
            x_curr = x_coords[clade]
            y_curr = y_coords[clade]
            # Slanted line from the parent's position to this clade
            branch_line = get_clade_lines(
                orientation="horizontal",
                y_curr=y_curr,
                last_y_curr=last_clade_y_coord,
                x_start=x_start,
                x_curr=x_curr,
                line_color=line_color,
                line_width=line_width,
                init_flag=init_flag,
            )
            line_shapes.append(branch_line)

            if clade.clades:
                # Draw descendants
                for child in clade:
                    draw_clade(child, x_curr, line_shapes,
                               x_coords=x_coords, y_coords=y_coords,
                               last_clade_y_coord=y_coords[clade],
                               init_flag=False, line_color=line_color)

        if 'dark' in self.template:
            text_color = 'white'
        else:
            text_color = 'black'
        line_color = self.color_map[self.topology]

        # Load in Tree object and ladderize
        tree = self.newicktree
        tree.ladderize()

        # Get coordinates + put into dictionaries keyed by clade
        x_coords = get_x_coordinates(tree)
        y_coords = get_y_coordinates(tree)
        line_shapes = []
        draw_clade(
            tree.root,
            0,
            line_shapes,
            line_color=line_color,
            line_width=2,
            x_coords=x_coords,
            y_coords=y_coords,
        )
        # BUGFIX: this assignment was commented out in the original, which made
        # the loop below fail with NameError; restored to match create_square_tree.
        my_tree_clades = x_coords.keys()
        X = []
        Y = []
        text = []

        for cl in my_tree_clades:
            X.append(x_coords[cl])
            Y.append(y_coords[cl])
            # Internal nodes show their confidence value; leafs show their name
            if not cl.name:
                text.append(cl.confidence)
            else:
                text.append(cl.name)

        axis = dict(
            showline=False,
            zeroline=False,
            showgrid=False,
            visible=False,
            showticklabels=False,
        )

        label_legend = ["Tree_1"]
        nodes = []

        for elt in label_legend:
            node = dict(
                type="scatter",
                x=X,
                y=Y,
                mode="markers+text",
                marker=dict(color=text_color, size=5),
                text=text,  # vignet information of each node
                textposition='right',
                textfont=dict(color=text_color, size=25),
                showlegend=False,
                name=elt,
            )
            nodes.append(node)

        layout = dict(
            template=self.template,
            dragmode="select",
            autosize=True,
            showlegend=True,
            xaxis=dict(
                showline=True,
                zeroline=False,
                visible=False,
                showgrid=False,
                showticklabels=True,
                range=[0, (max(x_coords.values())+2)]
            ),
            yaxis=axis,
            hovermode="closest",
            shapes=line_shapes,
            legend={"x": 0, "y": 1},
            font=dict(family="Open Sans"),
        )

        fig = dict(data=nodes, layout=layout)
        return fig

    def create_circular_tree(self):
        """Return a go.Figure drawing the tree in a circular (fan) layout."""

        def get_circular_tree_data(tree, order='level', dist=1, start_angle=0, end_angle=360, start_leaf='first'):
            """Define data needed to get the Plotly plot of a circular tree
            Source code found at: https://chart-studio.plotly.com/~empet/14834.embed

            tree: Bio.Phylo tree instance
            order: tree traversal method to associate polar coordinates to nodes
            dist: vertical distance between consecutive leafs in the equivalent
                  rectangular layout
            start_angle / end_angle: degrees for the first / last leaf
            start_leaf: 'first' maps tree.get_terminals() counter-clockwise,
                        'last' maps the reversed leaf list
            """
            start_angle *= np.pi/180  # conversion to radians
            end_angle *= np.pi/180

            def get_radius(tree):
                """Map each clade to its radius: the distance from that clade
                to the tree root. Returns dict {clade: node_radius}.
                """
                if self.branch_len:
                    node_radius = tree.depths(unit_branch_lengths=True)
                else:
                    node_radius = tree.depths()
                # If the tree did not record branch lengths, use unit lengths
                # (ex: the case of a newick tree "(A, (B, C), (D, E))")
                if not np.count_nonzero(node_radius.values()):
                    node_radius = tree.depths(unit_branch_lengths=True)
                return node_radius

            def get_vertical_position(tree):
                """Return dict {clade: ycoord} — the cartesian y of each clade
                root in the equivalent rectangular phylogram.
                """
                n_leafs = tree.count_terminals()  # number of tree leafs
                # Assign y-coordinates to the tree leafs
                if start_leaf == 'first':
                    node_ycoord = dict((leaf, k) for k, leaf in enumerate(tree.get_terminals()))
                elif start_leaf == 'last':
                    node_ycoord = dict((leaf, k) for k, leaf in enumerate(reversed(tree.get_terminals())))
                else:
                    raise ValueError("start leaf can be only 'first' or 'last'")

                def assign_ycoord(clade):  # compute the y-coord for the root of this clade
                    for subclade in clade:
                        if subclade not in node_ycoord:  # if the subclade root hasn't a y-coord yet
                            assign_ycoord(subclade)
                    node_ycoord[clade] = 0.5 * (node_ycoord[clade.clades[0]] + node_ycoord[clade.clades[-1]])

                if tree.root.clades:
                    assign_ycoord(tree.root)
                return node_ycoord

            node_radius = get_radius(tree)
            node_ycoord = get_vertical_position(tree)
            y_vals = node_ycoord.values()
            ymin, ymax = min(y_vals), max(y_vals)
            # Subtracting dist avoids coincidence of the first and last leaf
            # angle when [ymin, ymax] is mapped onto [0, 2pi]
            ymin -= dist

            def ycoord2theta(y):
                # maps a y in [ymin-dist, ymax] to [radian(start_angle), radian(end_angle)]
                return start_angle + (end_angle - start_angle) * (y-ymin) / float(ymax-ymin)

            def get_points_on_lines(linetype='radial', x_left=0, x_right=0, y_right=0, y_bot=0, y_top=0):
                """
                - define the points generating a radial branch, or the circular
                  arc perpendicular to it
                - an 'angular' line is 10 points on the rectangular-layout
                  segment, mapped through the polar transform and spline-joined
                - returns lists X, Y of the representative point coordinates
                """
                if linetype == 'radial':
                    theta = ycoord2theta(y_right)
                    X = [x_left*np.cos(theta), x_right*np.cos(theta), None]
                    Y = [x_left*np.sin(theta), x_right*np.sin(theta), None]
                elif linetype == 'angular':
                    theta_b = ycoord2theta(y_bot)
                    theta_t = ycoord2theta(y_top)
                    t = np.linspace(0, 1, 10)  # 10 points spanning the circular arc
                    theta = (1-t) * theta_b + t * theta_t
                    X = list(x_right * np.cos(theta)) + [None]
                    Y = list(x_right * np.sin(theta)) + [None]
                else:
                    raise ValueError("linetype can be only 'radial' or 'angular'")
                return X, Y

            def get_line_lists(clade, x_left, xlines, ylines, xarc, yarc):
                """Recursively compute the lists of points that span the tree branches"""
                # xlines, ylines - x/y coords of radial edge ends
                # xarc, yarc - points generating arc segments for tree branches
                x_right = node_radius[clade]
                y_right = node_ycoord[clade]
                X, Y = get_points_on_lines(linetype='radial', x_left=x_left, x_right=x_right, y_right=y_right)
                xlines.extend(X)
                ylines.extend(Y)
                if clade.clades:
                    y_top = node_ycoord[clade.clades[0]]
                    y_bot = node_ycoord[clade.clades[-1]]
                    X, Y = get_points_on_lines(linetype='angular', x_right=x_right, y_bot=y_bot, y_top=y_top)
                    xarc.extend(X)
                    yarc.extend(Y)
                    # append the branch points of the descendants
                    for child in clade:
                        get_line_lists(child, x_right, xlines, ylines, xarc, yarc)

            xlines = []
            ylines = []
            xarc = []
            yarc = []
            get_line_lists(tree.root, 0, xlines, ylines, xarc, yarc)
            xnodes = []
            ynodes = []

            for clade in tree.find_clades(order='preorder'):  # it was 'level'
                theta = ycoord2theta(node_ycoord[clade])
                xnodes.append(node_radius[clade]*np.cos(theta))
                ynodes.append(node_radius[clade]*np.sin(theta))

            return xnodes, ynodes, xlines, ylines, xarc, yarc

        if 'dark' in self.template:
            text_color = 'white'
        else:
            text_color = 'black'
        line_color = self.color_map[self.topology]

        tree = self.newicktree
        tree.ladderize()

        traverse_order = 'preorder'
        all_clades = list(tree.find_clades(order=traverse_order))
        # Tag every clade with a sequential id (preorder index)
        for k, cl in enumerate(all_clades):
            cl.id = k

        xnodes, ynodes, xlines, ylines, xarc, yarc = get_circular_tree_data(
            tree, order=traverse_order, start_leaf='last')

        tooltip = []
        clade_names = []
        color = []

        for clade in tree.find_clades(order=traverse_order):
            if self.branch_len:
                branch_length = 1
            else:
                branch_length = clade.branch_length
            if clade.name and clade.confidence and clade.branch_length:
                tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}\
                <br>confidence: {int(clade.confidence)}")
                # BUGFIX: original was `color.append[clade.confidence.value]`
                # (subscripting a method -> TypeError); use the same call form
                # as the branch below.
                color.append(clade.confidence)
                clade_names.append(clade.name)
            elif clade.name is None and clade.branch_length is not None and clade.confidence is not None:
                color.append(clade.confidence)
                clade_names.append(clade.name)
                tooltip.append(f"branch-length: {branch_length}\
                <br>confidence: {int(clade.confidence)}")
            elif clade.name and clade.branch_length and clade.confidence is None:
                tooltip.append(f"name: {clade.name}<br>branch-length: {branch_length}")
                color.append(-1)
                clade_names.append(clade.name)
            else:
                tooltip.append('')
                color.append(-1)
                clade_names.append(clade.name)

        trace_nodes = dict(type='scatter',
                           x=xnodes,
                           y=ynodes,
                           mode='markers+text',
                           marker=dict(color=text_color, size=8),
                           text=clade_names,
                           textposition='top center',
                           textfont=dict(color=text_color, size=12),
                           hoverinfo='text',
                           hovertemplate=tooltip,
                           )

        trace_radial_lines = dict(type='scatter',
                                  x=xlines,
                                  y=ylines,
                                  mode='lines',
                                  line=dict(color=line_color, width=1),
                                  hoverinfo='none',
                                  )

        trace_arcs = dict(type='scatter',
                          x=xarc,
                          y=yarc,
                          mode='lines',
                          line=dict(color=line_color, width=1, shape='spline'),
                          hoverinfo='none',
                          )

        layout = dict(
            font=dict(family=self.font_family, size=14),
            autosize=True,
            showlegend=False,
            template=self.template,
            xaxis=dict(visible=False),
            yaxis=dict(visible=False),
            hovermode='closest',
            margin=dict(t=20, b=10, r=20, l=10, pad=20),
        )

        fig = go.Figure(data=[trace_radial_lines, trace_arcs, trace_nodes], layout=layout)
        return fig


class RFDistance():
    """Robinson-Foulds distance between two trees, computed via ete3.

    Parameters t1, t2: newick strings/paths accepted by ``ete3.Tree``.
    The full comparison dict from ``Tree.compare`` is cached on construction.
    """

    def __init__(self, t1, t2):
        self.t1 = Tree(t1)
        self.t2 = Tree(t2)
        self.compare = self.t1.compare(self.t2)

    def NormRF(self):
        # Normalized RF distance (rf / max_rf)
        return self.compare['norm_rf']

    def RF(self):
        # Raw RF distance
        return self.compare['rf']

    def MaxRF(self):
        # Maximum possible RF distance for these trees
        return self.compare['max_rf']

# 
------------------------------------------------------------------------------------- # ------------------------------ Alt Data Graph Functions ----------------------------- def make_alt_data_str_figure( alt_data_to_graph, chromosome_df, color_mapping, topology_df, window_size, template, dataRange, axis_line_width, xaxis_gridlines, yaxis_gridlines, font_family, whole_genome, ): # sort dataframe topology_df.sort_values(by=["Window"], inplace=True) topology_df.fillna("NULL", inplace=True) # Build graph if whole_genome: fig = px.histogram( topology_df, x="Window", y=[1]*len(topology_df), category_orders={"Chromosome": chromosome_df['Chromosome']}, color=alt_data_to_graph, color_discrete_sequence=list(color_mapping.values()), nbins=int(chromosome_df["End"].max()/window_size), facet_row="Chromosome", ) fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) fig.update_layout( template=template, margin=dict( l=60, r=50, b=40, t=40, ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0 ), title={ 'text': str(alt_data_to_graph), 'x':0.5, 'xanchor': 'center', 'yanchor': 'top', }, hovermode="x unified", font=dict(family=font_family,), height=100*len(topology_df["Chromosome"].unique()) ) else: fig = px.histogram( topology_df, x="Window", y=[1]*len(topology_df), color=alt_data_to_graph, color_discrete_sequence=list(color_mapping.values()), nbins=int(chromosome_df["End"].max()/window_size), ) fig.update_layout( template=template, margin=dict( l=60, r=50, b=40, t=40, ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0 ), title={ 'text': str(alt_data_to_graph), 'x':0.5, 'xanchor': 'center', 'yanchor': 'top', }, hovermode="x unified", font=dict(family=font_family,), ) if dataRange: fig.update_xaxes( title="Position", range=dataRange, showline=True, showgrid=xaxis_gridlines, linewidth=axis_line_width, ) else: fig.update_xaxes( title="Position", showline=True, showgrid=xaxis_gridlines, linewidth=axis_line_width, ) 
fig.update_yaxes( title="y-axis", range=[0, 1], nticks=1, showline=True, showgrid=yaxis_gridlines, linewidth=axis_line_width, ) return fig def make_alt_data_int_figure( alt_data_to_graph, color_mapping, topology_df, chromosome_df, template, dataRange, y_max, axis_line_width, xaxis_gridlines, yaxis_gridlines, font_family, whole_genome, ): # sort dataframe topology_df = topology_df.sort_values(by=["Window"]) y_range = [0, (y_max*1.1)] # Build graph if whole_genome: fig = px.line( topology_df, x="Window", y=alt_data_to_graph, category_orders={"Chromosome": chromosome_df['Chromosome']}, color_discrete_sequence=list(color_mapping.values()), facet_row="Chromosome", ) fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) fig.update_layout( template=template, margin=dict( l=60, r=50, b=40, t=40, ), title={ 'text': str(alt_data_to_graph), 'x':0.5, 'xanchor': 'center', 'yanchor': 'top', }, hovermode="x unified", font=dict(family=font_family,), height=100*len(topology_df["Chromosome"].unique()), ) else: fig = px.line( topology_df, x="Window", y=alt_data_to_graph, color_discrete_sequence=list(color_mapping.values()), ) fig.update_layout( template=template, margin=dict( l=60, r=50, b=40, t=40, ), title={ 'text': str(alt_data_to_graph), 'x':0.5, 'xanchor': 'center', 'yanchor': 'top', }, hovermode="x unified", font=dict(family=font_family,), ) # Update X-axis if dataRange: fig.update_xaxes( title="Position", range=dataRange, showline=True, showgrid=xaxis_gridlines, linewidth=axis_line_width, ) else: fig.update_xaxes( title="Position", showline=True, showgrid=xaxis_gridlines, linewidth=axis_line_width, ) if y_max < 0.1: fig.update_yaxes( fixedrange=True, linewidth=axis_line_width, range=y_range, showgrid=yaxis_gridlines, showline=True, title="Edit me", showexponent = 'all', exponentformat = 'e', ) else: fig.update_yaxes( fixedrange=True, linewidth=axis_line_width, range=y_range, showgrid=yaxis_gridlines, showline=True, title="Edit me", ) return fig # 
---------------------------------------------------------------------------------------- # -------------------------- Single Chromosome Graph Functions --------------------------- def build_histogram_with_rug_plot( topology_df, chromosome, chromosome_df, template, current_topologies, window_size, color_mapping, dataRange, topoOrder, axis_line_width, xaxis_gridlines, yaxis_gridlines, font_family, ): # --- Set up topology data --- # Extract current topology data if (type(current_topologies) == str) or (type(current_topologies) == int): wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies] elif type(current_topologies) == list: wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)] # Add in psuedodata for missing current_topologies (fixes issue where topology is dropped from legend) if len(wanted_rows['TopologyID'].unique()) < len(current_topologies): missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()] for mt in missing_topologies: missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4) missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0]) wanted_rows = pd.concat([wanted_rows, missing_row]) # Group data by topology ID grouped_topology_df = wanted_rows.sort_values(['TopologyID'],ascending=False).groupby(by='TopologyID') # Set row heights based on number of current_topologies being shown if len(current_topologies) <= 6: subplot_row_heights = [1, 1] elif len(current_topologies) <= 8: subplot_row_heights = [4, 2] else: subplot_row_heights = [8, 2] # Build figure # fig = make_subplots(rows=2, cols=1, row_heights=subplot_row_heights, vertical_spacing=0.05, shared_xaxes=True) fig = make_subplots(rows=2, cols=1, vertical_spacing=0.05, shared_xaxes=True) for topology, data in grouped_topology_df: fig.add_trace( go.Scatter( x=data['Window'], y=data['TopologyID'], name=topology, legendgroup=topology, 
mode='markers', marker_symbol='line-ns-open', marker_line_width=1, marker_color=[color_mapping[topology]]*len(data), ), # go.Box( # x=data['Window'], # y=data['TopologyID'], # boxpoints='all', # jitter=0, # legendgroup=topology, # marker_symbol='line-ns-open', # marker_color=color_mapping[topology], # name=topology, # ), row=1, col=1, ) fig.add_trace( go.Bar( x=data['Window'], y=[1]*len(data), name=topology, legendgroup=topology, showlegend=False, marker_color=color_mapping[topology], marker_line_width=0, ), row=2, col=1 ) # Update layout + axes fig.update_layout( template=template, legend_title_text='Topology', margin=dict( l=60, r=50, b=40, t=40, ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, itemsizing='constant' ), hovermode="x unified", font=dict(family=font_family,), ) fig.update_xaxes( rangemode="tozero", range=dataRange, linewidth=axis_line_width, showgrid=xaxis_gridlines, row=1, col=1 ) fig.update_xaxes( rangemode="tozero", range=dataRange, linewidth=axis_line_width, title='Position', showgrid=xaxis_gridlines, row=2, col=1, ) fig.update_yaxes( rangemode="tozero", categoryarray=topoOrder, linewidth=axis_line_width, showgrid=yaxis_gridlines, showticklabels=False, fixedrange=True, ticklen=0, title="", type='category', row=1, col=1, ) fig.update_yaxes( rangemode="tozero", fixedrange=True, linewidth=axis_line_width, nticks=1, showgrid=yaxis_gridlines, showticklabels=False, ticklen=0, title="", row=2, col=1, ) return fig def build_rug_plot( topology_df, chromosome, template, current_topologies, color_mapping, dataRange, topoOrder, axis_line_width, xaxis_gridlines, yaxis_gridlines, font_family, ): # --- Group wanted data --- if (type(current_topologies) == str) or (type(current_topologies) == int): wanted_rows = topology_df[topology_df["TopologyID"] == current_topologies] elif type(current_topologies) == list: wanted_rows = topology_df[topology_df["TopologyID"].isin(current_topologies)] # Add in psuedodata for missing 
current_topologies (fixes issue where topology is dropped from legend) if len(wanted_rows['TopologyID'].unique()) < len(current_topologies): missing_topologies = [t for t in current_topologies if t not in wanted_rows['TopologyID'].unique()] for mt in missing_topologies: missing_row_data = [chromosome, 0, 'NA', mt] + ['NULL']*(len(wanted_rows.columns)-4) missing_row = pd.DataFrame(data={i:j for i,j in zip(wanted_rows.columns, missing_row_data)}, index=[0]) wanted_rows = pd.concat([wanted_rows, missing_row]) else: pass # --- Group data by topology ID grouped_topology_df = wanted_rows.groupby(by='TopologyID') # --- Build figure --- fig = go.Figure() for topology, data in grouped_topology_df: fig.add_trace(go.Scatter( x=data['Window'], y=data['TopologyID'], name=topology, legendgroup=topology, mode='markers', marker_symbol='line-ns-open', marker_size=int(100/len(grouped_topology_df)), marker_line_width=1, marker_color=[color_mapping[topology]]*len(data), )) # Update figure layout + axes fig.update_layout( template=template, legend_title_text='Topology', xaxis_title_text='Position', margin=dict( l=60, r=60, b=40, t=40, ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', ), hovermode="x unified", font=dict(family=font_family,), ) fig.update_xaxes( rangemode="tozero", range=dataRange, linewidth=axis_line_width, showgrid=xaxis_gridlines, showline=True, ) fig.update_yaxes( fixedrange=True, title="", showline=True, showgrid=yaxis_gridlines, linewidth=axis_line_width, showticklabels=False, type='category', categoryarray=topoOrder, ) fig.for_each_annotation(lambda a: a.update(text="")) return fig def build_tile_plot( topology_df_filtered, chromosome_df, template, current_topologies, color_mapping, dataRange, window_size, axis_line_width, xaxis_gridlines, yaxis_gridlines, font_family, ): # Extract current topology data if (type(current_topologies) == str) or (type(current_topologies) == int): wanted_rows = 
    topology_df_filtered[topology_df_filtered["TopologyID"] == current_topologies]
    # NOTE(review): SOURCE chunk begins mid-statement above — the assignment target
    # (presumably `wanted_rows = ...` under an `if` branch) is outside this view; confirm.
    elif type(current_topologies) == list:
        wanted_rows = topology_df_filtered[topology_df_filtered["TopologyID"].isin(current_topologies)]
    # fig = px.histogram(
    #     wanted_rows,
    #     x="Window",
    #     y=[1]*len(wanted_rows),
    #     color="TopologyID",
    #     color_discrete_map=color_mapping,
    #     nbins=int(chromosome_df["End"].max()/window_size)
    # )
    grouped_topology_df = wanted_rows.groupby(by='TopologyID')
    # Build figure: one tick-mark ("rug") scatter trace per topology
    fig = go.Figure()
    for topology, data in grouped_topology_df:
        fig.add_trace(
            go.Scatter(
                x=data['Window'],
                y=[1]*len(data),
                name=topology,
                legendgroup=topology,
                mode='markers',
                marker_symbol='line-ns-open',
                marker_size=225,
                # marker_line_width=2,
                marker_color=[color_mapping[topology]]*len(data),
                # showlegend = False
            ),
        )
    # Update layout + axes
    fig.update_layout(
        template=template,
        legend_title_text='Topology',
        margin=dict(
            l=60,
            r=50,
            b=40,
            t=40,
        ),
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            traceorder='normal',
        ),
        hovermode="x unified",
        font=dict(family=font_family,),
    )
    fig.update_xaxes(
        linewidth=axis_line_width,
        rangemode="tozero",
        range=dataRange,
        showgrid=xaxis_gridlines,
    )
    fig.update_yaxes(
        fixedrange=True,
        linewidth=axis_line_width,
        # range=[0, 1],
        showline=False,
        showgrid=yaxis_gridlines,
        showticklabels=False,
        ticklen=0,
        title="",
    )
    return fig


def build_alt_data_graph(
    alt_data_to_graph,
    chromosome_df,
    color_mapping,
    topology_df,
    window_size,
    template,
    dataRange,
    y_max,
    axis_line_width,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
):
    """Build the per-chromosome figure for one additional-data column.

    Inspects the first non-null value of ``topology_df[alt_data_to_graph]``
    and dispatches to the string- or numeric-valued figure builder
    (``make_alt_data_str_figure`` / ``make_alt_data_int_figure``, the last
    positional argument False selects single-chromosome mode).
    Returns ``no_data_graph(template)`` when the column has no non-null values.
    """
    # Check input type and graph accordingly
    try:
        input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
    except IndexError:
        # Column is empty after dropna() — nothing to plot
        return no_data_graph(template)
    if input_type == str:
        alt_data_graph_data = make_alt_data_str_figure(
            alt_data_to_graph,
            chromosome_df,
            color_mapping,
            topology_df,
            window_size,
            template,
            dataRange,
            axis_line_width,
            xaxis_gridlines,
            yaxis_gridlines,
            font_family,
            False,
        )
    else:
        alt_data_graph_data = make_alt_data_int_figure(
            alt_data_to_graph,
            color_mapping,
            topology_df,
            chromosome_df,
            template,
            dataRange,
            y_max,
            axis_line_width,
            xaxis_gridlines,
            yaxis_gridlines,
            font_family,
            False,
        )
    return alt_data_graph_data


def build_whole_genome_alt_data_graph(
    alt_data_to_graph,
    chromosome_df,
    color_mapping,
    topology_df,
    window_size,
    template,
    y_max,
    axis_line_width,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
):
    """Whole-genome counterpart of ``build_alt_data_graph``.

    Same type-based dispatch, but passes ``None`` for the x-range and
    True (whole-genome flag) as the final positional argument.
    """
    # Check input type and graph accordingly
    try:
        input_type = type(topology_df[alt_data_to_graph].dropna().to_list()[0])
    except IndexError:
        return no_data_graph(template)
    if input_type == str:
        alt_data_graph_data = make_alt_data_str_figure(
            alt_data_to_graph,
            chromosome_df,
            color_mapping,
            topology_df,
            window_size,
            template,
            None,
            axis_line_width,
            xaxis_gridlines,
            yaxis_gridlines,
            font_family,
            True,
        )
    else:
        alt_data_graph_data = make_alt_data_int_figure(
            alt_data_to_graph,
            color_mapping,
            topology_df,
            chromosome_df,
            template,
            None,
            y_max,
            axis_line_width,
            xaxis_gridlines,
            yaxis_gridlines,
            font_family,
            True,
        )
    return alt_data_graph_data


def build_gff_figure(
    data,
    dataRange,
    template,
    axis_line_width,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
):
    """Draw GFF annotation tracks (one y-row per feature/strand combination).

    ``data`` is expected to carry GFF-like columns: 'feature', 'attribute',
    'strand', 'start', 'end'. Gene names are rendered only when the visible
    region is at most 10 Mb wide.
    """
    regionStart, regionEnd = dataRange
    # Show gene names if showing less than 1Mb of data
    # if abs(regionEnd - regionStart) <= 10000000:
    if abs(regionEnd - regionStart) <= 10000000:
        show_gene_names = True
    else:
        show_gene_names = False
    # Separate
    # group data by feature and gene name
    attr_group = data.groupby(by=['feature', 'attribute', 'strand'])
    positive_text_pos = "top center"
    negative_text_pos = "top center"
    features_graphed = list()
    fig = go.Figure()
    y_idx = 1
    curr_feature = dict()
    for fg, gene_data in attr_group:
        feature, gene, strand = fg
        feature_strand = f"{feature} ({strand})"
        x_values = sorted(gene_data['start'].to_list() + gene_data['end'].to_list())
        # Update y-axis value if new feature
        if not curr_feature:
            curr_feature[feature_strand] = y_idx
            y_idx += 1
        elif feature_strand in curr_feature.keys():
            pass
        else:
            curr_feature[feature_strand] = y_idx
            y_idx += 1
        # Set legend show if feature in list already
        if feature_strand in features_graphed:
            show_legend = False
        else:
            show_legend = True
            features_graphed.append(feature_strand)
        # Set color, y-values, and arrow direction
        if strand == '+':
            colorValue = 'red'
            y_values = [curr_feature[feature_strand]]*len(x_values)
            markerSymbol = ['square']*(len(x_values)-1) + ['triangle-right']
            text_pos = positive_text_pos
            text_val = [gene] + ['']*(len(x_values)-1)
            # Alternate label placement above/below to reduce text overlap
            if positive_text_pos == "top center":
                positive_text_pos = "bottom center"
            elif positive_text_pos == "bottom center":
                positive_text_pos = "top center"
        else:
            colorValue = '#009BFF'
            y_values = [curr_feature[feature_strand]]*len(x_values)
            markerSymbol = ['triangle-left'] + ['square']*(len(x_values)-1)
            text_pos = negative_text_pos
            text_val = ['']*(len(x_values)-1) + [gene]
            if negative_text_pos == "top center":
                negative_text_pos = "bottom center"
            elif negative_text_pos == "bottom center":
                negative_text_pos = "top center"
        if show_gene_names:
            fig.add_trace(go.Scatter(
                x=x_values,
                y=y_values,
                name=feature_strand,
                legendgroup=feature_strand,
                mode='markers+lines+text',
                marker_symbol=markerSymbol,
                marker_size=8,
                marker_color=colorValue,
                text=text_val,
                textposition=text_pos,
                textfont=dict(
                    size=10,
                ),
                hovertemplate=None,
                showlegend=show_legend,
            ))
        else:
            fig.add_trace(go.Scatter(
                x=x_values,
                y=y_values,
                name=feature_strand,
                legendgroup=feature_strand,
                mode='markers+lines',
                marker_symbol=markerSymbol,
                marker_size=8,
                marker_color=colorValue,
                # hoverinfo=['all'],
                hovertemplate=None,
                showlegend=show_legend,
            ))
    fig.update_layout(
        hovermode="x unified",
        showlegend=True,
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            traceorder='normal',
        ),
        template=template,
        title='',
        margin=dict(
            l=62,
            r=50,
            b=20,
            t=20,
        ),
        height=150*len(features_graphed),
        font=dict(family=font_family,),
    )
    fig.update_xaxes(
        range=dataRange,
        title='Position',
        matches="x",
rangemode="tozero", linewidth=axis_line_width, showgrid=xaxis_gridlines, ) fig.update_yaxes( range=[0, len(features_graphed)+1], fixedrange=True, showticklabels=False, showgrid=yaxis_gridlines, title='', linewidth=axis_line_width, ) return fig # ---------------------------------------------------------------------------------------- # ------------------------------- Quantile Graph Functions ------------------------------- def get_quantile_coordinates( chromLengths, QUANTILES, WINDOWSIZE, ): quantileCoordinates = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1)) for row in chromLengths.itertuples(index=False): chrom, _, end = row chunkSize = end // QUANTILES for i in range(QUANTILES): q = i + 1 if q == 1: quantileCoordinates.at[q, chrom] = [0, chunkSize] else: quantileCoordinates.at[q, chrom] = [chunkSize*(q-1) + WINDOWSIZE, chunkSize*q] return quantileCoordinates def calculateFrequencies( quantileCoordinates, input_df, chromLengths, QUANTILES, ): quantileFrequencies = pd.DataFrame(columns=chromLengths["Chromosome"], index=range(1, QUANTILES+1)) topos = input_df["TopologyID"].unique() for chrom in quantileCoordinates.columns: for q, quantile in enumerate(quantileCoordinates[chrom], 1): quantileData = input_df[(input_df['Window'] >= quantile[0]) & (input_df['Window'] <= quantile[1]) & (input_df['Chromosome'] == chrom)] topoQD = quantileData['TopologyID'].value_counts().to_dict() # Add missing topologies as count=0 for i in topos: if i not in topoQD.keys(): topoQD[i] = 0 quantileFrequencies.at[q, chrom] = topoQD continue return quantileFrequencies def plot_frequencies( quantileFrequencies, n_quantiles, template, color_mapping, axis_line_width, xaxis_gridlines, yaxis_gridlines, ): def reorganizeDF(df): new_df = pd.DataFrame(columns=['Chr', 'Quantile', 'TopologyID', 'Frequency']) nidx = 0 for c in df.columns: for idx in df.index: chromTotal = sum([v for v in df.at[idx, c].values()]) for topo, freq in zip(df.at[idx, c].keys(), df.at[idx, 
c].values()): new_df.at[nidx, 'TopologyID'] = topo new_df.at[nidx, 'Chr'] = c new_df.at[nidx, 'Quantile'] = idx try: new_df.at[nidx, 'Frequency'] = int(freq)/chromTotal except ZeroDivisionError: new_df.at[nidx, 'Frequency'] = 0.0 nidx += 1 return new_df # Organize DataFrame organizedDF= reorganizeDF(quantileFrequencies) # Create line graph fig = px.line( organizedDF, x='Quantile', y='Frequency', color='TopologyID', facet_col='Chr', facet_col_wrap=1, facet_row_spacing=0.01, color_discrete_map=color_mapping, ) fig.update_traces(texttemplate='%{text:.3}', textposition='top center') if len(organizedDF["Chr"].unique()) == 1: fig.update_layout( uniformtext_minsize=12, template=template, legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', ), height=300, ) else: fig.update_layout( uniformtext_minsize=12, template=template, legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', ), height=100*len(organizedDF["Chr"].unique()), ) fig.update_xaxes( range=[1, n_quantiles], rangemode="tozero", linewidth=axis_line_width, showgrid=xaxis_gridlines, ) fig.update_yaxes( range=[0, 1], fixedrange=True, showgrid=yaxis_gridlines, linewidth=axis_line_width, ) fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) return fig def calculate_topo_quantile_frequencies(df, current_topologies, additional_data, n_quantiles): final_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"]) for topology in current_topologies: topo_df = pd.DataFrame(columns=["TopologyID", "Frequency", "Quantile"]) tidx = 0 df = df.sort_values(by=additional_data) df = df.assign(Quantile = pd.qcut(df[additional_data].rank(method='first'), q=n_quantiles, labels=False)) df['Quantile'] = df['Quantile'].apply(lambda x: x+1) df_group = df.groupby(by="Quantile") for rank, data in df_group: counts = data["TopologyID"].value_counts() for t, f in zip(counts.index, counts): if t == topology: topo_df.at[tidx, 
"TopologyID"] = t topo_df.at[tidx, "Frequency"] = f/len(df) topo_df.at[tidx, "Quantile"] = rank tidx += 1 break else: continue # -- Concat dfs -- final_df = pd.concat([final_df, topo_df]) return final_df def plot_frequencies_topo_quantile( final_df, template, color_mapping, axis_line_width, xaxis_gridlines, yaxis_gridlines, graph_title, additional_data ): fig = px.line( final_df, x="Quantile", y="Frequency", color="TopologyID", color_discrete_map=color_mapping, markers=True, ) fig.update_layout( template=template, title=graph_title, title_x=0.5, margin=dict( t=80 ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, # itemsizing='constant' ), ) fig.update_xaxes( title=f"{additional_data} Quantiles", linewidth=axis_line_width, showgrid=xaxis_gridlines, tick0=0, dtick=1, ) fig.update_yaxes( rangemode="tozero", linewidth=axis_line_width, showgrid=yaxis_gridlines, title='% Windows Observed', ) return fig # --------------------------------------------------------------------------------- # -------------------------------- Whole Genome Graph Functions ------------------------------- def build_topology_frequency_pie_chart( df, template, color_mapping, font_family, ): """Returns pie graph for whole genome topology frequencies""" fig = px.pie( df, values='Frequency', names='TopologyID', color="TopologyID", color_discrete_map=color_mapping, template=template, title='Whole Genome Topology Frequencies', ) fig.update_traces(textposition='inside') fig.update_layout( margin=dict(l=120, r=20, t=40, b=10), uniformtext_minsize=12, uniformtext_mode='hide', legend=dict(itemclick=False, itemdoubleclick=False), title_x=0.5, font=dict(family=font_family,), ) return fig def build_rf_graph( df, ref_topo, template, color_mapping, axis_line_width, font_family, ): fig = px.bar( df, x="TopologyID", y="normRF-Distance", color="TopologyID", color_discrete_map=color_mapping, text='normRF-Distance') fig.update_traces(texttemplate='%{text:.2f}', textposition='inside') 
    fig.update_layout(
        title=f"Normalized RF-Distance from {ref_topo}",
        title_x=0.5,
        template=template,
        font=dict(family=font_family,),
    )
    fig.update_xaxes(linewidth=axis_line_width)
    # Normalized RF-distance is bounded [0, 1]
    fig.update_yaxes(linewidth=axis_line_width, range=[0, 1])
    return fig


def build_whole_genome_rug_plot(
    df,
    chrom_df,
    chromGroup,
    template,
    color_mapping,
    currTopologies,
    topoOrder,
    window_size,
    axis_line_width,
    xaxis_gridlines,
    yaxis_gridlines,
    wg_squish_expand,
    font_family,
):
    """Per-chromosome rug plot of topology occurrence across the whole genome.

    One subplot row per chromosome; each topology contributes a tick-mark
    scatter trace per row. ``wg_squish_expand`` ('expand'/'squish'/other)
    controls the per-row height.
    """
    df = df[(df['TopologyID'].isin(currTopologies)) & (df['Chromosome'].isin(chromGroup))]
    grouped_topology_df = df.groupby(by='TopologyID')
    num_chroms = len(df['Chromosome'].unique())
    chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
    chrom_shapes = []
    row_height = [2]*num_chroms
    # --- Build figure ---
    # If chromosome name longer than 5 characters, use subplot titles
    # instead of row titles
    if df.Chromosome.map(len).max() > 5:
        fig = make_subplots(
            rows=num_chroms,
            subplot_titles=chrom_row_dict.keys(),
            shared_xaxes=True,
            cols=1,
            row_heights=row_height,
        )
    else:
        fig = make_subplots(
            rows=num_chroms,
            row_titles=[c for c in chrom_row_dict.keys()],
            shared_xaxes=True,
            cols=1,
            row_heights=row_height,
        )
    for topology, data in grouped_topology_df:
        # Only the first trace of each topology shows in the legend
        add_legend = True
        for chrom in chrom_row_dict.keys():
            chrom_data = data[data["Chromosome"] == chrom]
            chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
            chrom_length = chrom_length_data['End'].max()
            if len(chrom_data) == 0:
                # Placeholder trace keeps the y-category present on empty rows
                fig.add_trace(
                    go.Scatter(
                        x=[0],
                        y=[topology],
                        name=topology,
                        legendgroup=topology,
                        mode='markers',
                        marker_symbol='line-ns-open',
                        marker_color=[color_mapping[topology]]*len(chrom_data),
                        showlegend = False,
                    ),
                    row=chrom_row_dict[chrom], col=1,
                )
            elif add_legend:
                fig.add_trace(
                    go.Scatter(
                        x=chrom_data['Window'],
                        y=chrom_data['TopologyID'],
                        name=topology,
                        legendgroup=topology,
                        mode='markers',
                        # marker_size=int(25/len(grouped_topology_df)),
                        marker_symbol='line-ns-open',
                        marker_color=[color_mapping[topology]]*len(chrom_data),
                    ),
                    # go.Box(
                    #     x=chrom_data['Window'],
                    #     y=chrom_data['TopologyID'],
                    #     boxpoints='all',
                    #     jitter=0,
                    #     legendgroup=topology,
                    #     marker_symbol='line-ns-open',
                    #     marker_color=color_mapping[topology],
                    #     name=topology,
                    # ),
                    row=chrom_row_dict[chrom], col=1,
                )
                # Vertical line marking the chromosome end
                chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
                add_legend = False
            else:
                fig.add_trace(
                    go.Scatter(
                        x=chrom_data['Window'],
                        y=chrom_data['TopologyID'],
                        name=topology,
                        legendgroup=topology,
                        mode='markers',
                        # marker_size=int(25/len(grouped_topology_df)),
                        marker_symbol='line-ns-open',
                        marker_color=[color_mapping[topology]]*len(chrom_data),
                        showlegend = False,
                    ),
                    # go.Box(
                    #     x=chrom_data['Window'],
                    #     y=chrom_data['TopologyID'],
                    #     boxpoints='all',
                    #     jitter=0,
                    #     marker_symbol='line-ns-open',
                    #     marker_color=color_mapping[topology],
                    #     legendgroup=topology,
                    #     showlegend = False,
                    #     name=topology,
                    # ),
                    row=chrom_row_dict[chrom], col=1,
                )
                chrom_ref = chrom_row_dict[chrom]
                chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=-1, y1=len(currTopologies), line_width=2))
    # Update layout + axes
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    fig.update_xaxes(
        rangemode="tozero",
        range=[0, (chrom_df['End'].max()+(2*window_size))],
        fixedrange=True,
        linewidth=axis_line_width,
        ticklen=0,
        matches="x",
        showgrid=xaxis_gridlines,
    )
    fig.update_yaxes(
        fixedrange=True,
        title="",
        showgrid=yaxis_gridlines,
        showticklabels=False,
        linewidth=axis_line_width,
        categoryarray=topoOrder,
    )
    # Height scaling depends on squish/expand mode and chromosome count
    if wg_squish_expand == 'expand':
        if num_chroms < 5:
            fig.update_layout(
                template=template,
                legend_title_text='Topology',
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                height=160*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
        else:
fig.update_layout( template=template, legend_title_text='Topology', legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), height=100*num_chroms, shapes=chrom_shapes, title_x=0.5, font=dict(family=font_family,), ) elif wg_squish_expand == 'squish': if num_chroms < 5: fig.update_layout( template=template, legend_title_text='Topology', legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), height=125*num_chroms, shapes=chrom_shapes, title_x=0.5, font=dict(family=font_family,), ) else: fig.update_layout( template=template, legend_title_text='Topology', legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), height=50*num_chroms, shapes=chrom_shapes, title_x=0.5, font=dict(family=font_family,), ) else: if num_chroms < 5: fig.update_layout( template=template, legend_title_text='Topology', legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), height=105*num_chroms, shapes=chrom_shapes, title_x=0.5, font=dict(family=font_family,), ) else: fig.update_layout( template=template, legend_title_text='Topology', legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), height=20*num_chroms, shapes=chrom_shapes, title_x=0.5, margin=dict( t=10, b=30, ), font=dict(family=font_family,), ) # Rotate chromosome names to 0-degrees for annotation in fig['layout']['annotations']: annotation['textangle']=0 annotation['align']="center" return fig def build_whole_genome_tile_plot( df, chrom_df, template, color_mapping, currTopologies, topoOrder, window_size, axis_line_width, chromGroup, xaxis_gridlines, yaxis_gridlines, wg_squish_expand, font_family, ): """ Max chromosomes per graph if # current_topologies <= 3: 20 Max chromosomes per graph if # 
    # (continuation of build_whole_genome_tile_plot; docstring text merged into the def above)
    df = df[df['TopologyID'].isin(currTopologies)]
    df = df[df['Chromosome'].isin(chromGroup)]
    grouped_topology_df = df.groupby(by='TopologyID')
    num_chroms = len(df['Chromosome'].unique())
    chrom_row_dict = {chrom:i for chrom, i in zip(sorted(df['Chromosome'].unique()), range(1, len(df['Chromosome'].unique())+1, 1))}
    chrom_shapes = []
    # --- Build figure ---
    # If longest chromosome name longer
    # than 5 characters, use subplot titles
    # instead of row titles
    if df.Chromosome.map(len).max() > 5:
        fig = make_subplots(
            rows=num_chroms,
            cols=1,
            shared_xaxes=True,
            subplot_titles=chrom_row_dict.keys(),
            vertical_spacing=0.03,
        )
    else:
        fig = make_subplots(
            rows=num_chroms,
            cols=1,
            shared_xaxes=True,
            row_titles=[c for c in chrom_row_dict.keys()],
            vertical_spacing=0.001,
        )
    for topology, data in grouped_topology_df:
        # Only the first trace of each topology shows in the legend
        add_legend = True
        for chrom in chrom_row_dict.keys():
            chrom_data = data[data["Chromosome"] == chrom]
            chrom_length_data = chrom_df[chrom_df['Chromosome'] == chrom]
            chrom_length = chrom_length_data['End'].max()
            if add_legend:
                fig.add_trace(
                    go.Histogram(
                        x=chrom_data['Window'],
                        y=[1]*len(chrom_data),
                        nbinsx=int(chrom_length/window_size),
                        name=topology,
                        legendgroup=topology,
                        marker_line_width=0,
                        marker_color=color_mapping[topology],
                    ),
                    row=chrom_row_dict[chrom], col=1,
                )
                # Vertical marker at the chromosome end
                chrom_shapes.append(dict(type="line", xref="x", yref="y", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
                add_legend = False
            else:
                fig.add_trace(
                    go.Histogram(
                        x=chrom_data['Window'],
                        y=[1]*len(chrom_data),
                        nbinsx=int(chrom_length/window_size),
                        name=topology,
                        legendgroup=topology,
                        marker_line_width=0,
                        marker_color=color_mapping[topology],
                        showlegend = False
                    ),
                    row=chrom_row_dict[chrom], col=1,
                )
                chrom_ref = chrom_row_dict[chrom]
                chrom_shapes.append(dict(type="rect", xref=f"x{chrom_ref}", yref=f"y{chrom_ref}", x0=chrom_length, x1=chrom_length, y0=0, y1=1, line_width=2))
    # Update layout + axes
    # Height scaling depends on squish/expand mode and chromosome count
    if wg_squish_expand == 'expand':
        if num_chroms < 5:
            fig.update_layout(
                barmode="relative",
                template=template,
                legend_title_text='Topology',
                margin=dict(
                    l=60,
                    r=50,
                    b=40,
                    t=40,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                hovermode="x unified",
                height=130*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
        else:
            fig.update_layout(
                barmode="relative",
                template=template,
                legend_title_text='Topology',
                margin=dict(
                    l=60,
                    r=50,
                    b=40,
                    t=40,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                hovermode="x unified",
                height=100*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
    elif wg_squish_expand == 'squish':
        if num_chroms < 5:
            fig.update_layout(
                barmode="relative",
                template=template,
                legend_title_text='Topology',
                margin=dict(
                    l=60,
                    r=50,
                    b=40,
                    t=40,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                hovermode="x unified",
                height=80*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
        else:
            fig.update_layout(
                barmode="relative",
                template=template,
                legend_title_text='Topology',
                margin=dict(
                    l=60,
                    r=50,
                    b=40,
                    t=40,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                hovermode="x unified",
                height=50*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
    else:
        if num_chroms < 5:
            fig.update_layout(
                barmode="relative",
                template=template,
                legend_title_text='Topology',
                margin=dict(
                    l=60,
                    r=50,
                    b=40,
                    t=40,
                ),
                legend=dict(
                    orientation="h",
                    yanchor="bottom",
                    y=1.02,
                    xanchor="left",
                    x=0,
                    traceorder='normal',
                    itemsizing='constant',
                ),
                hovermode="x unified",
                height=55*num_chroms,
                shapes=chrom_shapes,
                title_x=0.5,
                font=dict(family=font_family,),
            )
        else:
            fig.update_layout(
                barmode="relative",
                template=template,
legend_title_text='Topology', margin=dict( l=60, r=50, b=40, t=40, ), legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', itemsizing='constant', ), hovermode="x unified", height=20*num_chroms, shapes=chrom_shapes, title_x=0.5, font=dict(family=font_family,), ) fig.update_xaxes( linewidth=axis_line_width, fixedrange=True, rangemode="tozero", range=[0, chrom_df['End'].max()], ticklen=0, showgrid=xaxis_gridlines, ) fig.update_yaxes( # categoryarray=topoOrder, range=[0, 1], fixedrange=True, linewidth=axis_line_width, showgrid=yaxis_gridlines, showticklabels=False, title="", ticklen=0, ) # Rotate chromosome names to 0-degrees for annotation in fig['layout']['annotations']: annotation['textangle']=0 annotation['align']="center" return fig def build_whole_genome_bar_plot( df, template, color_mapping, currTopologies, axis_line_width, chromGroup, xaxis_gridlines, yaxis_gridlines, font_family, ): # Filter df to chromosomes in group df = df[df['Chromosome'].isin(chromGroup)] df = df[df['TopologyID'].isin(currTopologies)] number_of_chrom_rows = len(df["Chromosome"].unique()) // 3 fig = px.bar( df, x='TopologyID', y='Frequency', facet_col='Chromosome', facet_col_wrap=3, facet_row_spacing=0.05, color='TopologyID', template=template, color_discrete_map=color_mapping, text='Frequency', height=int(500*number_of_chrom_rows), ) fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1])) fig.update_traces(texttemplate='%{text:.2}', textposition='outside') # Remove y-axis labels for axis in fig.layout: if type(fig.layout[axis]) == go.layout.YAxis: fig.layout[axis].title.text = '' fig.update_layout( uniformtext_minsize=12, legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0, traceorder='normal', ), margin=dict(l=10, r=10, t=10, b=10), title="", annotations = list(fig.layout.annotations) + [go.layout.Annotation( x=-0.07, y=0.5, font=dict( size=12, # color='white', ), showarrow=False, text="Frequency", 
                textangle=-90, xref="paper", yref="paper"
            )
        ],
        title_x=0.5,
        font=dict(family=font_family,),
    )
    fig.update_xaxes(
        title="",
        linewidth=axis_line_width,
        showgrid=xaxis_gridlines,
    )
    fig.update_yaxes(
        range=[0, 1.1],
        matches='y',
        linewidth=axis_line_width,
        showgrid=yaxis_gridlines,
    )
    return fig


def build_whole_genome_pie_charts(
    df,
    template,
    color_mapping,
    chromGroup,
    font_family,
):
    """Return a figure containing one topology-frequency pie chart per
    chromosome, laid out three charts per row."""
    # Filter df to chromosomes in group
    df = df[df['Chromosome'].isin(chromGroup)]
    # Enough rows for 3 charts per row (math.ceil covers a partial last row)
    number_of_chrom_rows = (len(df["Chromosome"].unique()) // 3)+(math.ceil(len(df["Chromosome"].unique()) % 3))
    # Pie traces require 'domain'-type subplot cells
    specs = [[{'type':'domain'}, {'type':'domain'}, {'type':'domain'}] for _ in range(number_of_chrom_rows)]
    fig = make_subplots(
        rows=number_of_chrom_rows, cols=3,
        specs=specs,
        vertical_spacing=0.03,
        horizontal_spacing=0.001,
        subplot_titles=sorted(df["Chromosome"].unique()),
        column_widths=[2]*3,
    )
    col_pos = 1
    row_num = 1
    for c in sorted(df['Chromosome'].unique()):
        chrom_df = df[df["Chromosome"] == c]
        fig.add_trace(
            go.Pie(
                labels=chrom_df["TopologyID"],
                values=chrom_df['Frequency'],
                marker_colors=list(color_mapping.values())),
            row=row_num, col=col_pos)
        # Advance to the next grid cell, wrapping after three columns
        if col_pos == 3:
            col_pos = 1
            row_num += 1
        else:
            col_pos += 1
    fig.update_traces(textposition='inside')
    fig.update_layout(
        uniformtext_minsize=12,
        showlegend=True,
        template=template,
        height=int(200*number_of_chrom_rows),
        font=dict(family=font_family,),
    )
    return fig


# ---------------------------------------------------------------------------------
# --------------------------- Stats DataFrame Generators --------------------------
def _get_valid_cols(topology_df):
    """Return names of the additional-data columns (5th column onward)
    whose values are all non-string, i.e. usable for numeric statistics."""
    valid_cols = list()
    for i in topology_df.columns[4:]:
        data = topology_df[i].unique()
        flag = None
        for j in data:
            if type(j) == str:
                # A single string value disqualifies the column
                flag = False
                break
            else:
                flag = True
        if flag:
            valid_cols.append(i)
        else:
            continue
    return valid_cols


def basic_stats_dfs(topology_df):
    """Generate dataframes of basic statistics.

    Returns a (topology-frequency df, additional-data summary df) pair;
    the second is empty when the file has no additional data columns.

    :param topology_df: Current View Tree Viewer input file dataframe
    :type topology_df: Object
    """
    # Calculate current view topologies
    topo_freq_df = pd.DataFrame(topology_df["TopologyID"].value_counts()/len(topology_df))
    if len(topo_freq_df) > 25:
        # If more than 25 topologies loaded, just show top 25;
        # the remaining mass is lumped into a synthetic "Other" row.
        topo_freq_df = topo_freq_df.head(25)
        remainder_freq = 1.0 - sum(topo_freq_df['TopologyID'])
        topo_freq_df.at["Other", "TopologyID"] = remainder_freq
    topo_names = [i for i in topo_freq_df.index]
    topo_freqs = [round(i, 4) for i in topo_freq_df["TopologyID"]]
    # Calculate median + average of additional data
    if len(topology_df.columns) > 4:
        valid_cols = _get_valid_cols(topology_df)
        additional_dt_names = [i for i in valid_cols]
        additional_dt_avg = [topology_df[i].mean() for i in valid_cols]
        additional_dt_std = [topology_df[i].std() for i in valid_cols]
        topo_freq_df = pd.DataFrame(
            {
                "TopologyID": topo_names,
                "Frequency": topo_freqs,
            }
        )
        additional_data_df = pd.DataFrame(
            {
                "Additional Data": additional_dt_names,
                "Average": additional_dt_avg,
                "Std Dev": additional_dt_std,
            }
        )
        return topo_freq_df, additional_data_df
    else:
        # No additional data types present in file
        topo_freq_df = pd.DataFrame(
            {
                "TopologyID": topo_names,
                "Frequency": topo_freqs,
            }
        )
        return topo_freq_df, pd.DataFrame()


def current_view_topo_freq_chart(basic_stats_topo_freqs, template, color_mapping):
    """Return pie chart figure object for local topology frequencies.

    Falls back to a bar chart when the truncated "Other" bucket is
    present (a pie chart reads poorly with 26 slices).

    :param basic_stats_topo_freqs: Dataframe of topology frequencies
    :type basic_stats_topo_freqs: DataFrame
    :return: Plotly express pie chart
    :rtype: Figure object
    """
    if "Other" in basic_stats_topo_freqs["TopologyID"].to_list():
        fig = px.bar(
            basic_stats_topo_freqs,
            x='TopologyID',
            y="Frequency",
            color="TopologyID",
            color_discrete_map=color_mapping,
            text="Frequency",
        )
        fig.update_layout(
            template=template,
            uniformtext_minsize=12,
            uniformtext_mode='hide',
        )
        fig.update_traces(textposition='outside')
        return fig
    else:
        fig = px.pie(
            basic_stats_topo_freqs,
            values="Frequency",
            names="TopologyID",
            color="TopologyID",
            color_discrete_map=color_mapping,
            template=template,
            title="Current View Topology Frequencies",
        )
        fig.update_layout(
            legend=dict(itemclick=False, itemdoubleclick=False),
            margin=dict(l=120, r=20, t=40, b=10),
            uniformtext_minsize=12,
            uniformtext_mode='hide',
            title_x=0.5,
        )
        fig.update_traces(textposition='inside')
        return fig


def whole_genome_datatable(tv_df):
    """Build DataTable (data, columns) summarising each numeric
    additional-data column per topology across the whole genome.
    NOTE: mutates tv_df in place (drops non-numeric columns)."""
    # NOTE(review): tv_df[4:] slices *rows*, not columns; _get_valid_cols
    # inspects columns[4:] either way, so this only skips the first four
    # rows from the type check - presumably _get_valid_cols(tv_df) was
    # intended; verify.
    valid_cols = _get_valid_cols(tv_df[4:])
    for i in tv_df.columns.to_list()[4:]:
        if i in valid_cols:
            continue
        else:
            tv_df.drop(labels=i, axis=1, inplace=True)
    df_group = tv_df.groupby(by="TopologyID")
    out_df = pd.DataFrame(columns=["TopologyID", "Additional Data", "Num. Windows", "Average", "Std Dev"])
    idx = 0
    for topology, data in df_group:
        additional_datatypes = [i for i in data.columns[4:]]
        for datatype in additional_datatypes:
            dt_data = data[datatype]
            mean = dt_data.mean()
            stdev = dt_data.std()
            out_df.at[idx, "TopologyID"] = topology
            out_df.at[idx, "Additional Data"] = datatype
            out_df.at[idx, "Num. Windows"] = len(dt_data)
            out_df.at[idx, "Average"] = mean
            out_df.at[idx, "Std Dev"] = stdev
            idx += 1
        continue
    # Dash DataTable column specs with 4-decimal numeric formatting
    columns = [
        {'id': c, 'name': ["Per-Topology Whole Genome Comparison", c],
         'type': 'numeric',
         'format': Format(precision=4, scheme=Scheme.decimal)}
        for c in out_df.columns
    ]
    data = out_df.to_dict('records')
    return data, columns


# --- post-hoc tests ---
def mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment):
    """Pairwise Mann-Whitney U tests between topologies (adjusted p-values)."""
    return sp.posthoc_mannwhitney(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)


def dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment):
    """Pairwise Dunn's tests between topologies (adjusted p-values)."""
    return sp.posthoc_dunn(tv_df, val_col=additional_data_type, group_col='TopologyID', p_adjust=pval_adjustment)


def tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha):
    """Pairwise Tukey HSD tests between topologies.

    NOTE: pval_adjustment is accepted for signature symmetry but is not
    used by posthoc_tukey_hsd.
    """
    return sp.posthoc_tukey_hsd(tv_df[additional_data_type], tv_df["TopologyID"], alpha=alpha)


# --- Significance tests ---
def kruskal_wallis_H_test(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
    """Return dataframe with Kruskal-Wallis H test
    information for each topology
    """
    # One array of additional-data values per topology group
    d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
    H, p = ss.kruskal(*d, nan_policy='omit')
    if posthoc_type == "Mann-Whitney rank test":
        posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
        # Flatten the pairwise p-value matrix into long-form rows
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                if c1 == c2:
                    # Remove self-self comparisons
                    continue
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    elif posthoc_type == "Dunn's test":
        posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                if c1 == c2:
                    # Remove self-self comparisons
                    continue
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    elif posthoc_type == "TukeyHSD":
        posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                if c1 == c2:
                    # Remove self-self comparisons
                    continue
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    else:
        pass
    return posthoc, data, columns, H, p


def one_way_anova(tv_df, additional_data_type, posthoc_type, pval_adjustment, alpha):
    """One-way ANOVA across topologies plus the selected post-hoc test.

    Returns (posthoc matrix, DataTable rows, DataTable columns, F, p).
    NOTE(review): unlike kruskal_wallis_H_test, these loops do NOT skip
    self-self (c1 == c2) comparisons - confirm whether that is intended.
    """
    d = [tv_df.loc[ids, additional_data_type].values for ids in tv_df.groupby('TopologyID').groups.values()]
    F, p = ss.f_oneway(*d)
    if posthoc_type == "Mann-Whitney rank test":
        posthoc = mann_whitney_posthoc(tv_df, additional_data_type, pval_adjustment)
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    elif posthoc_type == "Dunn's test":
        posthoc = dunns_test_posthoc(tv_df, additional_data_type, pval_adjustment)
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    elif posthoc_type == "TukeyHSD":
        posthoc = tukeyHSD_posthoc(tv_df, additional_data_type, pval_adjustment, alpha)
        posthoc_df = pd.DataFrame(columns=[posthoc_type, "p-value"])
        idx = 0
        for c1 in posthoc.columns:
            for c2, pval in zip(posthoc.index, posthoc[c1]):
                posthoc_df.at[idx, posthoc_type] = f"{c1} vs {c2}"
                posthoc_df.at[idx, "p-value"] = float(pval)
                idx += 1
        data = posthoc_df.to_dict('records')
        columns = [
            {'id': posthoc_type, 'name': posthoc_type},
            {'id': 'p-value', 'name': 'p-value', 'type': 'numeric',
             'format': Format(precision=4, scheme=Scheme.decimal_or_exponent)},
        ]
    else:
        pass
    return posthoc, data, columns, F, p


def stats_test_heatmap(posthoc, template):
    """Render the pairwise post-hoc p-value matrix as a heatmap."""
    # NOTE(review): z holds raw p-values (trace colorbar says 'p-value'),
    # but coloraxis_colorbar is titled "log(p-value)" - confirm which is
    # intended.
    fig = go.Figure(data=go.Heatmap(
        z=posthoc.values,
        x=posthoc.columns,
        y=posthoc.index,
        zmin=0,
        zmax=1,
        colorscale='Viridis',
        colorbar=dict(title='p-value'),
        hovertemplate = 'p-value: %{z}<extra></extra>',
    ))
    fig.update_layout(
        template=template,
        coloraxis_colorbar=dict(title="log(p-value)"),
        margin=dict(
            t=60,
        ),
    )
    return fig


def frequency_distribution(data, name, template):
    """Return frequency density distribution"""
    fig = px.histogram(data, x=name, histnorm='density')
    fig.update_layout(template=template, margin=dict(t=20, pad=30))
    return fig


def mean_frequency_of_alt_data_per_topology(tv_df, topologies, additional_data_type):
    """Return DataTable records with per-topology window counts and the
    mean of the chosen additional data column."""
    out_df = pd.DataFrame(columns=["TopologyID", "Total Windows", f"Mean ({additional_data_type})"])
    idx = 1
    for i in topologies:
        topo_df = tv_df[tv_df["TopologyID"] == i]
        additional_data_mean = topo_df[f"{additional_data_type}"].mean()
        out_df.at[idx, "TopologyID"] = i
        out_df.at[idx, "Total Windows"] = len(topo_df)
        out_df.at[idx, f"Mean ({additional_data_type})"] = additional_data_mean
        idx += 1
        continue
    return out_df.to_dict('records')


# ---------------------------------------------------------------------------------
# ------------------------- Graph Customization Functions -------------------------
def set_topology_colors(data, color):
    """Map each TopologyID (most frequent first) to a color, cycling
    through the palette if there are more topologies than colors."""
    df = pd.read_json(data)
    # Set colors to current_topologies
    sorted_topologies = df.assign(freq=df.groupby('TopologyID')['TopologyID'].transform('count')).sort_values(by=['freq','TopologyID'],ascending=[False,True]).loc[:,['TopologyID']]
    unique_topos = sorted_topologies["TopologyID"].unique()
    # Repeat the palette to cover every topology, then trim the remainder
    color_list = (color * ((len(unique_topos) // len(color))))+ color[:len(unique_topos) % len(color)]
    output_dict = dict()
    for s, c in zip(unique_topos, color_list):
        output_dict[s] = c
    return output_dict


def get_RFxpos(hoverdata, df):
    # Resolve a plotly hover event to an x (window) position; histogram
    # bins are mapped back through the dataframe via binNumber.
    hoverdata = hoverdata['points'][0]
    if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
        return int(hoverdata['x'])
    else:
        return df.loc[hoverdata['binNumber']]['Window']


def get_Treexpos(hoverdata, df):
    """Resolve a plotly hover event to an integer x position."""
    # NOTE(review): both branches return the same value, so the condition
    # is currently a no-op - confirm whether a binNumber lookup (as in
    # get_RFxpos) was intended for the else branch.
    hoverdata = hoverdata['points'][0]
    if ('customdata' in hoverdata.keys()) or ('marker.color' in hoverdata.keys()):
        return int(hoverdata['x'])
    else:
        return int(hoverdata['x'])


# ---------------------------------------------------------------------------------
# ------------------------- Init + Empty Graph Functions --------------------------
def no_data_graph(template):
    """This function returns a blank figure with a "NO DATA" watermark"""
    fig = go.Figure()
    fig.update_layout(
        template=template,
        title='',
        annotations=[
            dict(
                name="draft watermark",
                text="NO DATA",
                textangle=0,
                opacity=0.5,
                font=dict(color="white", size=50),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    return fig


def init_data_graph(template):
    """
    This function returns a blank figure with a "NO DATA LOADED" watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text="NO DATA LOADED",
                textangle=0,
                opacity=0.9,
                font=dict(color="white", size=50),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
    fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
    return fig


def init_stats_graph(template):
    """
    This function returns a blank figure with a "NO DATA" watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text="NO DATA",
                textangle=0,
                opacity=0.9,
                font=dict(color="white", size=35),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
    fig.update_yaxes(range=[0.2, 1], showgrid=False, visible=False, zeroline=False)
    return fig


def loading_data_graph(template):
    """
    This function returns a blank figure with a "GATHERING DATA..." watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text="GATHERING DATA...",
                textangle=0,
                opacity=0.9,
                font=dict(color="white", size=100),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    return fig


def init_RF_graph(template):
    """
    This function returns a blank figure with a "Hover Over Data to Activate" watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text="Hover Over Data to Activate",
                textangle=0,
                opacity=0.9,
                font=dict(color="white", size=100),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    return fig


def no_tree_data(template, msg):
    """
    This function returns a blank figure with a custom message watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text=msg,
                textangle=0,
                opacity=0.9,
                font=dict(size=25),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    return fig


def zoom_in_gff(template):
    """
    This function returns a blank figure with a "Zoom in" watermark.
    """
    fig = go.Figure()
    fig.update_layout(
        height=300,
        template=template,
        annotations=[
            dict(
                name="draft watermark",
                text="Zoom in to minimum 5Mb to view",
                textangle=0,
                opacity=0.9,
                font=dict(color="white", size=25),
                xref="paper",
                yref="paper",
                x=0.5,
                y=0.5,
                showarrow=False,
            )
        ],
    )
    fig.update_xaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=[0.2, 1], zeroline=False, visible=False)
    return fig


# ---------------------------------------------------------------------------------
# --------------------------- Input File Verification -----------------------------
def validate_chrom_lengths(chromDF, tvDF):
    """Ensure all chromosomes in chromDF are present in tvDF.
    Chromosome length file can contain more chromosomes than
    TV file, but not the other way around.
    Return True if all are found, False if not."""
    chrom_names = chromDF['Chromosome'].unique()
    tv_chrom_names = tvDF['Chromosome'].unique()
    missing_chromosomes = []
    valid = True
    issue_files = []
    # Check chromosome length file against TV file
    # for c in chrom_names:
    #     if c not in tv_chrom_names:
    #         missing_chromosomes.append(c)
    #         valid = False
    #         issue_files.append("Chromosome Length File")
    #         continue
    #     else:
    #         continue
    # Check TV file against chromosome length file
    for c in tv_chrom_names:
        if c not in chrom_names:
            missing_chromosomes.append(c)
            valid = False
            issue_files.append("Tree Viewer File")
            continue
        else:
            continue
    try:
        if not valid:
            missing_chroms = ", ".join(missing_chromosomes)
            if len(issue_files) > 1:
                missing_files = " & ".join(list(set(issue_files)))
            else:
                missing_files = issue_files[0]
            msg = f"ERROR: Chromosome(s) {missing_chroms} is missing from {missing_files}, please validate consistency of chromosomes between files"
            return msg, False
        else:
            return None, True
    except UnboundLocalError:
        # Defensive: treat any unexpectedly-unset local as "valid"
        return None, True


def get_taxa_from_tree(tree):
    """Collect leaf names from tree"""
    if tree == "NoTree":
        return "NoTree"
    tree = Tree(tree)
    taxa = []
    for leaf in tree.iter_leaves():
        taxa.append(leaf.name)
    return sorted(taxa)


def get_valid_init_tree(trees):
    """Returns first NewickTree entry that is not NoTree"""
    for i in range(len(trees)):
        if trees[i] == "NoTree":
            continue
        else:
            return trees[i]


def validate_gff_gtf_filename(f):
    """Ensure file extension is gff or gtf"""
    if "gtf" in f.lower():
        return True
    elif "gff" in f.lower():
        return True
    else:
        return False


def get_y_max_list(alt_dropdown_options, topology_df):
    """Generate list of max y-values for additional data"""
    y_maxes = []
    for i in alt_dropdown_options:
        try:
            data_type = type(topology_df[i][0])
        except KeyError:
            # Column missing from dataframe; treat as string-typed below
            data_type = str
        if data_type == str:
            # String columns are plotted as presence/absence -> max of 1
            y_maxes.append(1)
        else:
            y_maxes.append(topology_df[i].max())
    return y_maxes


def validate_tree_viewer_input(df):
    """Return False when required headers are not present/correct"""
    def fix_column_names(columns):
        """Fix column names"""
        if columns[:4] == ["Chromosome", "Window", "NewickTree", "TopologyID"]:
            return columns
        else:
            return ["Chromosome", "Window", "NewickTree", "TopologyID"] + columns[4:]

    def check_newick(df):
        """Check if string contains basic newick characters"""
        if "(" not in df["NewickTree"][0]:
            return False
        elif ")" not in df["NewickTree"][0]:
            return False
        elif ";" not in df["NewickTree"][0]:
            return False
        else:
            return True

    def check_window(df):
        """Return False if row type is not int"""
        if type(df["Window"][0]) == np.int32:
            return True
        elif type(df["Window"][0]) == np.int64:
            return True
        else:
            return False

    # Fix required headers if needed
    cols = fix_column_names(list(df.columns))
    df.columns = cols
    # Check required column types
    newick_check = check_newick(df)
    window_check = check_window(df)
    if not newick_check:
        return False
    elif not window_check:
        return False
    else:
        return df


def tv_header_validation(df):
    """Return False if first four required column headers are not valid"""
    required_cols = list(df.columns[:4])
    try:
        assert required_cols == ["Chromosome", "Window", "NewickTree", "TopologyID"]
        return True
    except AssertionError:
        return False


# ---------------------------------------------------------------------------------
# --------------------------- Tree Prune Export Tools -----------------------------
def prune_tree(x, prune_taxa_choices):
    """Prune tree x down to the chosen taxa and return its newick string;
    taxa absent from a particular tree are silently dropped."""
    if x == "NoTree":
        return "NoTree"
    else:
        tree = Tree(x)
        try:
            tree.prune(prune_taxa_choices, preserve_branch_length=True)
        except ValueError:
            # Assumes taxa in dropdown selection
            # is not found in a particular topology/tree
            # Solution is to check list and remove taxa
            # not present in tree
            tree_taxa = tree.get_leaf_names()
            trimmed_taxa_list = [t for t in prune_taxa_choices if t in tree_taxa]
            tree.prune(trimmed_taxa_list, preserve_branch_length=True)
        return tree.write()


def remove_heterotachy_info(l):
    """Remove any information in brackets - ete3
    does not support this format of newick
    """
    # --- If tree is a NaN value, return NoTree ---
    if type(l) == float:
        return "NoTree"
    if ("[" not in l) and ("]" not in l):
        return l
    # Stitch together the segments between ]...[ pairs
    open_brackets = [i for i, x in enumerate(l) if x == "["]
    close_brackets = [i for i, x in enumerate(l) if x == "]"]
    final_string = f'{l[:open_brackets[0]]}'
    for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):
        final_string += l[cb+1:ob]
    final_string += l[close_brackets[-1]+1:]
    return final_string


def tv_topobinner(df):
    """Bin tree topologies that have RF-distance of 0"""
    trees = df['NewickTree']
    topologies = dict()
    topoCount = 1
    for n, t in enumerate(trees):
        if t == "NoTree":
            continue
        elif len(topologies.keys()) == 0:
            # First real tree seeds the first bin
            topologies[n] = {'count': 1, 'idx': [n]}
            continue
        else:
            # Iterate through topology list
            # add new topology if no rf == 0
            # increase count if rf == 0 with topology
            new_topology = True
            for idx in topologies.keys():
                t1 = Tree(remove_heterotachy_info(t))
                t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))
                comparison = t1.compare(t2)
                rf = comparison['rf']
                if rf == 0:
                    topologies[idx]['count'] += 1
                    topologies[idx]['idx'].append(n)
                    new_topology = False
                    break
                else:
                    continue
            if new_topology:
                topologies[n] = {'count': 1, 'idx': [n]}
                continue
            else:
                continue
    # Sort topologies dictionary by 'count'
    topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}
    # Update DataFrame TopologyID column with results
    for topology in topologies.keys():
        idx = topologies[topology]['idx']
        topoName = f'topology{topoCount}'
        for i in idx:
            df.at[i, 'TopologyID'] = topoName
            continue
        topoCount += 1
    return df


def mygrouper(n, iterable):
    """Yield the iterable in groups of n, dropping the None padding from
    the final (possibly short) group."""
    args = [iter(iterable)] * n
    return ([e for e in t if e != None] for t in itertools.zip_longest(*args))


def make_topo_freq_table(df_grouped):
    """Build a long-form (Chromosome, TopologyID, Frequency) table from a
    dataframe grouped by chromosome."""
    dataTableDF = pd.DataFrame(columns=["Chromosome", "TopologyID", 'Frequency'], index=range(len(df_grouped)))
    idx = 0
    for chrom, data in df_grouped:
        chromFreqs = data["TopologyID"].value_counts()/len(data)
        freqTopoOrder = [i for i in chromFreqs.index]
        freqs = [f for f in chromFreqs]
        for t, f in zip(freqTopoOrder, freqs):
            dataTableDF.at[idx, 'Chromosome'] = chrom
            dataTableDF.at[idx, 'TopologyID'] = t
            dataTableDF.at[idx, 'Frequency'] = round(f, 3)
            idx += 1
        continue
    return dataTableDF


def get_gridline_bools(axis_gridlines):
    """If gridlines ON, return True else False"""
    if 'xaxis' in axis_gridlines:
        xaxis_gridlines = True
    else:
        xaxis_gridlines = False
    if 'yaxis' in axis_gridlines:
        yaxis_gridlines = True
    else:
        yaxis_gridlines = False
    return xaxis_gridlines, yaxis_gridlines


# ---------------------------------------------------------------------------------
# ----------------------------- Template Generaters -------------------------------
def project_ini_template():
    """Return the text content of a starter project .ini file."""
    content = """[MAIN]\nProjectDir = /path/to/Project\nTreeViewerFile = /path/to/TreeViewerInput.xlsx\nChromLengths = /path/to/ChromosomeLengths.bed\n\n[ADDITIONAL]\n# Load multiple gff/gtf files by listing them with ";" separating the files\nGFF_GTF = None"""
    return content


def tree_viewer_template():
    """Return an empty dataframe with the required Tree Viewer headers."""
    content = pd.DataFrame(columns=["Chromosome", "Window", "NewickTree", "TopologyID"])
    return content


def chrom_len_template():
    """Return an example chromosome-length dataframe."""
    content = pd.DataFrame({"Chromosome": ["chr1", "chr2", "chr3"], "Start": [0, 0, 0], "Stop": [1000000, 1500000, 2000000]})
    return content


# ---------------------------------------------------------------------------------
# ------------------------------- Misc. Functions ---------------------------------
def divide_input_into_cpu_size_chunks(l, n):
    """Divides chromosomes into sets of size n, where n
    is the number of cores available to use"""
    for i in range(0, len(l), n):
        yield l[i:i + n]


def filter_numeric_dtypes(df):
    """Return names of the additional-data columns (5th onward) whose
    pandas dtype is not 'object' (i.e. numeric columns)."""
    filtered_names = []
    for name, data_type in zip(df.dtypes.index[4:], df.dtypes[4:]):
        if str(data_type) == 'object':
            continue
        else:
            filtered_names.append(name)
    return filtered_names
nilq/baby-python
python
import logging
import os
import subprocess
from datetime import datetime, timezone, timedelta
from pathlib import Path

import django_rq
import novaclient

import vm_manager
from vm_manager.constants import INSTANCE_DELETION_RETRY_WAIT_TIME, \
    INSTANCE_DELETION_RETRY_COUNT, \
    INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME, \
    INSTANCE_CHECK_SHUTOFF_RETRY_COUNT, LINUX
from vm_manager.models import VMStatus
from vm_manager.utils.utils import get_nectar, generate_hostname_url

from guacamole.models import GuacamoleConnection

logger = logging.getLogger(__name__)


def delete_vm_worker(instance):
    """Begin deleting an instance's VM.

    Removes the Guacamole connection, asks OpenStack to stop the server,
    then schedules a job that waits for SHUTOFF before deleting the
    server and, eventually, its boot volume.

    :param instance: Instance model object to delete
    """
    logger.info(f"About to delete vm at addr: {instance.get_ip_addr()} "
                f"for user {instance.user.username}")

    if instance.guac_connection:
        GuacamoleConnection.objects.filter(instance=instance).delete()
        instance.guac_connection = None
        instance.save()

    n = get_nectar()
    try:
        n.nova.servers.stop(instance.id)
    except novaclient.exceptions.NotFound:
        logger.error(f"Trying to delete an instance that's missing "
                     f"from OpenStack {instance}")

    # Check if the Instance is Shutoff before requesting OS to Delete it
    logger.info(f"Checking whether {instance} is ShutOff "
                f"after {INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME} "
                f"seconds and Delete it")
    scheduler = django_rq.get_scheduler('default')
    scheduler.enqueue_in(
        timedelta(seconds=INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME),
        _check_instance_is_shutoff_and_delete, instance,
        INSTANCE_CHECK_SHUTOFF_RETRY_COUNT,
        _delete_volume_once_instance_is_deleted,
        (instance, INSTANCE_DELETION_RETRY_COUNT))


def _check_instance_is_shutoff_and_delete(
        instance, retries, func, func_args):
    """Wait (rescheduling up to `retries` times) until the instance is
    SHUTOFF, then delete the OpenStack server and enqueue the follow-up
    step ``func(*func_args)`` (e.g. volume deletion or shelving).
    """
    scheduler = django_rq.get_scheduler('default')
    if not instance.check_shutdown_status() and retries > 0:
        # If the instance is not Shutoff, schedule the recheck
        logger.info(f"{instance} is not shutoff yet! Will check again in "
                    f"{INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME} seconds")
        scheduler.enqueue_in(
            timedelta(seconds=INSTANCE_CHECK_SHUTOFF_RETRY_WAIT_TIME),
            _check_instance_is_shutoff_and_delete, instance,
            retries - 1, func, func_args)
        return
    if retries <= 0:
        # TODO - not sure we should delete the instance anyway ...
        # (fixed: the two f-strings previously concatenated without a
        # space, producing "took too long.Proceeding")
        logger.info(f"Ran out of retries. {instance} shutoff took too long. "
                    f"Proceeding to delete Openstack instance anyway!")

    # Delete the instance
    vm_status = VMStatus.objects.get_vm_status_by_instance(
        instance, instance.boot_volume.requesting_feature)
    vm_status.status_progress = 66
    # Hack: since this won't be displayed when we are deleting a
    # desktop, use the progress message for the shelving case.
    vm_status.status_message = 'Instance shelving'
    vm_status.save()
    _delete_instance_worker(instance)

    # The 'func' will do the next step; e.g. delete the volume
    # or mark the volume as shelved.
    scheduler.enqueue_in(
        timedelta(seconds=INSTANCE_DELETION_RETRY_WAIT_TIME),
        func, *func_args)


def _delete_instance_worker(instance):
    """Best-effort request to OpenStack to delete the server.

    A missing server is treated as already deleted; any other failure is
    logged and swallowed (deletion is retried by the caller's schedule).
    """
    n = get_nectar()
    try:
        n.nova.servers.delete(instance.id)
        logger.info(f"Instructed OpenStack to delete {instance}")
    except novaclient.exceptions.NotFound:
        logger.info(f"Instance {instance} already deleted")
    except Exception as e:
        logger.error(f"something went wrong with the instance deletion "
                     f"call for {instance}, it raised {e}")


def _delete_volume_once_instance_is_deleted(instance, retries):
    """Poll OpenStack until the server is gone, then delete the boot volume.

    While the server lingers, re-request deletion and reschedule with
    ``retries - 1``; at retries == 0 one final long-wait attempt is made,
    and below zero the instance/volume are marked as errored.
    """
    n = get_nectar()
    try:
        my_instance = n.nova.servers.get(instance.id)
        logger.debug(f"Instance delete status is retries: {retries} "
                     f"openstack instance: {my_instance}")
    except novaclient.exceptions.NotFound:
        logger.info(f"Instance {instance.id} successfully deleted, "
                    f"we can delete the volume now!")
        instance.deleted = datetime.now(timezone.utc)
        instance.save()
        _delete_volume(instance.boot_volume)
        return
    except Exception as e:
        logger.error(f"something went wrong with the instance get "
                     f"call for {instance}, it raised {e}")
        return

    # Openstack still has the instance, and was able to return it to us
    if retries == 0:
        _delete_instance_worker(instance)
        scheduler = django_rq.get_scheduler('default')
        # Note in this case I'm using `minutes=` not `seconds=` to give
        # a long wait time that should be sufficient
        scheduler.enqueue_in(
            timedelta(minutes=INSTANCE_DELETION_RETRY_WAIT_TIME),
            _delete_volume_once_instance_is_deleted, instance,
            retries - 1)
        return

    if retries <= 0:
        # (fixed: was an f-string with no placeholders)
        error_message = "ran out of retries trying to delete"
        instance.error(error_message)
        instance.boot_volume.error(error_message)
        logger.error(f"{error_message} {instance}")
        return

    _delete_instance_worker(instance)
    scheduler = django_rq.get_scheduler('default')
    scheduler.enqueue_in(
        timedelta(seconds=INSTANCE_DELETION_RETRY_WAIT_TIME),
        _delete_volume_once_instance_is_deleted, instance,
        retries - 1)


def _delete_volume(volume):
    """Ask Cinder to delete the boot volume and record the deletion time."""
    n = get_nectar()
    delete_result = str(n.cinder.volumes.delete(volume.id))
    volume.deleted = datetime.now(timezone.utc)
    volume.save()
    logger.debug(f"Delete result is {delete_result}")
    return
nilq/baby-python
python
"""
Unit tests for FeatureNormalizer
"""

import nose.tools
import sys
import numpy
sys.path.append('..')
from dcase_framework.features import FeatureNormalizer, FeatureContainer, FeatureExtractor

import os


def _extract_test_mfcc():
    """Extract MFCC features for material/test.wav and store them at
    material/test.mfcc.cpickle, overwriting any previous run.

    Shared setup for both tests (previously duplicated inline).
    """
    FeatureExtractor(store=True, overwrite=True).extract(
        audio_file=os.path.join('material', 'test.wav'),
        extractor_name='mfcc',
        extractor_params={
            'mfcc': {
                'n_mfcc': 10
            }
        },
        storage_paths={
            'mfcc': os.path.join('material', 'test.mfcc.cpickle')
        }
    )


def test_accumulate_finalize():
    """Accumulating one (then two) passes over the same container should
    yield the expected N, mean, S1 and S2 statistics."""
    _extract_test_mfcc()

    # Test 1: single accumulate + finalize
    feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
    feature_normalizer = FeatureNormalizer().accumulate(feature_container=feature_container).finalize()

    nose.tools.eq_(feature_normalizer['N'][0], 501)
    numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0], numpy.mean(feature_container.feat[0], axis=0))
    numpy.testing.assert_array_equal(feature_normalizer['S1'][0], numpy.sum(feature_container.feat[0], axis=0))
    numpy.testing.assert_array_equal(feature_normalizer['S2'][0], numpy.sum(feature_container.feat[0]**2, axis=0))

    # Test 2: accumulating the same data twice doubles N, S1 and S2,
    # while the mean stays unchanged.
    feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
    feature_normalizer = FeatureNormalizer()
    feature_normalizer.accumulate(feature_container=feature_container)
    feature_normalizer.accumulate(feature_container=feature_container)
    feature_normalizer.finalize()

    nose.tools.eq_(feature_normalizer['N'][0], 501*2)
    numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0], numpy.mean(feature_container.feat[0], axis=0))
    numpy.testing.assert_array_equal(feature_normalizer['S1'][0], numpy.sum(feature_container.feat[0], axis=0)*2)
    numpy.testing.assert_array_equal(feature_normalizer['S2'][0], numpy.sum(feature_container.feat[0] ** 2, axis=0)*2)


def test_with_statement():
    """Using FeatureNormalizer as a context manager should accumulate and
    finalize automatically on exit."""
    _extract_test_mfcc()

    feature_container = FeatureContainer().load(filename=os.path.join('material', 'test.mfcc.cpickle'))
    with FeatureNormalizer() as feature_normalizer:
        feature_normalizer.accumulate(feature_container)

    nose.tools.eq_(feature_normalizer['N'][0], 501)
    numpy.testing.assert_array_equal(feature_normalizer['mean'][0][0], numpy.mean(feature_container.feat[0], axis=0))
    numpy.testing.assert_array_equal(feature_normalizer['S1'][0], numpy.sum(feature_container.feat[0], axis=0))
    numpy.testing.assert_array_equal(feature_normalizer['S2'][0], numpy.sum(feature_container.feat[0] ** 2, axis=0))


# Preserved module-level invocation (runs on import / direct execution,
# as in the original file).
test_accumulate_finalize()
nilq/baby-python
python
"""A package for computing Pfaffians"""

import cmath
import math

import numpy as np
import scipy.linalg as la
import scipy.sparse as sp


def householder_real(x):
    """(v, tau, alpha) = householder_real(x)

    Compute a Householder transformation such that
    (1-tau v v^T) x = alpha e_1
    where x and v a real vectors, tau is 0 or 2, and
    alpha a real number (e_1 is the first unit vector)
    """
    assert x.shape[0] > 0

    sigma = np.dot(x[1:], x[1:])

    if sigma == 0:
        # x is already a multiple of e_1: no reflection needed (tau=0)
        return (np.zeros(x.shape[0]), 0, x[0])
    else:
        norm_x = math.sqrt(x[0] ** 2 + sigma)

        v = x.copy()

        # depending on whether x[0] is positive or negative
        # choose the sign (avoids cancellation in v[0])
        if x[0] <= 0:
            v[0] -= norm_x
            alpha = +norm_x
        else:
            v[0] += norm_x
            alpha = -norm_x

        v = v / np.linalg.norm(v)

        return (v, 2, alpha)


def householder_complex(x):
    """(v, tau, alpha) = householder_complex(x)

    Compute a Householder transformation such that
    (1-tau v v^T) x = alpha e_1
    where x and v a complex vectors, tau is 0 or 2, and
    alpha a complex number (e_1 is the first unit vector)
    """
    assert x.shape[0] > 0

    sigma = np.dot(np.conj(x[1:]), x[1:])

    if sigma == 0:
        return (np.zeros(x.shape[0]), 0, x[0])
    else:
        norm_x = cmath.sqrt(x[0].conjugate() * x[0] + sigma)

        v = x.copy()

        # Carry the phase of x[0] into the shift to avoid cancellation
        phase = cmath.exp(1j * math.atan2(x[0].imag, x[0].real))

        v[0] += phase * norm_x

        v /= np.linalg.norm(v)

    return (v, 2, -phase * norm_x)


def skew_tridiagonalize(A, overwrite_a=False, calc_q=True):
    """T, Q = skew_tridiagonalize(A, overwrite_a, calc_q=True)

    or

    T = skew_tridiagonalize(A, overwrite_a, calc_q=False)

    Bring a real or complex skew-symmetric matrix (A=-A^T) into
    tridiagonal form T (with zero diagonal) with a orthogonal
    (real case) or unitary (complex case) matrix U such that
    A = Q T Q^T
    (Note that Q^T and *not* Q^dagger also in the complex case)

    A is overwritten if overwrite_a=True (default: False), and
    Q only calculated if calc_q=True (default: True)
    """
    # Check if matrix is square
    assert A.shape[0] == A.shape[1] > 0
    # Check if it's skew-symmetric
    assert abs((A + A.T).max()) < 1e-14

    A = np.asarray(A)  # the slice views work only properly for arrays

    # Check if we have a complex data type
    if np.issubdtype(A.dtype, np.complexfloating):
        householder = householder_complex
    elif not np.issubdtype(A.dtype, np.number):
        raise TypeError("pfaffian() can only work on numeric input")
    else:
        householder = householder_real

    if not overwrite_a:
        A = A.copy()

    if calc_q:
        Q = np.eye(A.shape[0], dtype=A.dtype)

    for i in range(A.shape[0] - 2):
        # Find a Householder vector to eliminate the i-th column
        v, tau, alpha = householder(A[i + 1 :, i])
        A[i + 1, i] = alpha
        A[i, i + 1] = -alpha
        A[i + 2 :, i] = 0
        A[i, i + 2 :] = 0

        # Update the matrix block A(i+1:N,i+1:N)
        w = tau * np.dot(A[i + 1 :, i + 1 :], v.conj())
        A[i + 1 :, i + 1 :] += np.outer(v, w) - np.outer(w, v)

        if calc_q:
            # Accumulate the individual Householder reflections
            # Accumulate them in the form P_1*P_2*..., which is
            # (..*P_2*P_1)^dagger
            y = tau * np.dot(Q[:, i + 1 :], v)
            Q[:, i + 1 :] -= np.outer(y, v.conj())

    # NOTE(review): np.asmatrix / np.matrix is discouraged in modern
    # NumPy - consider returning plain arrays; verify callers first.
    if calc_q:
        return (np.asmatrix(A), np.asmatrix(Q))
    else:
        return np.asmatrix(A)


def skew_LTL(A, overwrite_a=False, calc_L=True, calc_P=True):
    """T, L, P = skew_LTL(A, overwrite_a, calc_q=True)

    Bring a real or complex skew-symmetric matrix (A=-A^T) into
    tridiagonal form T (with zero diagonal) with a lower unit
    triangular matrix L such that
    P A P^T= L T L^T

    A is overwritten if overwrite_a=True (default: False),
    L and P only calculated if calc_L=True or calc_P=True,
    respectively (default: True).
    """
    # Check if matrix is square
    assert A.shape[0] == A.shape[1] > 0
    # Check if it's skew-symmetric
    assert abs((A + A.T).max()) < 1e-14

    n = A.shape[0]
    A = np.asarray(A)  # the slice views work only properly for arrays

    if not overwrite_a:
        A = A.copy()

    if calc_L:
        L = np.eye(n, dtype=A.dtype)

    if calc_P:
        Pv = np.arange(n)

    for k in range(n - 2):
        # First, find the largest entry in A[k+1:,k] and
        # permute it to A[k+1,k]
        kp = k + 1 + np.abs(A[k + 1 :, k]).argmax()

        # Check if we need to pivot
        if kp != k + 1:
            # interchange rows k+1 and kp
            temp = A[k + 1, k:].copy()
            A[k + 1, k:] = A[kp, k:]
            A[kp, k:] = temp

            # Then interchange columns k+1 and kp
            temp = A[k:, k + 1].copy()
            A[k:, k + 1] = A[k:, kp]
            A[k:, kp] = temp

            if calc_L:
                # permute L accordingly
                temp = L[k + 1, 1 : k + 1].copy()
                L[k + 1, 1 : k + 1] = L[kp, 1 : k + 1]
                L[kp, 1 : k + 1] = temp

            if calc_P:
                # accumulate the permutation matrix
                temp = Pv[k + 1]
                Pv[k + 1] = Pv[kp]
                Pv[kp] = temp

        # Now form the Gauss vector
        if A[k + 1, k] != 0.0:
            tau = A[k + 2 :, k].copy()
            tau /= A[k + 1, k]

            # clear eliminated row and column
            A[k + 2 :, k] = 0.0
            A[k, k + 2 :] = 0.0

            # Update the matrix block A(k+2:,k+2)
            A[k + 2 :, k + 2 :] += np.outer(tau, A[k + 2 :, k + 1])
            A[k + 2 :, k + 2 :] -= np.outer(A[k + 2 :, k + 1], tau)

            if calc_L:
                L[k + 2 :, k + 1] = tau

    if calc_P:
        # form the permutation matrix as a sparse matrix
        P = sp.csr_matrix((np.ones(n), (np.arange(n), Pv)))

    if calc_L:
        if calc_P:
            return (np.asmatrix(A), np.asmatrix(L), P)
        else:
            return (np.asmatrix(A), np.asmatrix(L))
    else:
        if calc_P:
            return (np.asmatrix(A), P)
        else:
            return np.asmatrix(A)


def pfaffian(A, overwrite_a=False, method="P"):
    """pfaffian(A, overwrite_a=False, method='P')

    Compute the Pfaffian of a real or complex skew-symmetric
    matrix A (A=-A^T). If overwrite_a=True, the matrix A
    is overwritten in the process. This function uses
    either the Parlett-Reid algorithm (method='P', default),
    or the Householder tridiagonalization (method='H')
    """
    # Check if matrix is square
    assert A.shape[0] == A.shape[1] > 0
    # Check if it's skew-symmetric
    assert abs((A + A.T).max()) < 1e-14
    # Check that the method variable is appropriately set
    assert method == "P" or method == "H"

    if method == "P":
        return pfaffian_LTL(A, overwrite_a)
    else:
        return pfaffian_householder(A, overwrite_a)


def pfaffian_LTL(A, overwrite_a=False):
    """pfaffian_LTL(A, overwrite_a=False)

    Compute the Pfaffian of a real or complex skew-symmetric
    matrix A (A=-A^T). If overwrite_a=True, the matrix A
    is overwritten in the process. This function uses
    the Parlett-Reid algorithm.
    """
    # Check if matrix is square
    assert A.shape[0] == A.shape[1] > 0
    # Check if it's skew-symmetric
    assert abs((A + A.T).max()) < 1e-14

    n, m = A.shape
    # type check to fix problems with integer numbers
    dtype = type(A[0, 0])
    if dtype != np.complex128:
        # the slice views work only properly for arrays
        A = np.asarray(A, dtype=float)

    # Quick return if possible
    if n % 2 == 1:
        return 0

    if not overwrite_a:
        A = A.copy()

    pfaffian_val = 1.0

    for k in range(0, n - 1, 2):
        # First, find the largest entry in A[k+1:,k] and
        # permute it to A[k+1,k]
        kp = k + 1 + np.abs(A[k + 1 :, k]).argmax()

        # Check if we need to pivot
        if kp != k + 1:
            # interchange rows k+1 and kp
            temp = A[k + 1, k:].copy()
            A[k + 1, k:] = A[kp, k:]
            A[kp, k:] = temp

            # Then interchange columns k+1 and kp
            temp = A[k:, k + 1].copy()
            A[k:, k + 1] = A[k:, kp]
            A[k:, kp] = temp

            # every interchange corresponds to a "-" in det(P)
            pfaffian_val *= -1

        # Now form the Gauss vector
        if A[k + 1, k] != 0.0:
            tau = A[k, k + 2 :].copy()
            tau = tau / A[k, k + 1]

            pfaffian_val *= A[k, k + 1]

            if k + 2 < n:
                # Update the matrix block A(k+2:,k+2)
                A[k + 2 :, k + 2 :] = A[k + 2 :, k + 2 :] + np.outer(
                    tau, A[k + 2 :, k + 1]
                )
                A[k + 2 :, k + 2 :] = A[k + 2 :, k + 2 :] - np.outer(
                    A[k + 2 :, k + 1], tau
                )
        else:
            # if we encounter a zero on the
super/subdiagonal, the # Pfaffian is 0 return 0.0 return pfaffian_val def pfaffian_householder(A, overwrite_a=False): """pfaffian(A, overwrite_a=False) Compute the Pfaffian of a real or complex skew-symmetric matrix A (A=-A^T). If overwrite_a=True, the matrix A is overwritten in the process. This function uses the Householder tridiagonalization. Note that the function pfaffian_schur() can also be used in the real case. That function does not make use of the skew-symmetry and is only slightly slower than pfaffian_householder(). """ # Check if matrix is square assert A.shape[0] == A.shape[1] > 0 # Check if it's skew-symmetric assert abs((A + A.T).max()) < 1e-14 n = A.shape[0] # type check to fix problems with integer numbers dtype = type(A[0, 0]) if dtype != np.complex128: # the slice views work only properly for arrays A = np.asarray(A, dtype=float) # Quick return if possible if n % 2 == 1: return 0 # Check if we have a complex data type if np.issubdtype(A.dtype, np.complexfloating): householder = householder_complex elif not np.issubdtype(A.dtype, np.number): raise TypeError("pfaffian() can only work on numeric input") else: householder = householder_real A = np.asarray(A) # the slice views work only properly for arrays if not overwrite_a: A = A.copy() pfaffian_val = 1.0 for i in range(A.shape[0] - 2): # Find a Householder vector to eliminate the i-th column v, tau, alpha = householder(A[i + 1 :, i]) A[i + 1, i] = alpha A[i, i + 1] = -alpha A[i + 2 :, i] = 0 A[i, i + 2 :] = 0 # Update the matrix block A(i+1:N,i+1:N) w = tau * np.dot(A[i + 1 :, i + 1 :], v.conj()) A[i + 1 :, i + 1 :] = A[i + 1 :, i + 1 :] + np.outer(v, w) - np.outer(w, v) if tau != 0: pfaffian_val *= 1 - tau if i % 2 == 0: pfaffian_val *= -alpha pfaffian_val *= A[n - 2, n - 1] return pfaffian_val def pfaffian_schur(A, overwrite_a=False): """Calculate Pfaffian of a real antisymmetric matrix using the Schur decomposition. 
(Hessenberg would in principle be faster, but scipy-0.8 messed up the performance for scipy.linalg.hessenberg()). This function does not make use of the skew-symmetry of the matrix A, but uses a LAPACK routine that is coded in FORTRAN and hence faster than python. As a consequence, pfaffian_schur is only slightly slower than pfaffian(). """ assert np.issubdtype(A.dtype, np.number) and not np.issubdtype( A.dtype, np.complexfloating ) assert A.shape[0] == A.shape[1] > 0 assert abs(A + A.T).max() < 1e-14 # Quick return if possible if A.shape[0] % 2 == 1: return 0 (t, z) = la.schur(A, output="real", overwrite_a=overwrite_a) l = np.diag(t, 1) # noqa: E741 return np.prod(l[::2]) * la.det(z)
nilq/baby-python
python
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.template import loader
from django.utils.safestring import mark_safe as _S
from django.utils.six.moves.urllib.parse import urlparse
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_permission_codename
from django.utils import six

if six.PY3:
    import io
    contents = io.BytesIO
else:
    import StringIO
    contents = StringIO.StringIO

import binascii
import markdown
import json
from mdx_gfm import GithubFlavoredMarkdownExtension
from mimetypes import guess_type
from distutils.util import strtobool
import djclick as click
import re
import yaml
import time
import struct


def get_absolute_url(instance, name='detail'):
    """Reverse the '<app_label>_<model_name>_<name>' URL for an instance."""
    return reverse(
        '{0}_{1}_{2}'.format(
            instance._meta.app_label, instance._meta.model_name, name),
        kwargs={'id': instance.id})


def get_contenttype(instance_or_class):
    """Return the ContentType for a model or instance (pass-through for ContentType)."""
    if isinstance(instance_or_class, ContentType):
        return instance_or_class
    return ContentType.objects.get_for_model(instance_or_class)


def to_natural_key(instance_or_class):
    """Return the (app_label, model) natural-key tuple."""
    return get_contenttype(instance_or_class).natural_key()


def to_natural_key_string(instance_or_class):
    """Return the natural key as a dotted string, e.g. 'auth.user'."""
    return ".".join(to_natural_key(instance_or_class))


def from_natual_key(app_lable, model_name, **queries):
    """Resolve a natural key to a model class, or to an instance if queries given.

    (Function/parameter spellings kept as-is — they are part of the public API.)
    """
    ct = ContentType.objects.get_by_natural_key(app_lable, model_name)
    if queries:
        return ct.get_object_for_this_type(**queries)
    return ct.model_class()


def from_natual_key_string(natural_key_string, **queries):
    """Resolve a dotted natural-key string (e.g. 'auth.user')."""
    return from_natual_key(*natural_key_string.split('.'), **queries)


def get_permission(ct_or_model, codename):
    """Return the first Permission with `codename` for the content type, or None."""
    ct = get_contenttype(ct_or_model)
    return ct.permission_set.filter(codename=codename).first()


def get_perm_name(model, action):
    """Permission name for the given action, e.g. 'app.add_model'."""
    return "{}.{}".format(
        model._meta.app_label,
        get_permission_codename(action, model._meta))


def to_admin_change_url_name(instance_or_class):
    """URL name of the admin change view.

    Fix: the format string had two placeholders but only app_label was
    supplied, so every call raised IndexError. Admin change views are named
    'admin:<app_label>_<model_name>_change'.
    """
    return "admin:{}_{}_change".format(
        instance_or_class._meta.app_label,
        instance_or_class._meta.model_name)


def to_admin_change_url(instance_or_class, id=None):
    """Reverse the admin change URL for an instance (or an explicit id)."""
    id = id or instance_or_class.id
    return reverse(
        to_admin_change_url_name(instance_or_class), args=[id])


def to_admin_changelist_url_name(instance_or_class):
    """URL name of the admin changelist view.

    Fix: this was built from _meta.db_table, which only matches the
    'admin:<app_label>_<model_name>_changelist' convention when the model
    uses the default table name; a custom db_table produced a broken URL
    name. Built from app_label/model_name, consistent with the change view.
    """
    return 'admin:{0}_{1}_changelist'.format(
        instance_or_class._meta.app_label,
        instance_or_class._meta.model_name)


def to_admin_changelist_url(instance_or_class):
    """Reverse the admin changelist URL."""
    return reverse(to_admin_changelist_url_name(instance_or_class))


def spaceless(src):
    """Remove all whitespace, including the ideographic space (U+3000).

    Fix: the pattern was written as a ur'...' literal, which is a syntax
    error on Python 3. A non-raw unicode literal keeps '\\s' for the regex
    engine while resolving U+3000 at compile time, so it parses on both
    Python 2 and 3.
    """
    return re.sub(u'[\\s\u3000]', '', src or '')


def render(src, request=None, **ctx):
    """Render a template *string* with the django engine."""
    from django.template import engines
    engine = engines['django']
    request = request or None
    return _S(engine.from_string(src).render(ctx, request=request))


def render_by(name, request=None, **ctx):
    """Render a template *file* by name."""
    request = request or None
    return _S(loader.get_template(name).render(ctx, request=request))


def echo(teml, fg="green", **kwargs):
    """Render a template string and echo it to the console."""
    click.secho(render(teml, **kwargs), fg=fg)


def echo_by(name, fg="green", **kwargs):
    """Render a template file and echo it to the console."""
    click.secho(render_by(name, **kwargs), fg=fg)


def force_bool(value):
    """Coerce any value to a bool; values strtobool cannot parse yield False."""
    try:
        return strtobool(u"{}".format(value)) == 1
    except ValueError:
        # strtobool() raises ValueError for anything it cannot parse;
        # the previous bare `except: pass` hid unrelated errors as well.
        return False


def get_mimetype(file_name):
    """Return [type, subtype] for a file name, or [None, None] if unknown."""
    if not file_name or file_name.startswith('__MACOSX/'):
        return [None, None]
    name, _x = guess_type(file_name)
    return name and name.split('/') or [None, None]


def list_to_choices(choices):
    """Turn a sequence into Django-style (index, value) choice pairs.

    Fix: the original used choices.index(i), which is O(n^2) and maps
    duplicate values to the index of their first occurrence; enumerate()
    yields each position exactly once.
    """
    return tuple((i, c) for i, c in enumerate(choices))


def to_gfm(text, safe=True):
    """Render Github Flavored Markdown; mark the result safe unless safe=False."""
    md = markdown.Markdown(extensions=[GithubFlavoredMarkdownExtension()])
    return _S(md.convert(text)) if safe else md.convert(text)


def convert(source, format='yaml'):
    """Parse a YAML or JSON string into Python objects.

    SECURITY: yaml.load() without an explicit Loader can construct arbitrary
    Python objects — never feed it untrusted input; prefer yaml.safe_load()
    unless custom tags are required. Kept as-is so existing callers that
    rely on full-loader behavior do not break.
    """
    if format in ['yaml', 'yml']:
        return yaml.load(source)
    if format == 'json':
        return json.loads(source)


def load_template(name):
    """Return the raw source of the named template."""
    return loader.get_template(name).template.source


def time_serial():
    """Return the current time's packed binary double as a hex string.

    Fix: bytes.encode('hex') is Python 2 only; binascii.hexlify() produces
    the identical digest on both Python 2 and 3.
    """
    return binascii.hexlify(struct.pack('d', time.time())).decode('ascii')


def url(url_string):
    """urlparse() convenience wrapper."""
    return urlparse(url_string)


def permcode_items(perm_code):
    """Split 'app.action_model' into its app_label/action/model parts."""
    p = re.split(r"[._]", perm_code) + [None, None, None]
    return dict(zip(['app_label', 'action', 'model'], p[:3]))
nilq/baby-python
python
""" Evaluate the true Fourier coefficients of a given function x(1-x), generate the domain based on that and define the model Q:\Lambda \to D """ import sympy from inversefuns.utilities import get_coef, coef_domain, fourier_exp_vec import numpy as np param_len = 5 t=np.array((0.1,0.2,0.4,0.5,0.7)) period0 = 1.0 def true_param(): x = sympy.symbols('x') # This will take some time because we are evaluating oscillatory function integration an, bn = get_coef(expr=(1-x)*(x), vari=x, trun=(param_len-1), period = period0) return an, bn def my_model_domain(pow=-1,halfwidth0=0.5): an = bn = np.zeros(param_len) domain = coef_domain(an, bn, pow=pow, halfwidth0=halfwidth0) return domain def my_model(parameter_samples): num_samples = parameter_samples.shape[0] if t.shape: QoI_samples = np.zeros((num_samples, t.shape[0])) else: QoI_samples = np.zeros((num_samples, 1)) an = parameter_samples[:, 0::2] bn = parameter_samples[:, 1::2] for i in range(0, num_samples): QoI_samples[i, :] = fourier_exp_vec(t,an[i,:],bn[i,:]) return QoI_samples
nilq/baby-python
python
"""Base Class for a Solver. This class contains the different methods that can be used to solve an environment/problem. There are methods for mini-batch training, control, etc... The idea is that this class will contain all the methods that the different algorithms would need. Then we can simply call this class in the solver scripts and use its methods. I'm still torn between using a class or just using a script. """ from .evaluator import Evaluator from .interrogator import Interrogator import torch class Solver(object): """This class makes absolute sense because there are many types of training depending on the task. For this reason, in the future, this class can easily include all instances of such training routines. Of course, transparent to the user -which is the ultimate goal, complete transparency-. """ def __init__(self, slv_params): print("Creating Solver") self.env = slv_params['environment'] self.alg = slv_params['algorithm'] self.logger = slv_params['logger'] self.evaluator = Evaluator() self.interrogator = Interrogator() def forward(self): self.interrogator.set_inference(self.alg.model, self.env) def backward(self): self.evaluator.evaluate(self.env, self.interrogator.inference) feedback = (self.evaluator.score) self.alg.step(feedback) self.alg.print_state() def save(self, path=''): """Only works with my algorithms, not with SGD.""" fn = path+"model_elite.pth" torch.save(self.alg.model.state_dict(), fn) def save_pool_weights(self, models, path): for i, model in enumerate(models): fn = path+"model_"+str(i)+".pth" torch.save(model.state_dict(), fn) def save_elite_weights(self, path, name=''): if name == '': name = "model_elite.pth" else: name = name+'.pth' fn = path+name torch.save(self.alg.model.state_dict(), fn) def load(self, path, name="model_elite"): """Only works with my algorithms, not with SGD.""" fn = path+name+".pth" print("Loading weights in: " + fn) self.alg.model.load_state_dict(torch.load(fn)) self.alg.model.eval() #
nilq/baby-python
python
import re

mystring = 'My ip address is 10.10.10.20 and by subnet mask is 255.255.255.255'

# Dotted-quad pattern with ESCAPED dots. Fix: the original used r"\d+.\d+..."
# where the bare '.' matches any character, so strings such as '10a10b10c20'
# would also have been accepted.
IPV4_PATTERN = r'\d+(?:\.\d+){3}'


def extract_after(label, text):
    """Return the dotted-quad that follows '<label> is ' in *text*, or None.

    Shared by both extractions below, replacing the duplicated
    search-then-string-replace logic of the original script.
    """
    m = re.search(label + r' is (' + IPV4_PATTERN + r')', text)
    return m.group(1) if m else None


if re.search("ip address", mystring):
    ipaddress = extract_after("ip address", mystring)
    print("IP address is :", ipaddress)

if re.search("subnet mask", mystring):
    ipaddress = extract_after("subnet mask", mystring)
    print("Subnet mask is :", ipaddress)
nilq/baby-python
python
import sys

import PyFBA.metabolism


class Reaction:
    """
    A reaction is the central concept of metabolism and is the conversion
    of substrates to products.

    The reaction describes what we know. At a bare minimum we need a
    a name for the reaction. The name can either be the reaction id
    (e.g. modelSEED or KEGG id), or another name for this reaction.

    A reaction is an object that describes how to get from one compound
    to another. We need to know what the compound(s) on the left of the
    equation are, what the compounds on the right of the reaction are,
    and the probability that the reaction proceeds in either direction.
    If the reaction is truly reversible the probability can be 1 in both
    cases. If it is unidirectional the probability can be 0 in one
    direction.

    The likelihood that a reaction completes will be some product of its
    delta G and its p. We could also do something simpler, e.g. if there
    is a -ve delta G (favorable reaction) we can increase p and if there
    is a +ve delta G (unfavorable reaction) we can decrease p.

    The direction and reversible is the direction that the equation can
    run.

    Acceptable values are:
    ======   ===========================
    Value    Meaning
    ======   ===========================
    None     We don't know the direction
    >        Left to right
    <        Right to left
    =        Bidirectional
    ======   ===========================

    :ivar rctn_id: The reaction ID
    :ivar readable_name: The name of the reaction
    :ivar description: A description of the reaction
    :ivar equation: The reaction equation
    :ivar direction: The direction of the reaction (<, =, >, or ?)
    :ivar gfdirection: The possible gapfilled direction
    :ivar ntdirection: The non-template direction (before correcting for templates)
    :ivar left_compounds: A set of CompoundWithLocations on the left side of the reaction
    :ivar left_abundance: A dict of the CompoundWithLocations on the left and their abundance
    :ivar right_compounds: The set of CompoundWithLocations on the right side of the equation
    :ivar right_abundance: A dict of the CompoundWithLocations on the right and their abundance
    :ivar lower_bound: The lower bound for the reaction
    :ivar upper_bound: The upper bound for the reaction
    :ivar pLR: The probability the reaction proceeds left to right
    :ivar pRL: The probability the reaction proceeds right to left
    :ivar enzymes: The enzyme complex IDs involved in the reaction
    :ivar ec_numbers: The EC numbers associated with the reaction
    :ivar pegs: The protein-encoding genes involved in the reaction
    :ivar deltaG: The delta G
    :ivar deltaG_error: The error in the delta G
    :ivar inp: Whether the reaction is an input reaction
    :ivar outp: Whether the reaction is an output reaction
    :ivar is_transport: Whether the reaction is a transport reaction (imports or exports something)
    :ivar ran: Boolean to note whether the reaction ran
    :ivar is_biomass_reaction: Boolean to note whether this is a biomass reaction
    :ivar biomass_direction: If it is a biomass reaction, what is the direction
    :ivar is_gapfilled: Boolean to note whether the reaction was gapfilled
    :ivar gapfill_method: If the reaction was gapfilled, how was it gapfilled
    :ivar is_uptake_secretion: Is the reaction involved in uptake of compounds or secretion of compounds.
    :ivar aliases: Alternative identifiers for this reaction
    """

    def __init__(self, rctn_id, readable_name=None, description=None,
                 equation=None, direction=None):
        """
        Instantiate a reaction

        :param rctn_id: the reaction id
        :param readable_name: a human readable name. This was refactored
            from name to make it more unique
        :param description: a description of the reaction
        :param equation: the equation for the reaction
        :param direction: the direction of the reaction
        """

        # identity and display
        self.id = rctn_id
        self.model_seed_id = rctn_id
        self.readable_name = readable_name
        self.description = description
        self.equation = equation
        # direction is tracked three ways: as given, as gapfilled, and
        # before template correction
        self.direction = direction
        self.gfdirection = direction  # the gap filling direction
        self.ntdirection = direction  # the non-template driven direction
        # stoichiometry: compounds and their abundances on each side
        self.left_compounds = set()  # type: set[PyFBA.metabolism.CompoundWithLocation]
        self.left_abundance = {}
        self.right_compounds = set()  # type: set[PyFBA.metabolism.CompoundWithLocation]
        self.right_abundance = {}
        # flux bounds (None until explicitly set)
        self.lower_bound = None
        self.upper_bound = None
        # directional probabilities
        self.pLR = 0
        self.pRL = 0
        # annotations
        self.enzymes = set()
        self.ec_numbers = []
        self.pegs = set()
        self.deltaG_error = 0
        self.deltaG = 0
        # bookkeeping flags used by the FBA/gapfilling machinery
        self.inp = False
        self.outp = False
        self.is_transport = False
        self.ran = False
        self.is_biomass_reaction = False
        self.biomass_direction = False
        self.is_gapfilled = False
        self.gapfill_method = ""
        self.is_uptake_secretion = False
        self.aliases = []

    def __eq__(self, other):
        """
        Two reactions are the same if they have the same left and
        right products, but not necessarily the same names or reactions.
Note that we don't care whether the left and right (the directionality) is the same in our two comparisons :param other: The other reaction :type other: Reaction :return: Boolean :rtype: bool """ if isinstance(other, Reaction): return (self.id == other.id or (self.left_compounds, self.right_compounds) == (other.left_compounds, other.right_compounds) or (self.left_compounds, self.right_compounds) == (other.right_compounds, other.left_compounds) ) else: raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented") def __cmp__(self, other): """ Compare whether two things are the same :param other: The other reaction :type other: Reaction :return: an integer, zero if they are the same :rtype: int """ if isinstance(other, Reaction): if __eq__(other): return 0 else: return 1 else: raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented") def __ne__(self, other): """ Are these not equal? :param other: The other reaction :type other: Reaction :return: Boolean :rtype: bool """ try: result = self.__eq__(other) except NotImplementedError: return True return not result def __hash__(self): """ The hash function is based on the name of the reaction. :rtype: int """ return hash((self.id, self.readable_name)) def __str__(self): """ The string version of the reaction. :rtype: str """ if self.readable_name: return f"{self.id}: {self.readable_name}" else: return f"{self.id}: {self.equation}" """ Since we have complex data structures, we can't just pickle them and unpickle them with aplomb! In fact, this is affecting deep/shallow copy, and we need to ensure that we use copy.deepcopy() at all times, otherwise the data structures are not copied correctly. These two methods correctly allow us to pickle the data structures. Note that we have CompoundWithLocation objects, and we need both the object and its abundance to correctly create the pickle. """ def __getstate__(self): """ The state that the object is saved or copied as. 
        We override the left/right compounds and abundances with simple
        arrays of data. This is lossy - we are losing the connections
        between compounds and data and we probably need to reconstruct
        that after pickling/unpickling the reactions.

        :return:
        """
        state = self.__dict__.copy()
        state['left_compounds'] = []
        state['right_compounds'] = []
        state['left_abundance'] = {}
        state['right_abundance'] = {}
        # Compounds are flattened to [id, name, location] triples; the
        # abundance dict is re-keyed by a string built from the same triple
        # so it survives pickling without needing hashable objects.
        for l in self.left_compounds:
            state['left_compounds'].append([l.id, l.name, l.location])
            state['left_abundance'][f"{l.id} :: {l.name} :: {l.location}"] = self.left_abundance[l]
        for r in self.right_compounds:
            state['right_compounds'].append([r.id, r.name, r.location])
            state['right_abundance'][f"{r.id} :: {r.name} :: {r.location}"] = self.right_abundance[r]
        return state

    def __setstate__(self, state):
        """
        Create a new reaction from a saved state. This is from
        __getstate__ eg. when pickled.
        :param state: the state that was saved.
        :return:
        """
        left = set()
        right = set()
        left_abundance = {}
        right_abundance = {}
        # Rebuild CompoundWithLocation objects from the flattened triples
        # and restore the object-keyed abundance dicts.
        for l in state['left_compounds']:
            c = PyFBA.metabolism.CompoundWithLocation(id=l[0], name=l[1], location=l[2])
            left.add(c)
            left_abundance[c] = state['left_abundance'][f"{l[0]} :: {l[1]} :: {l[2]}"]
        state['left_compounds'] = left
        state['left_abundance'] = left_abundance
        for r in state['right_compounds']:
            c = PyFBA.metabolism.CompoundWithLocation(id=r[0], name=r[1], location=r[2])
            right.add(c)
            right_abundance[c] = state['right_abundance'][f"{r[0]} :: {r[1]} :: {r[2]}"]
        state['right_compounds'] = right
        state['right_abundance'] = right_abundance
        self.__dict__.update(state)

    def set_direction(self, direction):
        """
        Set the direction of the reaction.

        :param direction: The direction of the reaction
        :type direction: str
        :rtype: str
        :return: The current direction
        """
        allowable_directions = {'>', '<', '=', None}
        if direction in allowable_directions:
            self.direction = direction
            if not self.gfdirection:
                self.gfdirection = direction
        else:
            # NOTE(review): the message says "Ignored" but the code resets
            # self.direction to None, discarding any previous value —
            # confirm which behavior is intended.
            sys.stderr.write("Direction: " + str(direction) + " is not a permitted direction. Ignored\n")
            self.direction = None

        return self.direction

    def add_left_compounds(self, cmpds):
        """
        The compounds on the left are a set of compounds that the reaction
        typically uses as substrates.

        :param cmpds: The compounds that should be added
        :type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
        """

        if isinstance(cmpds, set):
            # choose one element. next(iter(cmpds)) does not remove the element
            if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
                raise TypeError(f"Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects not {type(next(iter(cmpds)))}")
            self.left_compounds.update(cmpds)
        elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
            # add a single compound
            self.left_compounds.add(cmpds)
        else:
            raise TypeError("Compounds must be a set of CompoundWithLocation")

    def set_left_compound_abundance(self, cmpd, abundance):
        """
        Set the abundance of a compound on the left side of the equation.

        :param cmpd: The compound to set the abundance for
        :type cmpd: PyFBA.metabolism.CompoundWithLocation
        :param abundance: The amount of that abundance
        :type abundance: float | int
        """
        if cmpd not in self.left_compounds:
            raise KeyError(f"{cmpd} is not in left compounds. Please add it before trying to set the abundance")
        # abundances are always stored as floats
        if isinstance(abundance, float):
            self.left_abundance[cmpd] = abundance
        elif isinstance(abundance, int):
            self.left_abundance[cmpd] = float(abundance)
        else:
            raise TypeError("Abundance must be an int or a float")

    def get_left_compound_abundance(self, cmpd):
        """
        Get the abundance of the compound on the left side of the equation.

        :param cmpd: The compound to get the abundance of
        :type cmpd: PyFBA.metabolism.CompoundWithLocation
        :return: The compounds abundance
        :rtype: float
        """
        if cmpd in self.left_abundance:
            return self.left_abundance[cmpd]
        else:
            raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
                           f" {cmpd} on the left hand side of the equation: {self.equation}")

    def number_of_left_compounds(self):
        """
        The number of compounds on the left side of the equation.
        :rtype: int
        """
        return len(self.left_compounds)

    def add_right_compounds(self, cmpds):
        """
        The compounds on the right are a set of compounds that the
        reaction typically produces.

        :param cmpds: The compounds that should be added
        :type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
        """
        if isinstance(cmpds, set):
            # choose one element. next(iter(cmpds)) does not remove the element
            if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
                raise TypeError("Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects")
            self.right_compounds.update(cmpds)
        elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
            # add a single compound
            self.right_compounds.add(cmpds)
        else:
            raise TypeError("Compounds must be a set of CompoundWithLocation")

    def set_right_compound_abundance(self, cmpd, abundance):
        """
        Set the abundance of a compound on the right side of the equation

        :param cmpd: The compound to set the abundance for
        :type cmpd: PyFBA.metabolism.CompoundWithLocation
        :param abundance: The amount of that abundance
        :type abundance: float | int
        """
        if cmpd not in self.right_compounds:
            raise KeyError(f"{cmpd} is not in right compounds. " +
                           " Please add it before trying to set the abundance")
        # abundances are always stored as floats
        if isinstance(abundance, float):
            self.right_abundance[cmpd] = abundance
        elif isinstance(abundance, int):
            self.right_abundance[cmpd] = float(abundance)
        else:
            raise TypeError("Abundance must be an int or a float")

    def get_right_compound_abundance(self, cmpd):
        """
        Get the abundance of the compound on the right side of the
        equation.

        :param cmpd: The compound to get the abundance of
        :type cmpd: Compound
        :return: The compounds abundance
        :rtype: float
        """
        if cmpd in self.right_abundance:
            return self.right_abundance[cmpd]
        else:
            raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
                           f" {cmpd} on the right hand side of the equation: {self.equation}")

    def number_of_right_compounds(self):
        """
        The number of compounds on the right side of the equation.
        :rtype: int
        """
        return len(self.right_compounds)

    def all_compounds(self):
        """
        Get all the compounds involved in this reaction.
        :return: A set of all the compounds
        :rtype: set
        """
        return self.left_compounds.union(self.right_compounds)

    def number_of_compounds(self):
        """
        Get the total number of compounds involved in this reaction.
        :rtype: int
        """
        return len(self.all_compounds())

    def has(self, cmpd):
        """
        Does this reaction have a compound? Just returns true if the
        compound is present somewhere in the reaction.

        :param cmpd: The compound to test for
        :type cmpd: Compound
        :rtype: bool
        """
        return cmpd in self.left_compounds or cmpd in self.right_compounds

    def opposite_sides(self, cmpd1, cmpd2):
        """
        Are these two compounds on opposite sides of the reaction?

        :param cmpd1: The first compound
        :type cmpd1: Compound
        :param cmpd2: The second compound
        :type cmpd2: Compound
        :return: Whether the compounds are on opposite sides
        :rtype: bool
        """
        if not self.has(cmpd1):
            raise ValueError(str(cmpd1) + " is not in this reaction")
        if not self.has(cmpd2):
            raise ValueError(str(cmpd2) + " is not in this reaction")
        if cmpd1 in self.left_compounds and cmpd2 in self.right_compounds:
            return True
        if cmpd1 in self.right_compounds and cmpd2 in self.left_compounds:
            return True
        return False

    def set_probability_left_to_right(self, p):
        """
        Set the probability of the reaction running left to right. Note
        you can also access this as reaction.pLR

        :param p: The probablity
        :type p: float
        """
        # probabilities are always stored as floats
        if isinstance(p, float):
            self.pLR = p
        elif isinstance(p, int):
            self.pLR = float(p)
        else:
            raise TypeError("The probability must be an int or a float")

    def get_probability_left_to_right(self):
        """
        Get the probability of the reaction running left to right. Note
        you can also access this as reaction.pLR

        :return: The probablity
        :rtype p: float
        """
        return self.pLR

    def set_probability_right_to_left(self, p):
        """
        Set the probability of the reaction running right to left. Note
        you can also access this as reaction.pRL

        :param p: The probablity
        :type p: float
        """
        # probabilities are always stored as floats
        if isinstance(p, float):
            self.pRL = p
        elif isinstance(p, int):
            self.pRL = float(p)
        else:
            raise TypeError("The probability must be an int or a float")

    def get_probability_right_to_left(self):
        """
        Get the probability of the reaction running right to left. Note
        you can also access this as reaction.pRL

        :return: The probablity
        :rtype p: float
        """
        return self.pRL

    def add_enzymes(self, enz):
        """
        Add one or more enzymes that completes this reaction.

        :param enz: A set of enzymes that you want to add
        :type enz: set
        """
        if isinstance(enz, set):
            self.enzymes.update(enz)
        else:
            raise TypeError("You need to supply a set of enzymes")

    def has_enzyme(self, enz):
        """
        Check whether an enzyme is involved in this reaction.

        :param enz: An Enzyme object
        :type enz: Enzyme
        :return: Whether we have this enzyme
        :rtype: bool
        """
        return enz in self.enzymes

    def all_enzymes(self):
        """
        Get all the enzymes involved in this reaction. Returns a set of
        complex IDs.
        :rtype: set
        """
        return self.enzymes

    def number_of_enzymes(self):
        """
        Gets the number of enzymes involved in this reaction.
        :rtype: int
        """
        return len(self.enzymes)

    def add_pegs(self, pegs):
        """
        Add one or more pegs to this reaction. Pegs must be a set.

        :param pegs: The pegs to add to the reaction
        :type pegs: set
        """
        if isinstance(pegs, set):
            self.pegs.update(pegs)
        else:
            raise TypeError("pegs must be a set")

    def has_peg(self, peg):
        """
        Check whether a peg is involved in this reaction.

        :param peg: The peg to check for
        :type peg: str
        :rtype: bool
        """
        return peg in self.pegs

    def set_deltaG(self, dg):
        """
        Set the value for delta G (Gibbs free energy) for this reaction.
        Recall -ve deltaG means the reaction is favorable.

        :param dg: The delta G of the reaction
        :type dg: float
        """
        # delta G is always stored as a float
        if isinstance(dg, float):
            self.deltaG = dg
        elif isinstance(dg, int):
            self.deltaG = float(dg)
        else:
            raise TypeError("The delta G must be an int or a float")

    def get_deltaG(self):
        """
        Get the value for delta G (Gibbs free energy) for this reaction.
        :rtype: float
        """
        return self.deltaG

    def check_input_output(self):
        """
        Check whether this reaction is an input or output reaction.

        This is called when we ask is_input_reaction /
        is_output_reaction and both inp and outp are False
        """
        # do we have external compounds on the left ... then it is an input reaction

        for c in self.left_compounds:
            if c.location == 'e':
                self.inp = True

        # external compounds on the right mean it is an output reaction
        for c in self.right_compounds:
            if c.location == 'e':
                self.outp = True

    def toggle_input_reaction(self):
        """
        Set this reaction as an input reaction. This only applies to
        this reaction, so if it is true we set it false, else we set
        it true
        """
        if self.inp:
            self.inp = False
        else:
            self.inp = True

    def is_input_reaction(self):
        """
        Is this an input reaction?
        :rtype: bool
        """
        # lazily classify the reaction on first query
        if self.inp is False and self.outp is False:
            self.check_input_output()
        return self.inp

    def toggle_output_reaction(self):
        """
        Set this reaction as an output reaction. This only applies to
        this reaction, so if it is true we set it false, else we set
        it true
        """
        if self.outp:
            self.outp = False
        else:
            self.outp = True

    def is_output_reaction(self):
        """
        Is this an output reaction?
        :rtype: bool
        """
        # lazily classify the reaction on first query
        if self.inp is False and self.outp is False:
            self.check_input_output()
        return self.outp

    def reverse_reaction(self):
        """
        Reverse the reaction - move the left compounds to the right,
        and vice versa. We also switch the abundances and the pLR and
        pRL.

        We also negate the deltaG, since that should be the other way
        around now.
        """
        (self.left_compounds, self.right_compounds) = (self.right_compounds, self.left_compounds)
        (self.left_abundance, self.right_abundance) = (self.right_abundance, self.left_abundance)
        # NOTE(review): an earlier docstring said input/output is NOT
        # switched, but the next line does swap inp/outp — confirm intent.
        (self.inp, self.outp) = (self.outp, self.inp)

        # we only need to reverse two directions
        if self.direction == '>':
            self.direction = '<'
        elif self.direction == '<':
            self.direction = '>'

        # we only need to reverse two gfdirections
        if self.gfdirection == '>':
            self.gfdirection = '<'
        elif self.gfdirection == '<':
            self.gfdirection = '>'

        # NOTE(review): prefer "is not None" over "!= None" (PEP 8 idiom)
        if self.lower_bound != None and self.upper_bound != None:
            # negate and swap the bounds so they describe the reversed flux
            lbtemp = 0 - self.lower_bound
            self.lower_bound = 0 - self.upper_bound
            self.upper_bound = lbtemp

        (self.pLR, self.pRL) = (self.pRL, self.pLR)
        self.deltaG = -self.deltaG

    def add_attribute(self, key, value):
        """
        Add an attribute to this class
        """
        setattr(self, key, value)

    def get_attribute(self, key):
        """
        Retrieve an attribute
        """
        return getattr(self, key)

    def reset_bounds(self):
        """
        reset the bounds of this reaction. If we are using this in
        gapfilling, we need to reset the bounds so we can calculate
        appropriately.
        :return: None
        """
        self.lower_bound = None
        self.upper_bound = None
nilq/baby-python
python
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import paddle

from mmdet.models.utils import interpolate_as


def test_interpolate_as():
    """interpolate_as should resize ``source`` to ``target``'s spatial size.

    Fix: the assertions compared against ``torch.Size`` although this file
    imports paddle, not torch, so every assertion raised
    ``NameError: name 'torch' is not defined``. Paddle tensors expose
    ``.shape`` as a list of ints, so we compare against plain lists.
    """
    source = paddle.rand((1, 5, 4, 4))
    target = paddle.rand((1, 1, 16, 16))

    # Test 4D source and target
    result = interpolate_as(source, target)
    assert list(result.shape) == [1, 5, 16, 16]

    # Test 3D target
    result = interpolate_as(source, target.squeeze(0))
    assert list(result.shape) == [1, 5, 16, 16]

    # Test 3D source
    result = interpolate_as(source.squeeze(0), target)
    assert list(result.shape) == [5, 16, 16]

    # Test type(target) == np.ndarray
    target = np.random.rand(16, 16)
    result = interpolate_as(source.squeeze(0), target)
    assert list(result.shape) == [5, 16, 16]
nilq/baby-python
python
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython """ class Punto: """Representación de un punto en coordenadas polares. :param x: coordenada x del punto. :x type: int :param y: coordenada y del punto. :y type: int """ def __init__(self, x: int = 0, y: int = 0) -> None: self.x = x self.y = y def cuadrante(self) -> str: """Devuelve el cuadrante en el que se encuentra el punto.""" return f"{self} se encuentra en el {self._posicion()}." def _posicion(self) -> str: if self.x > 0 and self.y > 0: return "I° cuadrante" elif self.x < 0 and self.y > 0: return "II° cuadrante" elif self.x < 0 and self.y < 0: return "III° cuadrante" elif self.x > 0 and self.y < 0: return "IV° cuadrante" elif self.x != 0 and self.y == 0: return "eje X" elif self.x == 0 and self.y != 0: return "eje Y" else: return "origen" def __repr__(self) -> str: return f"({self.x}, {self.y})"
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from pdb import set_trace import re def check_entitys(text): ptrn = r"(&{1})([\w-]+)([;]{0,1})" lst = [] for m in re.finditer(ptrn, text): s = m.group() g2 = m.groups()[2] t = 0 if g2 == ';' else 1 lst.append({'s': s, 't': t}) return lst def check_entity_brackets(text): ptrn = r"([;(])(\S+)(\)*)" lst = [] for m in re.finditer(ptrn, text): s = m.group() nop = s.count('(') noc = s.count(')') if nop+noc == 0: continue s = s if s.find(';') < 0 else s[1:] t = 0 if nop == noc else 1 e = {'s': s, 't': t} lst.append(e) return lst # lista de pattern del tipo from to def check_overflow(text, po, pc): lst = [] pc = re.compile(pc) po = re.compile(po) so_last = "" c1_ = 0 for mo in re.finditer(po, text): so = mo.group() o0 = mo.start() o1 = mo.end() js = {'so': so, 'sc': '', 's': '', 't': 0} if o0 < c1_: l = len(lst)-1 lst[l]['s'] = so_last lst[l]['t'] = 1 so_last = so mc = re.search(pc, text[o1:]) if mc is None: js['s'] = so js['t'] = 1 lst.append(js) continue c0 = mc.start() c1 = mc.end() c1_ = o1+c0 s = text[o0:o1+c1] js['s'] = s js['sc'] = mc.group() lst.append(js) return lst OVER_KEY_TYPE_LIST = ( ('g3', '{3%',0), ('g2', '{2%',0), ('g1', '{1%',0), ('g0', '{0%',0), ('gu', '{_' ,0), ('qu', '[_' ,1), ('g', '{' ,0), ('q', '[' ,1) ) def fill_tag_over_lst(tag_lst): def find_over_key_type(tag_op): k=None t=None for kpt in OVER_KEY_TYPE_LIST: if tag_op==kpt[1]: k=kpt[0] t=kpt[2] break return k,t lst=[] for tag in tag_lst: key,func_type=find_over_key_type(tag[1]) if key is None: continue po = tag[1] pc = tag[2] so=po sc=pc if po == "[": po = po.replace('[', r'\[[^_]') pc = pc.replace(']', r'[^_]\]') elif po == "[_": po = po.replace('[', r'\[') pc = pc.replace(']', r'\]') elif po == "{": po = po.replace('{', r'\{[^_]\w') pc = pc.replace('}', r'\w[^_]\}') name = tag[0] lst.append([func_type,name,so,sc,po,pc]) return lst
nilq/baby-python
python
# -*- coding: utf-8 -*- # Generated by Django 1.9.8 on 2017-07-30 15:59 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('courses', '0011_auto_20170718_2027'), ] operations = [ migrations.AlterField( model_name='course', name='published_on', field=models.DateField(blank=True, null=True), ), migrations.AlterUniqueTogether( name='coursemember', unique_together=set([('course', 'member', 'role')]), ), ]
nilq/baby-python
python
import json, pdb, os, numpy as np, cv2, threading, math, io
import collections.abc  # FIX: `collections` was used below but never imported
import torch
from torch.autograd import Variable


def open_image(fn):
    """ Opens an image using OpenCV given the file path.

    Arguments:
        fn: the file path of the image

    Returns:
        The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
    """
    flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
    if not os.path.exists(fn):
        raise OSError('No such file or directory: {}'.format(fn))
    elif os.path.isdir(fn):
        raise OSError('Is a directory: {}'.format(fn))
    else:
        try:
            # FIX: cv2.imread returns None (it does not raise) for an
            # unreadable file, so check before .astype — in the original
            # the None check sat after .astype and was unreachable.
            im = cv2.imread(str(fn), flags)
            if im is None: raise OSError(f'File not recognized by opencv: {fn}')
            im = im.astype(np.float32)/255
            print(f'Image shape is {im.shape}')
            return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        except Exception as e:
            raise OSError('Error handling image at: {}'.format(fn)) from e


# getting val_tfms to work without fastai import
from enum import IntEnum


class TfmType(IntEnum):
    """ Type of transformation.
    Parameters
        IntEnum: predefined types of transformations
            NO: the default, y does not get transformed when x is transformed.
            PIXEL: x and y are images and should be transformed in the same way.
                Example: image segmentation.
            COORD: y are coordinates (i.e bounding boxes)
            CLASS: y are class labels (same behaviour as PIXEL, except no normalization)
    """
    NO = 1
    PIXEL = 2
    COORD = 3
    CLASS = 4


class CropType(IntEnum):
    """ Type of image cropping.
    """
    RANDOM = 1
    CENTER = 2
    NO = 3
    GOOGLENET = 4


class ChannelOrder():
    '''
    changes image array shape from (h, w, 3) to (3, h, w). tfm_y decides the
    transformation done to the y element.
    '''
    def __init__(self, tfm_y=TfmType.NO): self.tfm_y=tfm_y

    def __call__(self, x, y):
        x = np.rollaxis(x, 2)
        #if isinstance(y,np.ndarray) and (len(y.shape)==3):
        if self.tfm_y==TfmType.PIXEL: y = np.rollaxis(y, 2)
        elif self.tfm_y==TfmType.CLASS: y = y[...,0]
        return x,y


class Transforms():
    """A pipeline of transforms ending in crop, normalize, channel reorder."""
    def __init__(self, sz, tfms, normalizer, denorm, crop_type=CropType.CENTER,
                 tfm_y=TfmType.NO, sz_y=None):
        if sz_y is None: sz_y = sz
        self.sz,self.denorm,self.norm,self.sz_y = sz,denorm,normalizer,sz_y
        crop_tfm = crop_fn_lu[crop_type](sz, tfm_y, sz_y)
        self.tfms = tfms + [crop_tfm, normalizer, ChannelOrder(tfm_y)]
    def __call__(self, im, y=None): return compose(im, y, self.tfms)
    def __repr__(self): return str(self.tfms)


def A(*a):
    """Convert the argument(s) to numpy array(s)."""
    return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]


class Denormalize():
    """ De-normalizes an image, returning it to original format.
    """
    def __init__(self, m, s):
        self.m=np.array(m, dtype=np.float32)
        self.s=np.array(s, dtype=np.float32)
    def __call__(self, x): return x*self.s+self.m


class Normalize():
    """ Normalizes an image to zero mean and unit standard deviation, given the mean m and std s of the original image """
    def __init__(self, m, s, tfm_y=TfmType.NO):
        self.m=np.array(m, dtype=np.float32)
        self.s=np.array(s, dtype=np.float32)
        self.tfm_y=tfm_y

    def __call__(self, x, y=None):
        x = (x-self.m)/self.s
        if self.tfm_y==TfmType.PIXEL and y is not None: y = (y-self.m)/self.s
        return x,y


class Transform():
    """ A class that represents a transform.

    All other transforms should subclass it. All subclasses should override
    do_transform.

    Arguments
    ---------
        tfm_y : TfmType
            type of transform
    """
    def __init__(self, tfm_y=TfmType.NO):
        self.tfm_y=tfm_y
        self.store = threading.local()

    def set_state(self): pass

    def __call__(self, x, y):
        self.set_state()
        # Dispatch on the y-transform type: leave y alone, transform it like
        # a pixel image, or transform it as coordinates.
        x,y = ((self.transform(x),y) if self.tfm_y==TfmType.NO
                else self.transform(x,y) if self.tfm_y in (TfmType.PIXEL, TfmType.CLASS)
                else self.transform_coord(x,y))
        return x, y

    def transform_coord(self, x, y): return self.transform(x),y

    def transform(self, x, y=None):
        x = self.do_transform(x,False)
        return (x, self.do_transform(y,True)) if y is not None else x

#     @abstractmethod
#     def do_transform(self, x, is_y): raise NotImplementedError


class CoordTransform(Transform):
    """ A coordinate transform.
    """
    @staticmethod
    def make_square(y, x):
        r,c,*_ = x.shape
        y1 = np.zeros((r, c))
        # FIX: np.int was removed in NumPy 1.24; plain int is the same dtype
        y = y.astype(int)
        y1[y[0]:y[2], y[1]:y[3]] = 1.
        return y1

    def map_y(self, y0, x):
        y = CoordTransform.make_square(y0, x)
        y_tr = self.do_transform(y, True)
        # NOTE(review): to_bb is not defined anywhere in this module —
        # presumably a fastai helper; COORD transforms will NameError until
        # it is imported/defined. Confirm before using.
        return to_bb(y_tr, y)

    def transform_coord(self, x, ys):
        # NOTE(review): partition is likewise undefined here (fastai helper).
        yp = partition(ys, 4)
        y2 = [self.map_y(y,x) for y in yp]
        x = self.do_transform(x, False)
        return x, np.concatenate(y2)


class Scale(CoordTransform):
    """ A transformation that scales the min size to sz.

    Arguments:
        sz: int
            target size to scale minimum size.
        tfm_y: TfmType
            type of y transformation.
    """
    def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
        super().__init__(tfm_y)
        self.sz,self.sz_y = sz,sz_y

    def do_transform(self, x, is_y):
        if is_y: return scale_min(x, self.sz_y, cv2.INTER_NEAREST)
        else   : return scale_min(x, self.sz,   cv2.INTER_AREA   )


class NoCrop(CoordTransform):
    """  A transformation that resize to a square image without cropping.

    This transforms (optionally) resizes x,y at with the same parameters.
    Arguments:
        targ: int
            target size of the crop.
        tfm_y (TfmType): type of y transformation.
    """
    def __init__(self, sz, tfm_y=TfmType.NO, sz_y=None):
        super().__init__(tfm_y)
        self.sz,self.sz_y = sz,sz_y

    def do_transform(self, x, is_y):
        if is_y: return no_crop(x, self.sz_y, cv2.INTER_NEAREST)
        else   : return no_crop(x, self.sz,   cv2.INTER_AREA   )


# ImageNet channel statistics, used as the default normalization.
imagenet_stats = A([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
stats = imagenet_stats

tfm_norm = Normalize(*stats, TfmType.NO)
tfm_denorm = Denormalize(*stats)


def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0,
              crop_type=None, tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT):
    """ Generate a standard set of transformations

    Arguments
    ---------
     normalizer :
         image normalizing function
     denorm :
         image denormalizing function
     sz :
         size, sz_y = sz if not specified.
     tfms :
         iterable collection of transformation functions
     max_zoom : float,
         maximum zoom
     pad : int,
         padding on top, left, right and bottom
     crop_type :
         crop type
     tfm_y :
         y axis specific transformations
     sz_y :
         y size, height
     pad_mode :
         cv2 padding style: repeat, reflect, etc.

    Returns
    -------
     type : ``Transforms``
         transformer for specified image operations.

    See Also
    --------
     Transforms: the transformer object returned by this function
    """
    if tfm_y is None: tfm_y=TfmType.NO
    if tfms is None: tfms=[]
    # FIX: collections.Iterable was removed in Python 3.10 (and `collections`
    # was never imported); collections.abc.Iterable is the supported spelling.
    elif not isinstance(tfms, collections.abc.Iterable): tfms=[tfms]
    if sz_y is None: sz_y = sz
    # NOTE(review): RandomScale and AddPadding are not defined in this module
    # (fastai classes), so max_zoom/pad cannot currently be used — confirm.
    scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
             else Scale(sz, tfm_y, sz_y=sz_y)]
    if pad: scale.append(AddPadding(pad, mode=pad_mode))
    if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
    return Transforms(sz, tfms, normalizer, denorm, crop_type,
                      tfm_y=tfm_y, sz_y=sz_y)


# Lookup from crop type to the transform class implementing it.
crop_fn_lu = {CropType.NO: NoCrop}


def compose(im, y, fns):
    """ apply a collection of transformation functions fns to images
    """
    for fn in fns:
        #pdb.set_trace()
        im, y =fn(im, y)
    return im if y is None else (im, y)


def scale_min(im, targ, interpolation=cv2.INTER_AREA):
    """ Scales the image so that the smallest axis is of size targ.

    Arguments:
        im (array): image
        targ (int): target size
    """
    r,c,*_ = im.shape
    ratio = targ/min(r,c)
    sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
    return cv2.resize(im, sz, interpolation=interpolation)


def scale_to(x, ratio, targ):
    '''Scale dimension x by ratio, but never return less than targ.'''
    return max(math.floor(x*ratio), targ)


def crop(im, r, c, sz):
    ''' crop image into a square of size sz starting at row r, column c '''
    return im[r:r+sz, c:c+sz]


def no_crop(im, min_sz=None, interpolation=cv2.INTER_AREA):
    """ Returns a squared resized image """
    r,c,*_ = im.shape
    if min_sz is None: min_sz = min(r,c)
    return cv2.resize(im, (min_sz, min_sz), interpolation=interpolation)


# -------- end val_tfms stuff


def write_test_image(img_bytes, path, file):
    """Write img_bytes to `file` inside a freshly emptied test directory `path`."""
    if os.path.exists(path):
        print(f'Cleaning test dir: {path}')
        for root, dirs, files in os.walk(path):
            for f in files:
                os.unlink(os.path.join(root, f))
    else:
        print(f'Creating test dir: {path}')
        os.makedirs(path, exist_ok=True)
    # FIX: use a context manager so the handle is flushed and closed
    # (the original leaked the open file object)
    with open(file, 'wb') as f:
        f.write(img_bytes)


def preproc_img(img, sz):
    """Apply validation transforms and return a batched FloatTensor Variable.

    NOTE(review): torch.autograd.Variable is legacy (tensors are sufficient
    since PyTorch 0.4); kept for compatibility with the calling code.
    """
    val_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=0,
                        crop_type=CropType.NO, tfm_y=None, sz_y=None)
    trans_img = val_tfm(img)
    print(f'Image shape: {trans_img.shape}')
    return Variable(torch.FloatTensor(trans_img)).unsqueeze_(0)


def get_file_with_ext(path, ext):
    """Return the first file in `path` ending with `ext` (str/list/tuple), else None."""
    # str.endswith accepts a tuple of suffixes but not a list
    if isinstance(ext, list): ext = tuple(ext)
    if os.path.isdir(path):
        for file in os.listdir(path):
            if file.endswith(ext):
                return os.path.join(path, file)
    return None
nilq/baby-python
python
# -*- python -*-

# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modifed
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.

# Build manifest for the finite-element "elements" subdirectory.
# DIM_3 is injected by the surrounding build machinery and selects the
# 2D or 3D engine library and the extra tetrahedral element sources.

dirname = 'elements'

if not DIM_3:
    clib = 'oof2engine'
else:
    clib = 'oof3dengine'

cfiles = ['quad4.C', 'quad4_8.C', 'quad8.C', # 'quad8_4.C',
          'quad9.C', 'tri3.C', 'tri3_6.C', 'tri6.C', 'tri6_3.C',
          'quad4shapefunction.C', 'quad8shapefunction.C',
          'quad9shapefunction.C', 'tri3shapefunction.C',
          'tri6shapefunction.C',
          'edge2.C','edge2shapefunction.C',
          'edge3.C','edge3shapefunction.C',
          'edge3sub.C','edge3super.C']#Interface branch

hfiles = ['quad4shapefunction.h', 'quad8shapefunction.h',
          'quad9shapefunction.h', 'tri3shapefunction.h',
          'tri6shapefunction.h',
          'edge2shapefunction.h','edge3shapefunction.h']#Interface branch

swigfiles = ['quad4.swg', 'quad4_8.swg', 'quad8.swg', # 'quad8_4.swg',
             'quad9.swg', 'tri3.swg', 'tri3_6.swg', 'tri6.swg',
             'tri6_3.swg']

# FIX: this list holds the .spy SWIG-python companions; the original listed
# 'quad9.swg' here, which belongs in swigfiles above, not in swigpyfiles.
swigpyfiles = ['quad4.spy', 'quad4_8.spy', 'quad8.spy', 'quad8_4.spy',
               'quad9.spy', 'tri3.spy', 'tri3_6.spy', 'tri6.spy',
               'tri6_3.spy']

if DIM_3:
    cfiles.extend(['tet4.C','tet4shapefunction.C', 'tet10.C',
                   'tet10shapefunction.C'])
    hfiles.extend(['tet4shapefunction.h', 'tet10shapefunction.h'])
    swigfiles.extend(['tet4.swg', 'tet10.swg'])
    swigpyfiles.extend(['tet4.spy', 'tet10.spy'])

pyfiles = ['initialize.py']
nilq/baby-python
python
from queue import PriorityQueue as PQueue  # NOTE(review): imported but unused below

# N cities, a budget of C coins, and V train lines.
N = int(input())
C = int(input())
V = int(input())
# Line i runs from city S[i] to city T[i] (converted here to 0-based),
# costs Y[i] coins and takes M[i] minutes.
S = list(map(lambda x: int(x)-1, input().split()))
T = list(map(lambda x: int(x)-1, input().split()))
Y = list(map(int, input().split()))
M = list(map(int, input().split()))

# E[t] holds the incoming lines of city t as (source, cost, time) triples.
E = [[] for _ in range(N)]
for f, t, cost, time in zip(S, T, Y, M):
    E[t].append((f, cost, time))

INF = 10**7

# dp[t][j] = minimum total travel time to reach city t having spent at most
# j coins; starting city 0 is reachable at time 0 for every budget.
dp = [[INF] * (C+1) for _ in range(N)]
for i in range(C+1):
    dp[0][i] = 0
# NOTE(review): cities are relaxed in index order, which assumes every line
# runs from a lower-numbered to a higher-numbered city — confirm against the
# problem statement this script was written for.
for t in range(N):
    for j in range(C+1):
        for f, cost, time in E[t]:
            if j >= cost and dp[t][j] > dp[f][j-cost] + time:
                dp[t][j] = dp[f][j-cost] + time

# Cheapest time to reach the last city within budget, or -1 if unreachable.
print(min(dp[N-1]) if min(dp[N-1]) != INF else -1)
nilq/baby-python
python
import datetime  # NOTE(review): unused at module level; the model field below shadows the name inside Log

from django.conf import settings
from django.db import models

# Supply-status codes for a blood group.
BLOOD_GROUP_STATUSES = (
    ('U', 'Urgente'),
    ('S', 'Stabile'),
    ('Z', 'Emergenza'),
    ('E', 'Eccedenza'),
    ('F', 'Fragile'),
)


class BloodGroup(models.Model):
    """A blood group (e.g. AB+, B-) together with its current supply status."""

    # Short group identifier such as 'AB+' or 'B-'.
    groupid = models.CharField(max_length=3, unique=True)  # AB+, B-, ...
    # One of BLOOD_GROUP_STATUSES; defaults to 'S' (Stabile).
    status = models.CharField(
        max_length=2,
        choices=BLOOD_GROUP_STATUSES,
        default='S',
    )  # choice between U, E ...

    def __str__(self):
        return self.groupid


class Log(models.Model):
    """One publication run: an image plus per-network completion flags."""

    # Timestamp identifying the run; also used as the display name.
    datetime = models.DateTimeField(unique=True)
    # Optional image, uploaded to the configured "meteo" directory.
    image = models.ImageField(
        upload_to=settings.UPLOAD_METEO,
        blank=True
    )
    # True once the post has gone out on the corresponding network.
    twitter_done = models.BooleanField(default=False)
    telegram_done = models.BooleanField(default=False)
    facebook_done = models.BooleanField(default=False)

    @property
    def is_completed(self):
        """True when the entry has been posted on all three networks."""
        return self.twitter_done and self.telegram_done and self.facebook_done

    def __str__(self):
        if self.datetime:
            # Second-precision ISO timestamp, e.g. '2017-07-30T15:59:00'.
            return self.datetime.replace(microsecond=0).isoformat()
        else:
            return 'Bad Log entry'
nilq/baby-python
python
from rest_framework import serializers
from rest_framework_recursive.fields import RecursiveField

from backend.blog.models import BlogCategory, Tag, Post


class BlogCategorySerializer(serializers.ModelSerializer):
    """Serializer for blog categories, including nested child categories."""
    # Children come from the model's get_children() and reuse this serializer
    # recursively, producing an arbitrarily deep category tree.
    children = serializers.ListField(source='get_children',
                                     read_only=True,
                                     child=RecursiveField(), )

    class Meta:
        model = BlogCategory
        fields = ("id", "name", "children", "slug")


class SortPostCategorySerializer(serializers.ModelSerializer):
    """Flat category serializer used when sorting/filtering posts."""

    class Meta:
        model = BlogCategory
        fields = ("id", "name", "slug")


class TagSerializer(serializers.ModelSerializer):
    """Serializer for post tags."""

    class Meta:
        model = Tag
        fields = ("id", "name")


class PostSerializer(serializers.ModelSerializer):
    """Serializer for the post list (teaser fields only, no full text)."""
    category = BlogCategorySerializer()
    tag = TagSerializer(many=True)

    class Meta:
        model = Post
        fields = ("id", "title", "mini_text", "created_date", "category", "tag", "viewed")


class SortPostSerializer(serializers.ModelSerializer):
    """Serializer for posts filtered by category (flat category payload)."""
    category = SortPostCategorySerializer()
    tag = TagSerializer(many=True)

    class Meta:
        model = Post
        fields = ("id", "title", "mini_text", "created_date", "category", "tag", "viewed")


class PostDetailSerializer(serializers.ModelSerializer):
    """Serializer for the full post detail view (author, body text, image)."""
    category = BlogCategorySerializer()
    tag = TagSerializer(many=True)

    class Meta:
        model = Post
        fields = ("id", "author", "title", "text", "image", "created_date", "category", "tag", "viewed")
nilq/baby-python
python
#!/usr/bin/env python
# encoding: utf-8

# Plays the Ophelia/Hamlet scene by piping each line of dialogue through the
# external `cowsay` program via subprocess. cowsay draws a cow by default;
# for another character, add '-f' '<character name>' to the argument list in
# say() below.
#
# FIX: the original repeated the subprocess.Popen boilerplate for every line
# and ended in a main() that was a Python-2 syntax error
# (`print conversation.py`); the dialogue is now data and main() is valid.

import subprocess

# The dialogue, in playing order. These strings are the script's output and
# are kept verbatim (Dutch).
DIALOOG = [
    "In de hal van kasteel Elseneur.",
    "Stil nu! De schone Ophelia! Nimf, gedenk in uw gebeden al mijn zonden.",
    "Edele heer, hoe gaat het u de laatste tijd?",
    "Ik dank u heel goed.",
    "Ik heb nog souvenirs van u, die ik al lang terug had willen geven. Hier... neemt u ze.",
    "Nee, nee, ik niet ik heb u nimmer iets gegeven.",
    "U weet heel goed, heer, dat u 't wel gedaan hebt, en met z\u00f3 zoete woorden dat hun waarde nog groter werd. Hun geur is nu vervlogen, neem ze dus terug; want voor een edele geest verbleekt de rijkste gift wanneer de gever zich arm aan liefde toont. Hier zijn ze, heer.",
    "Aha! ben je kuis?",
    "Heer",
    "Ben je mooi?",
    "Wat bedoelt uwe hoogheid?",
    "Dat als je kuis en mooi bent, je kuisheid geen omgang met je schoonheid zou mogen toestaan.",
    "Maar, heer, kan schoonheid ooit beter omgang hebben dan met kuisheid?",
    "Jazeker, want de macht van de schoonheid zal de kuisheid eer der in een koppelaarster veranderen, dan dat kuisheid de schoonheid dwingen kan haar te gelijken. Dit was vroeger een paradox, maar nu wordt het door de tijd bewezen. Ik heb je eens liefgehad.",
    "Ja, heer, dat hebt u me doen geloven.",
    "Je had me niet moeten geloven, want de deugd kan niet z\u00f3 ge\u00ebnt worden op onze oude stam, dat er geen zweem van overblijft. Ik heb je niet liefgehad.",
    "Dan ben ik des te meer bedrogen.",
    "Ga in een klooster! Waarom zou je zondaars fokken? Ik mag wel zeggen dat ik vrij deugdzaam ben, maar toch zou ik me kunnen beschuldigen van dingen waarom mijn moeder me beter niet had kunnen baren. Ik ben erg hoogmoedig, wraak zuchtig en eergierig, en ik heb meer wandaden voor 't grijpen dan gedachten om ze uit te drukken, verbeelding om ze vorm te geven of tijd om ze te begaan. Wat moeten kerels als ik ook rond kruipen tussen hemel en aarde? Wij zijn aartsschavuiten geloof niemand van ons. Maak dat je in een klooster komt! Waar is je vader?",
    "Thuis, heer.",
    "Laat dan de deuren achter hem dichtdoen, opdat hij nergens anders voor gek kan spelen dan in zijn eigen huis. Vaarwel.",
    "0 hemelse goedheid, help hem! ",
    "Mocht je trouwen, dan geef ik je deze vloek als bruidsschat mee, je kunt zo kuis als ijs, zo zuiver als sneeuw zijn, t\u00f3ch ontkom je niet aan de laster. Ga in een klooster! Vaarwel. Of als je met alle geweld trouwen wilt, trouw dan een idioot, want mannen met hersens weten te goed wat voor monsters je van hen maakt. Naar een klooster en gauw! Vaarwel.",
    "Ik weet maar al te goed hoe jullie je beschildert. God heeft je een gezicht gegeven, maar jullie maakt je een ander. Je huppelt en trippelt, je geeft Gods schepselen bijnamen en laat je wulpsheid doorgaan voor argeloosheid. Ga weg, ik wil er niets meer van weten het heeft me gek gemaakt. Ik zeg je, dat er geen huwelijken meer moeten komen. De getrouwden mogen blijven leven op \u00e9\u00e9n na - en de ongetrouwden moeten blijven zoals ze zijn. Naar een klooster! Ga! ",
    "Wat een edele geest is hier verscheurd! Oog, tong en zwaard van hoveling, geleerde en krijgsman, hoop en bloem van onze staat, spiegel der zeden, toonbeeld van beschaving, door eerbetoon omringd... voorgoed verloren. En ik, rampzaligste van alle vrouwen, die honing zoog uit zijn welluidend woord, hoor nu de tonen van dat helder brein verward en schril als een ontstemde beiaard, en zie het ongeëvenaarde beeld van bloesemende jeugd, verdord door waanzin. 0, wee mij, die gezien heeft wat ik zag, zie wat ik zie!",
]


def say(word):
    """Render one line of dialogue with cowsay and wait for it to finish."""
    subprocess.Popen(['cowsay', word]).communicate()


def main():
    """Print the whole conversation, one cowsay balloon per line."""
    for regel in DIALOOG:
        say(regel)


if __name__ == '__main__':
    main()
nilq/baby-python
python
"""Functions for generating interactive visualizations of 3D models of trees.""" import os import numpy as np import pandas as pd import geopandas as gpd import seaborn as sns import ipyvolume as ipv from ipywidgets import FloatSlider, VBox, HBox, Accordion, Text, Layout from forest3d.geometry import make_tree_all_params, get_elevation, Tree from forest3d.validate_data import tree_list_checker import warnings warnings.filterwarnings( "ignore", message="invalid value encountered in double_scalars") warnings.filterwarnings( "ignore", message="invalid value encountered in greater_equal") warnings.filterwarnings("ignore", message="invalid value encountered in less") warnings.filterwarnings( "ignore", message="invalid value encountered in true_divide") def plot_tree_with_widgets(): """Creates and interactive plot of a tree crown with widgets to control its shape. Returns -------- tree_plot : ipywidgets HBox widget widget containing the parameter widgets and a 3D scatter plot widget. """ # creating all the widgets for each parameter of the tree model species = Text(value='Douglas-fir', description='Species') dbh = FloatSlider(value=5.0, min=0, max=50, step=1.0, description='dbh') height = FloatSlider( value=75, min=0, max=150, step=1.0, description='height', orientation='vertical') stem_x = FloatSlider(value=0, min=-10, max=10, step=1.0, description='x') stem_y = FloatSlider(value=0, min=-10, max=10, step=1.0, description='y') stem_z = FloatSlider(value=0, min=-10, max=10, step=1.0, description='z') lean_direction = FloatSlider( min=0, max=360, step=1.0, description='direction') lean_severity = FloatSlider( min=0, max=89, step=1.0, description='severity') crown_ratio = FloatSlider( value=0.65, min=0, max=1.0, step=0.01, description='crown ratio', orientation='vertical') crown_radius_E = FloatSlider( value=10, min=0, max=30, step=1.0, description='east') crown_radius_N = FloatSlider( value=10, min=0, max=30, step=1.0, description='north') crown_radius_W = FloatSlider( 
value=10, min=0, max=30, step=1.0, description='west') crown_radius_S = FloatSlider( value=10, min=0, max=30, step=1.0, description='south') crown_edge_height_E = FloatSlider( value=0.3, min=0, max=1, step=0.01, description='east', orientation='vertical') crown_edge_height_N = FloatSlider( value=0.3, min=0, max=1, step=0.01, description='north', orientation='vertical') crown_edge_height_W = FloatSlider( value=0.3, min=0, max=1, step=0.01, description='west', orientation='vertical') crown_edge_height_S = FloatSlider( value=0.3, min=0, max=1, step=0.01, description='south', orientation='vertical') shape_top_E = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='top, east') shape_top_N = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='top, north') shape_top_W = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='top, west') shape_top_S = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='top, south') shape_bot_E = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, east') shape_bot_N = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, north') shape_bot_W = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, west') shape_bot_S = FloatSlider( value=2.0, min=0.0, max=3.0, step=0.1, description='bottom, south') # Group the parameter widgets into groups of controls height_controls = HBox([height, crown_ratio]) edge_height_controls = HBox([ crown_edge_height_E, crown_edge_height_N, crown_edge_height_W, crown_edge_height_S ]) location_controls = VBox([stem_x, stem_y, stem_z]) lean_controls = VBox([lean_direction, lean_severity]) radius_controls = VBox( [crown_radius_E, crown_radius_N, crown_radius_W, crown_radius_S]) shape_controls = VBox([ shape_top_E, shape_top_N, shape_top_W, shape_top_S, shape_bot_E, shape_bot_N, shape_bot_W, shape_bot_S ]) # create and expandable user interface controls = Accordion([ location_controls, height_controls, 
lean_controls, radius_controls, edge_height_controls, shape_controls ]) controls.set_title(0, 'Stem Location') controls.set_title(1, 'Tree Height') controls.set_title(2, 'Tree Lean') controls.set_title(3, 'Crown Radius') controls.set_title(4, 'Crown Edge Heights') controls.set_title(5, 'Crown Shapes') # create the 3D scatter widget tree_scatter = ipv.quickscatter( x=np.random.rand(100, ) * 100 - 50, y=np.random.rand(100, ) * 100 - 50, z=np.random.rand(100, ) * 170 - 10, marker='sphere', color='green', size=1) # define some visualization parameters of the scatter plot tree_scatter.children[0].xlim = [-50, 50] tree_scatter.children[0].ylim = [-50, 50] tree_scatter.children[0].zlim = [-10, 160] tree_scatter.children[0].camera.up = [0, 1, 0] tree_scatter.children[0].camera.position = (-0.03944879903076046, -3.097863509106879, 0.27417047137158385) def on_value_change(*args): """Updates values of scatter plot when parameter widgets are updated. """ new_x, new_y, new_z = make_tree_all_params( species.value, dbh.value, height.value, stem_x.value, stem_y.value, stem_z.value, lean_direction.value, lean_severity.value, crown_ratio.value, crown_radius_E.value, crown_radius_N.value, crown_radius_W.value, crown_radius_S.value, crown_edge_height_E.value, crown_edge_height_N.value, crown_edge_height_W.value, crown_edge_height_S.value, shape_top_E.value, shape_top_N.value, shape_top_W.value, shape_top_S.value, shape_bot_E.value, shape_bot_N.value, shape_bot_W.value, shape_bot_S.value) tree_scatter.children[0].scatters[0].x = new_x tree_scatter.children[0].scatters[0].y = new_y tree_scatter.children[0].scatters[0].z = new_z # set up all widgets to trigger update to scatter plot upon changed value species.observe(on_value_change, 'value') dbh.observe(on_value_change, 'value') height.observe(on_value_change, 'value') stem_x.observe(on_value_change, 'value') stem_y.observe(on_value_change, 'value') stem_z.observe(on_value_change, 'value') lean_direction.observe(on_value_change, 
'value') lean_severity.observe(on_value_change, 'value') crown_ratio.observe(on_value_change, 'value') crown_radius_E.observe(on_value_change, 'value') crown_radius_N.observe(on_value_change, 'value') crown_radius_W.observe(on_value_change, 'value') crown_radius_S.observe(on_value_change, 'value') crown_edge_height_E.observe(on_value_change, 'value') crown_edge_height_N.observe(on_value_change, 'value') crown_edge_height_W.observe(on_value_change, 'value') crown_edge_height_S.observe(on_value_change, 'value') shape_top_E.observe(on_value_change, 'value') shape_top_N.observe(on_value_change, 'value') shape_top_W.observe(on_value_change, 'value') shape_top_S.observe(on_value_change, 'value') shape_bot_E.observe(on_value_change, 'value') shape_bot_N.observe(on_value_change, 'value') shape_bot_W.observe(on_value_change, 'value') shape_bot_S.observe(on_value_change, 'value') return HBox([controls, tree_scatter], layout=Layout(width='100%')) def plot_tree_list(tree_list, dem=None, sample=None): """Plots an interactive 3D view of a tree list. 
Parameters ----------- tree_list : path to shapefile shapefile containing trees with measured attributes dem : path to elevation raster raster readable by rasterio, will be used to calculate elevation on a grid and produce """ if not tree_list_checker(tree_list): raise TypeError('Tree list is not formatted appropriately.') if type(tree_list) == pd.core.frame.DataFrame: trees = tree_list elif type(tree_list) == gpd.geodataframe.GeoDataFrame: trees = tree_list elif not os.path.isfile(tree_list): raise FileNotFoundError('The file does not exist.') else: # check file type and open with pandas or geopandas file_type = os.path.basename(tree_list).split('.')[1] if file_type == "csv": trees = pd.read_csv(tree_list) elif file_type == "shp": trees = gpd.read_file(tree_list) else: raise TypeError('Unknown file type') spp = pd.unique(trees.species) palette = sns.color_palette('colorblind', len(spp)) # get elevation raster to display as surface underneath trees if dem is not None: # calculate z locations of the tree stems based on the dem trees['stem_z'] = get_elevation(dem, trees['stem_x'], trees['stem_y']) # calculate a dem to display as a surface in the plot xs = np.linspace(trees.stem_x.min(), trees.stem_x.max(), 100) ys = np.linspace(trees.stem_y.min(), trees.stem_y.max(), 100) xx, yy = np.meshgrid(xs, ys) elevation = get_elevation(dem, xx.flatten(), yy.flatten()) elevation_surface = elevation.reshape(xs.shape[0], ys.shape[0]) else: if 'stem_z' not in trees.columns: trees['stem_z'] = 0 else: pass if sample is not None: trees = trees.sample(n=sample) else: pass ipv.figure(width=800) for idx, tree in trees.iterrows(): # calculate the tree's crown coordinates x, y, z = Tree( species=tree.species, dbh=tree.dbh, top_height=tree.top_height, stem_x=tree.stem_x, stem_y=tree.stem_y, stem_z=tree.stem_z, crown_ratio=tree.cr_ratio, crown_radii=np.full(shape=4, fill_value=tree.cr_radius), crown_shapes=np.full(shape=(2, 4), fill_value=2.0)).get_crown() # find out the spp index to give 
it a unique color spp_idx = np.where(spp == tree.species)[0][0] # plot the tree crown ipv.plot_surface( x.reshape((50, 32)), y.reshape((50, 32)), z.reshape((50, 32)), color=palette[spp_idx]) if dem is not None: ipv.plot_surface(xx, yy, elevation_surface, color='brown') else: pass ipv.xlim(trees.stem_x.min() - 20, trees.stem_x.max() + 20) ipv.ylim(trees.stem_y.min() - 20, trees.stem_y.max() + 20) ipv.zlim(trees.stem_z.min(), trees.stem_z.min() + trees.top_height.max() + 20) ipv.style.use('minimal') ipv.squarelim() ipv.show()
nilq/baby-python
python
from js9 import j


def _openvcloud_client(g8client):
    """Build an OpenVCloud client from a g8client producer service.

    Extracted helper: this exact construction was duplicated in install,
    processChange, uninstall, list_disks and get_consumption.
    """
    config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
    return j.clients.openvcloud.get(
        instance=config_instance,
        create=False,
        die=True,
        sshkey_path="/root/.ssh/ays_repos_key")


def init_actions_(service, args):
    """Return the action dependency graph for this service.

    'list_disks' may only run after 'init'; 'get_consumption' only after
    'install'.
    """
    return {
        'list_disks': ['init'],
        'get_consumption': ['install'],
    }


def init(job):
    """Consume every uservdc service named in the account schema.

    Raises AYSNotFound when no g8client producer is attached.
    """
    service = job.service
    if 'g8client' not in service.producers:
        raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue init of %s" % service)
    for user in service.model.data.accountusers:
        uservdc = service.aysrepo.serviceGet('uservdc', user.name)
        service.consume(uservdc)
    service.saveAll()


def authorization_user(account, service, g8client):
    """Synchronise the account ACL with the users declared on the service.

    Users declared in the schema are authorized (or their access right
    updated when it differs); users on the account but absent from the
    schema are unauthorized — except the currently authenticating user,
    which raises Input instead.
    """
    authorized_users = account.authorized_users
    userslist = service.producers.get('uservdc', [])
    if not userslist:
        return
    users = []
    for u in userslist:
        if u.model.data.provider != '':
            users.append(u.model.dbobj.name + "@" + u.model.data.provider)
        else:
            users.append(u.model.dbobj.name)

    # Authorize users
    for user in users:
        # BUGFIX: the original set a single `user_exists` flag before the
        # loop and only ever flipped it to False, so once one user was
        # missing every subsequent, already-authorized user was re-sent to
        # authorize_user. The flag must be computed per user.
        user_exists = user in authorized_users
        for uvdc in service.model.data.accountusers:
            if uvdc.name == user.split('@')[0]:
                if user_exists:
                    # Only touch the ACL entry when the right actually differs.
                    for acl in account.model['acl']:
                        if acl['userGroupId'] == user and acl['right'] != uvdc.accesstype:
                            account.update_access(username=user, right=uvdc.accesstype)
                else:
                    account.authorize_user(username=user, right=uvdc.accesstype)

    # Unauthorize users not in the schema
    for user in authorized_users:
        if user not in users:
            if user == g8client.model.data.login:
                raise j.exceptions.Input("Can't remove current authenticating user: %s. To remove use another user for g8client service." % user)
            account.unauthorize_user(username=user)


def get_user_accessright(username, service):
    """Return the access type declared for `username` in the schema, or
    None when the user is not listed."""
    for u in service.model.data.accountusers:
        if u.name == username:
            return u.accesstype


def install(job):
    """Create (or fetch) the account, record its id, sync users and push
    the capacity limits from the schema."""
    service = job.service
    if 'g8client' not in service.producers:
        raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue install of %s" % service)
    g8client = service.producers["g8client"][0]
    cl = _openvcloud_client(g8client)

    # Set limits; if the account does not exist it will be created.
    account = cl.account_get(
        name=service.model.dbobj.name,
        create=True,
        maxMemoryCapacity=service.model.data.maxMemoryCapacity,
        maxVDiskCapacity=service.model.data.maxDiskCapacity,
        maxCPUCapacity=service.model.data.maxCPUCapacity,
        maxNumPublicIP=service.model.data.maxNumPublicIP,
    )
    service.model.data.accountID = account.model['id']
    service.model.save()

    authorization_user(account, service, g8client)
    # Unauthorize users not in the schema
    # THIS FUNCTIONALITY IS DISABLED UNTIL OVC DOESN'T REQUIRE USERS TO BE ADMIN

    # Update capacity in case the account already existed.
    account.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity
    account.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity
    account.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP
    account.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity
    account.save()


def processChange(job):
    """Apply a dataschema change: reconcile uservdc producers/ACLs and
    re-push the capacity limits."""
    service = job.service
    if 'g8client' not in service.producers:
        raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue processChange of %s" % service)
    g8client = service.producers["g8client"][0]
    cl = _openvcloud_client(g8client)
    account = cl.account_get(name=service.model.dbobj.name, create=False)

    args = job.model.args
    category = args.pop('changeCategory')
    if category == "dataschema" and service.model.actionsState['install'] == 'ok':
        for key, value in args.items():
            if key == 'accountusers':
                # value is a list of (uservdc)
                if not isinstance(value, list):
                    raise j.exceptions.Input(message="%s should be a list" % key)
                if 'uservdc' in service.producers:
                    for s in service.producers['uservdc']:
                        # Drop producers no longer present in the new list.
                        if not any(v['name'] == s.name for v in value):
                            service.model.producerRemove(s)
                        for v in value:
                            accessRight = v.get('accesstype', '')
                            if v['name'] == s.name and accessRight != get_user_accessright(s.name, service) and accessRight:
                                name = s.name + '@' + s.model.data.provider if s.model.data.provider else s.name
                                account.update_access(name, v['accesstype'])
                for v in value:
                    userservice = service.aysrepo.serviceGet('uservdc', v['name'])
                    if userservice not in service.producers.get('uservdc', []):
                        service.consume(userservice)
            setattr(service.model.data, key, value)

        authorization_user(account, service, g8client)

        # update capacity
        account.model['maxMemoryCapacity'] = service.model.data.maxMemoryCapacity
        account.model['maxVDiskCapacity'] = service.model.data.maxDiskCapacity
        account.model['maxNumPublicIP'] = service.model.data.maxNumPublicIP
        account.model['maxCPUCapacity'] = service.model.data.maxCPUCapacity
        account.save()
        service.save()


def uninstall(job):
    """Delete the account backing this service."""
    service = job.service
    if 'g8client' not in service.producers:
        raise j.exceptions.AYSNotFound("No producer g8client found. Cannot continue uninstall of %s" % service)
    g8client = service.producers["g8client"][0]
    cl = _openvcloud_client(g8client)
    acc = cl.account_get(service.model.dbobj.name)
    acc.delete()


def list_disks(job):
    """Cache the account's disk list on the service model."""
    service = job.service
    g8client = service.producers["g8client"][0]
    cl = _openvcloud_client(g8client)
    account = cl.account_get(name=service.model.dbobj.name)
    service.model.disks = account.disks
    service.save()


def get_consumption(job):
    """Fetch consumption data for the configured window.

    When no window is configured, defaults to one hour starting at the
    account's creation time.
    """
    import datetime
    service = job.service
    g8client = service.producers["g8client"][0]
    cl = _openvcloud_client(g8client)
    account = cl.account_get(name=service.model.dbobj.name)
    if not service.model.data.consumptionFrom and not service.model.data.consumptionTo:
        service.model.data.consumptionFrom = account.model['creationTime']
        end = datetime.datetime.fromtimestamp(service.model.data.consumptionFrom) + datetime.timedelta(hours=1)
        service.model.data.consumptionTo = end.timestamp()
    service.model.data.consumptionData = account.get_consumption(service.model.data.consumptionFrom, service.model.data.consumptionTo)
nilq/baby-python
python
import math  # NOTE(review): unused here; kept in case the wider file needs it

# Read a number as text and split it into its decimal digits.
n = input()
digits = list(map(int, n))

# Last digit first, then the remaining leading digits as a list.
# BUGFIX/cleanup: the original re-ran list(map(int, ...)) over a slice that
# was already a list of ints — a redundant second conversion pass.
last_num = digits[-1]
leading_digits = digits[:-1]

print(last_num)
print(leading_digits)
nilq/baby-python
python
"""Django views used to exercise the elasticapm agent: error raising,
logging, template rendering and streaming responses."""
from __future__ import absolute_import

import logging
import time

from django.contrib.auth.models import User
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import get_object_or_404, render, render_to_response

import elasticapm


class MyException(Exception):
    """Plain exception raised by the error views below."""
    pass


class IgnoredException(Exception):
    """Exception marked so the agent skips it."""
    # presumably inspected by the agent's exception filtering — TODO confirm
    skip_elasticapm = True


def no_error(request):
    """Return an empty 200 response with a custom header set."""
    resp = HttpResponse('')
    resp['My-Header'] = 'foo'
    return resp


def fake_login(request):
    """Stand-in login endpoint; returns an empty response."""
    return HttpResponse('')


def django_exc(request):
    """Trigger a 404 path: get_object_or_404 on a non-model lookup."""
    return get_object_or_404(MyException, pk=1)


def raise_exc(request):
    """Raise MyException with an optional ?message= override."""
    raise MyException(request.GET.get('message', 'view exception'))


def raise_ioerror(request):
    """Raise IOError with an optional ?message= override."""
    raise IOError(request.GET.get('message', 'view exception'))


def decorated_raise_exc(request):
    """Indirection layer over raise_exc (exercises nested view frames)."""
    return raise_exc(request)


def template_exc(request):
    """Render a template expected to error."""
    return render_to_response('error.html')


def ignored_exception(request):
    """Raise the exception type flagged with skip_elasticapm."""
    raise IgnoredException()


def logging_request_exc(request):
    """Log a caught exception (with the request attached) instead of
    letting it propagate; still returns 200."""
    logger = logging.getLogger(__name__)
    try:
        raise Exception(request.GET.get('message', 'view exception'))
    except Exception as e:
        logger.error(e, exc_info=True, extra={'request': request})
    return HttpResponse('')


def logging_view(request):
    """Emit a plain info log record on the 'logmiddleware' logger."""
    logger = logging.getLogger('logmiddleware')
    logger.info("Just loggin'")
    return HttpResponse('')


def render_template_view(request):
    """Render a user list whose data callable opens a custom span."""
    def something_expensive():
        with elasticapm.capture_span("something_expensive", "code"):
            return [User(username='Ron'), User(username='Beni')]

    return render(request, "list_users.html", {'users': something_expensive})


def render_jinja2_template(request):
    """Render a Jinja2 template."""
    return render(request, "jinja2_template.html")


def render_user_view(request):
    """Render a user list backed by repeated DB queries inside a span."""
    def something_expensive():
        with elasticapm.capture_span("something_expensive", "code"):
            for i in range(100):
                users = list(User.objects.all())
        return users

    return render(request, "list_users.html", {'users': something_expensive})


def streaming_view(request):
    """Stream five chunks, each produced inside its own span."""
    def my_generator():
        for i in range(5):
            with elasticapm.capture_span('iter', 'code'):
                time.sleep(0.01)
                yield str(i)

    resp = StreamingHttpResponse(my_generator())
    return resp


def override_transaction_name_view(request):
    """Override the agent's transaction name and result explicitly."""
    elasticapm.set_transaction_name('foo')
    elasticapm.set_transaction_result('okydoky')
    return HttpResponse()
nilq/baby-python
python
"""empty message Revision ID: 878f67285c72 Revises: 122dd6a5c035 Create Date: 2019-05-29 12:57:36.544059 """ # revision identifiers, used by Alembic. revision = '878f67285c72' down_revision = '122dd6a5c035' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): # ###commandsautogeneratedbyAlembic-pleaseadjust!### op.create_table('registration', sa.Column('id', sa.Integer(), nullable=False), sa.Column('offer_id', sa.Integer(), nullable=False), sa.Column('registration_form_id', sa.Integer(), nullable=False), sa.Column('confirmed', sa.Boolean(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('confirmation_email_sent_at', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['offer_id'], ['offer.id']), sa.ForeignKeyConstraint(['registration_form_id'], ['registration_form.id']), sa.PrimaryKeyConstraint('id') ) op.create_table('registration_answer', sa.Column('id', sa.Integer(), nullable=False), sa.Column('registration_id', sa.Integer(), nullable=False), sa.Column('registration_question_id', sa.Integer(), nullable=False), sa.Column('value', sa.String(), nullable=False), sa.ForeignKeyConstraint(['registration_id'], ['registration.id']), sa.ForeignKeyConstraint(['registration_question_id'], ['registration_question.id']), sa.PrimaryKeyConstraint('id') ) # ###endAlembiccommands### def downgrade(): # ### commands auto generated by Alembic-please adjust!### op.drop_table('registration_answer') op.drop_table('registration') # ###endAlembiccommands###
nilq/baby-python
python
import numpy as np
import trimesh
from pdb import set_trace as bp  # debug helper; kept from original


def write_off(file_path, verts, faces=None):
    """Export a point cloud (optionally with faces) into an ASCII .off file.

    Positional arguments:
        file_path: output path
        verts: Nx3 float array

    Kwargs:
        faces: Mx3 int array of vertex indices
    """
    assert isinstance(verts, np.ndarray), "Invalid data type for vertices: %s" % type(verts)
    assert len(verts.shape) == 2 and verts.shape[1] == 3, "Invalid array shape for vertices: %s" % str(verts.shape)
    verts_count = verts.shape[0]

    if faces is not None:
        assert isinstance(faces, np.ndarray), "Invalid data type for faces: %s" % type(faces)
        assert len(faces.shape) == 2 and faces.shape[1] == 3, "Invalid array shape for faces: %s" % str(faces.shape)
        faces_count = faces.shape[0]

    # `with` guarantees the handle is closed even if savetxt raises.
    with open(file_path, 'w') as off:
        # header
        off.write('OFF\n')
        if faces is not None:
            off.write('%d %d 0\n' % (verts_count, faces_count))
        else:
            off.write('%d 0 0\n' % (verts_count))

        # vertices
        np.savetxt(off, verts, fmt='%.6f')

        # faces: OFF rows are "3 i j k" for triangles
        if faces is not None:
            # BUGFIX: dtype=np.int was deprecated in NumPy 1.20 and removed
            # in 1.24 (AttributeError on current NumPy); builtin int is the
            # documented replacement.
            augmented_faces = np.hstack((np.ones((faces.shape[0], 1), dtype=int) * 3, faces))
            np.savetxt(off, augmented_faces, fmt='%d')


## base function
NORM = np.linalg.norm


def lap_smooth(v, f, adj):
    """One pass of uniform Laplacian smoothing: each vertex is moved to the
    average of its one-ring neighbours.

    The original contained an unreachable inverse-edge-length weighted
    variant behind `if 1:`; that dead branch has been removed.
    """
    smoothed = v.copy()
    for i in range(v.shape[0]):
        neibour = adj[i]
        smoothed[i] = np.average(np.vstack((v[neibour])), 0)
    return smoothed


def smooth2(v, f, adj, iteration):
    """Apply `iteration` passes of lap_smooth and return the result."""
    for _ in range(iteration):
        v = lap_smooth(v, f, adj)
    return v


def get_smoothed_mesh(v, f, iteration=5):
    """Return a trimesh of (v, f) after `iteration` Laplacian smoothing passes."""
    adj = get_adj(v, f)
    smooth_verts = smooth2(v, f, adj, iteration)
    return trimesh.Trimesh(vertices=smooth_verts, faces=f, process=False)


def get_adj(v, f):
    """Return, per vertex index, the list of its one-ring neighbour indices.

    NOTE(review): `remove(i)` raises KeyError for a vertex referenced by no
    face — assumes every vertex appears in at least one face. TODO confirm.
    """
    adj = []
    for i in range(v.shape[0]):
        neibour = set(f[np.where(f == i)[0]].flatten())
        neibour.remove(i)
        adj.append(list(neibour))
    return adj


def get_tagent_space_naive(mesh):
    """Return per-vertex (tangents, normals, bitangents) built from the
    vertex normals and a fixed +Y reference.

    NOTE(review): degenerates (division by zero) when a normal is parallel
    to the Y axis — the cross product is the zero vector. TODO confirm the
    inputs avoid this. (Name typo "tagent" kept: callers use it.)
    """
    normals = mesh.vertex_normals
    tangents = np.cross(normals, normals + [0, 1, 0])
    tangents = tangents / np.linalg.norm(tangents, axis=1).reshape(-1, 1)
    bitangents = np.cross(normals, tangents)
    bitangents = bitangents / np.linalg.norm(bitangents, axis=1).reshape(-1, 1)
    return tangents, normals, bitangents


def rotation_matrix_x(angle):
    """Rotation matrix about the X axis; `angle` in degrees."""
    rad = angle * np.pi / 180
    return np.array([[1, 0, 0],
                     [0, np.cos(rad), -np.sin(rad)],
                     [0, np.sin(rad), np.cos(rad)]])


def rotation_matrix_y(angle):
    """Rotation matrix about the Y axis; `angle` in degrees."""
    rad = angle * np.pi / 180
    return np.array([[np.cos(rad), 0, np.sin(rad)],
                     [0, 1, 0],
                     [-np.sin(rad), 0, np.cos(rad)]])


def rotation_matrix_z(angle):
    """Rotation matrix about the Z axis; `angle` in degrees."""
    rad = angle * np.pi / 180
    return np.array([[np.cos(rad), -np.sin(rad), 0],
                     [np.sin(rad), np.cos(rad), 0],
                     [0, 0, 1]])


def rotate_plane(vec1, vec2):
    """Return the rotation matrix taking vec1 onto vec2 (Rodrigues formula).

    NOTE(review): when sin(theta) == 0 this returns identity, which is only
    correct for parallel — not antiparallel — vectors. Behavior kept as-is.
    """
    norm = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    cos_theta = np.dot(vec1, vec2) / norm
    sin_theta = np.linalg.norm(np.cross(vec1, vec2)) / norm
    if sin_theta == 0:
        return np.eye(3)
    k = np.cross(vec1, vec2) / (norm * sin_theta)
    K = np.array([[0, -k[2], k[1]],
                  [k[2], 0, -k[0]],
                  [-k[1], k[0], 0]])
    R = np.eye(3) + sin_theta * K + (1 - cos_theta) * np.dot(K, K)
    return R


def get_index_list(full, part):
    """Map each point of `part` to the index of its (near-)equal point in
    `full` (tolerance 1e-3); raises IndexError when a point has no match."""
    idlist = []
    for pt in part:
        arr = NORM(full - pt, axis=1) < 0.001
        id = np.where(arr)
        idlist.append(id[0][0])
    return idlist


def get_Rs(tangents, normals, bitangents):
    """Stack the tangent frames into one (N, 3, 3) array."""
    return np.dstack((tangents, normals, bitangents))


def get_delta_mushed_target(source_v, target_v, f):
    """Delta-mush transfer: re-apply the source's high-frequency detail
    (offsets from its smoothed self, stored in tangent space) onto the
    deformed target mesh, and return the resulting vertices."""
    smooth_time = 25
    smoothed_source_mesh = get_smoothed_mesh(source_v, f, smooth_time)
    st, sn, sb = get_tagent_space_naive(smoothed_source_mesh)
    Rs = get_Rs(st, sn, sb)
    # Per-vertex detail offsets expressed in the source tangent frames.
    vd = np.einsum('ijk,ik->ij', np.linalg.pinv(Rs), source_v - smoothed_source_mesh.vertices)

    smoothed_target_mesh = get_smoothed_mesh(target_v, f, smooth_time)
    tn = smoothed_target_mesh.vertex_normals
    tt = np.zeros_like(tn)
    tb = np.zeros_like(tn)
    # key part: rotate each source tangent frame onto the target normal
    for i, vec1 in enumerate(tn):
        Rn = rotate_plane(sn[i], tn[i])
        tt[i], tb[i] = Rn @ st[i], Rn @ sb[i]
    Cs = get_Rs(tt, tn, tb)
    deformed = np.einsum('ijk,ik->ij', Cs, vd) + smoothed_target_mesh.vertices
    return deformed


def demo():
    """Load a tube mesh, bend part of it, delta-mush and write the result."""
    # load source mesh
    source_mesh = trimesh.load_mesh('tube_r.off', process=False)
    v, f = source_mesh.vertices, source_mesh.faces

    # rotate part of the tube about Z
    rotation_angle_y = 45
    center = np.average(v, 0)
    select = np.where(v[:, 0] > center[0] + 1)
    R = rotation_matrix_z(rotation_angle_y)
    target = v.copy()
    target[:, 0] -= 1
    target[select] = (R @ target[select].T).T
    target[:, 0] += 1

    # get delta mushed target mesh
    deformed = get_delta_mushed_target(v, target, f)
    write_off('deformed.off', deformed, f)


if __name__ == '__main__':
    demo()
nilq/baby-python
python
"""Export-version row widget: shows one asset export version with
folder / comment / .tx action buttons and sanity-state icon."""
from PyQt5 import QtWidgets, QtCore, QtGui
import os
#from gui.export_widget import Ui_Form
from editable_list_widget import list_widget
from gui import build
from wizard.vars import defaults
from wizard.tools import log
from wizard.prefs.main import prefs
import options_widget
import dialog_comment
from wizard.tools.tx_from_files import tx_from_files
from wizard.prefs import project as project_prefs

logger = log.pipe_log(__name__)
# NOTE(review): rebinding `prefs` to an instance shadows the imported class;
# nothing else in this module uses the class afterwards.
prefs = prefs()


class Main(list_widget):
    # List row for one asset export version.

    def __init__(self, asset, sanity, count):
        super(Main, self).__init__()
        self.sanity = sanity
        self.count = count
        self.asset = asset
        self.init_ui()
        self.connect_functions()

    def init_ui(self):
        # Build the row: action buttons plus version/user/date/comment labels.
        self.export_widget_folder_pushButton = self.add_button(defaults._folder_icon_)
        self.export_widget_comment_pushButton = self.add_button(defaults._comment_icon_)
        self.export_widget_tx_pushButton = self.add_button(defaults._tx_icon_)
        # NOTE(review): this assignment is unused — update_sanity() picks the icon.
        icon = defaults._export_list_neutral_icon_
        export_prefs = prefs.asset(self.asset).export
        self.export_widget_version_label = self.add_label(self.asset.export_version, "export_widget_version_label", 40)
        self.export_widget_user_label = self.add_label(export_prefs.version_user, "export_widget_user_label", 120)
        self.export_widget_date_label = self.add_label(export_prefs.version_date, "export_widget_date_label", 180)
        self.export_widget_comment_label = self.add_label(export_prefs.version_comment, "export_widget_comment_label", 230, QtCore.Qt.AlignLeft)
        try:
            # Best-effort: the software label widget may not exist in this layout.
            self.ui.export_widget_software_label.setText(f'From {export_prefs.version_software}')
        except:
            pass
        if self.asset.stage != defaults._texturing_:
            # The .tx button is only relevant for the texturing stage.
            self.export_widget_tx_pushButton.setVisible(0)
        self.update_sanity(self.sanity)

    def update_sanity(self, sanity):
        # Pick the row icon from the export folder contents:
        # missing/empty folder or wrong extension -> "missing file" icon.
        if sanity:
            list_dir = os.listdir(prefs.asset(self.asset).export.version_folder)
            if list_dir == [] or not list_dir:
                icon = defaults._missing_file_export_list_icon_
            else:
                if prefs.asset(self.asset).software.extension in list_dir[0]:
                    icon = defaults._export_list_icon_
                else:
                    icon = defaults._missing_file_export_list_icon_
        else:
            # Sanity check failed upstream: neutral icon.
            icon = defaults._export_list_neutral_icon_
        self.set_icon(icon)

    def open_folder(self):
        # Open the export version folder in the OS file browser (Windows).
        file = prefs.asset(self.asset).export.version_folder
        os.startfile(file)

    def change_comment(self):
        # Pop the comment dialog; on accept, refresh the row label.
        self.dialog_comment = dialog_comment.Main(self.asset)
        if build.launch_dialog_comment(self.dialog_comment):
            self.export_widget_comment_label.setText(self.dialog_comment.comment)

    def make_tx(self):
        # Convert every exported file matching the stage/software publish
        # extension into a .tx texture.
        folder = prefs.asset(self.asset).export.version_folder
        file_names_list = os.listdir(folder)
        files_list = []
        extension = (project_prefs.get_custom_pub_ext_dic())[self.asset.stage][self.asset.software]
        for file in file_names_list:
            if file.endswith(extension):
                files_list.append(os.path.join(folder, file))
        tx_from_files(files_list)

    def connect_functions(self):
        # Wire the three action buttons to their handlers.
        self.export_widget_folder_pushButton.clicked.connect(self.open_folder)
        self.export_widget_comment_pushButton.clicked.connect(self.change_comment)
        self.export_widget_tx_pushButton.clicked.connect(self.make_tx)

    def closeEvent(self, event):
        # Intercept close: ignore the event and hide the widget instead.
        event.ignore()
        self.hide()
python
""" PYTHON NUMBER SEQUENCE """ __author__ = 'Sol Amour - amoursol@gmail.com' __twitter__ = '@solamour' __version__ = '1.0.0' # SYNTAX: [ value * step for value in range( amount ) ] # Step = This is the value we will multiply our range by # Amount = How many total values we want # NOTES: # All parameters can be either integers or doubles # All parameters can be positive or negative # range( amount ) is the same as range( 0, amount ) # To achieve the same output as '0..10' in DesignScript, you must use # 'range( 10 + 1 )' as the Stop value is not included in the range function # The input ports step = IN[0] # A number such as 20 (int) or 20.5 (float) demarcating the step amount = IN[1] # A number such as 10 demarcating the amount # The output port - In this case we use a list comprehension OUT = [ value * step for value in range( amount ) ]
nilq/baby-python
python
# based on https://github.com/pypa/sampleproject # MIT License # Always prefer setuptools over distutils from setuptools import setup, find_namespace_packages from os import path from io import open here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='asreview-semantic-clustering', description='Semantic clustering tool for the ASReview project', version='0.1', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/asreview/semantic-clusters', author='Utrecht University', author_email='asreview@uu.nl', classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Pick your license as you wish 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], keywords='asreview extension semantic clustering clusters visualization', packages=find_namespace_packages(include=['asreviewcontrib.*']), install_requires=[ "numpy", "matplotlib", "asreview", "dash", "plotly", "sklearn", "transformers", "numpy", "seaborn", "torch", ], extras_require={ }, entry_points={ "asreview.entry_points": [ "semantic_clustering = asreviewcontrib.semantic_clustering.main:SemClusEntryPoint", # noqa: E501 ] }, project_urls={ 'Bug Reports': "https://github.com/asreview/semantic-clusters/issues", 'Source': "https://github.com/asreview/semantic-clusters", }, )
nilq/baby-python
python