# A few convenient math functions for the bicorr project
import matplotlib
#matplotlib.use('agg') # for flux
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')

import sys
import os
import os.path
import scipy.io as sio
from scipy.optimize import curve_fit
import time
import numpy as np
np.set_printoptions(threshold=sys.maxsize)  # print entire matrices (np.nan is rejected by modern numpy)
import pandas as pd
from tqdm import *

# Don't import any bicorr modules here
# Other modules will import bicorr_math, but not the other way around


def prop_err_division(num, num_err, denom, denom_err):
    # Propagate uncertainty through A = num/denom, assuming independent errors
    A = num/denom
    A_err = A*np.sqrt((num_err/num)**2+(denom_err/denom)**2)
    return A, A_err


def calc_centers(edges):
    """
    Simple method for returning centers from an array of bin edges.
    Calculates the center between each pair of containing edges.
    Example: plt.plot(bicorr.centers(edges), counts, '.k')
    Serves as a shortcut to first producing an array of bin centers.

    Parameters
    ----------
    edges : ndarray
        Array of bin edges

    Returns
    -------
    centers : ndarray
        Array of bin centers
    """
    return (edges[:-1]+edges[1:])/2


def calc_histogram_mean(bin_edges, counts, print_flag=False, bin_centers_flag=False):
    """
    Calculate mean of a count rate distribution, counts vs. x.
    Errors are calculated under the assumption that you are working with
    counting statistics (C_err = sqrt(C) in each bin).

    Parameters
    ----------
    bin_edges : ndarray
        Bin edges for x
    counts : ndarray
        Bin counts
    print_flag : bool
        Option to print intermediate values
    bin_centers_flag : bool
        Option to provide bin centers instead of bin edges (useful for 2d histograms)

    Returns
    -------
    x_mean : float
    x_mean_err : float
    """
    if bin_centers_flag == True:
        bin_centers = bin_edges
    else:
        bin_centers = calc_centers(bin_edges)

    num = np.sum(np.multiply(bin_centers, counts))
    num_err = np.sqrt(np.sum(np.multiply(bin_centers**2, counts)))
    denom = np.sum(counts)
    denom_err = np.sqrt(denom)

    if print_flag:
        print('num: ', num)
        print('num_err: ', num_err)
        print('denom: ', denom)
        print('denom_err: ', denom_err)

    x_mean = num/denom
    x_mean_err = x_mean * np.sqrt((num_err/num)**2+(denom_err/denom)**2)

    if print_flag:
        print('x_mean: ', x_mean)
        print('x_mean_err:', x_mean_err)

    return x_mean, x_mean_err


def convert_energy_to_time(energy, distance=1.05522):
    '''
    Convert energy in MeV to time of flight in ns for neutrons traveling the
    given flight distance. From Matthew's `reldist.m` script.
    6/5/18 Changing default to 105.522 cm, which is the mean flight distance.

    Parameters
    ----------
    energy : float
        Neutron energy in MeV
    distance : float, optional
        Neutron flight distance in meters

    Returns
    -------
    time : float
        Time of flight of neutron in ns
    '''
    # Constants
    m_n = 939.565  # MeV/c^2
    c = 2.99e8     # m/s

    # Calculations
    v = c*np.sqrt(2*energy/m_n)          # non-relativistic speed
    time = np.divide(distance/v, 1e-9)   # s -> ns
    return time


def convert_time_to_energy(time, distance=1.05522):
    '''
    Convert time of flight in ns to energy in MeV for neutrons traveling the
    given flight distance. From Matthew's `reldist.m` script.
    6/5/18 Changing default to 105.522 cm, which is the mean flight distance.

    If converting an array of times, use:
    energy_bin_edges = np.asarray(np.insert(
        [bicorr.convert_time_to_energy(t) for t in dt_bin_edges[1:]], 0, 10000))

    Parameters
    ----------
    time : float
        Time of flight of neutron in ns
    distance : float, optional
        Neutron flight distance in meters

    Returns
    -------
    energy : float
        Neutron energy in MeV
    '''
    # Constants
    m_n = 939.565  # MeV/c^2
    c = 2.99e8     # m/s

    v = distance * 1e9 / time  # ns -> s
    energy = (m_n/2)*(v/c)**2
    return energy


def f_line(x, m, b):
    """
    Line fit with equation y = mx + b

    Parameters
    ----------
    x : array
        x values
    m : float
        slope
    b : float
        y-intercept

    Returns
    -------
    y : array
        y values
    """
    y = m*x + b
    return y


def fit_f_line(x, y, y_err=None, p0=None, bounds=(-np.inf, np.inf)):
    """
    Fit a straight line with equation y = mx + b

    Parameters
    ----------
    x : ndarray
    y : ndarray
    y_err : ndarray, optional
    p0 : ndarray
        Initial guess of coefficients
    bounds : ndarray
        Boundaries for searching for coefficients

    Returns
    -------
    m, m_err : float
    b, b_err : float
    """
    if y_err is None:
        y_err = np.ones(x.size)

    # Only use data points with non-zero error
    w = np.where(y_err != 0)

    popt, pcov = curve_fit(f_line, x[w], y[w], sigma=y_err[w], p0=p0,
                           absolute_sigma=True, bounds=bounds)
    errors = np.sqrt(np.diag(pcov))

    [m, b] = popt
    [m_err, b_err] = errors

    return m, m_err, b, b_err
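# --- Added usage sketch (not part of the original module) ---
# Made-up values; assumes the file above is importable as `bicorr_math`.
import numpy as np
import bicorr_math

edges = np.linspace(0, 10, 11)
counts = np.array([0, 2, 5, 9, 12, 10, 7, 4, 2, 1])
x_mean, x_mean_err = bicorr_math.calc_histogram_mean(edges, counts)

t = bicorr_math.convert_energy_to_time(2.0)  # MeV -> ns over the default 1.05522 m
e = bicorr_math.convert_time_to_energy(t)    # ns  -> MeV, recovers ~2.0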
nilq/baby-python
python
import sys
import os

project = u'Pelikan'
description = u"Unified cache backend. http://go/pelikan"
copyright = u'Twitter'

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.ifconfig',
]
exclude_patterns = ['_build']
html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
language = u'C'
today_fmt = '%Y/%m/%d'
pygments_style = 'sphinx'
html_theme = "default"
html_logo = u'_static/img/white_pelican.jpg'
intersphinx_mapping = {'http://docs.python.org/': None}
nilq/baby-python
python
from random import (randrange, shuffle)
from copy import deepcopy

from forest_calculations import (get_forest_dimensions, get_tree_counts)
from forest_transpormations import (flatten_forest, deflatten_forest)
from forest_constants import (LEAFY, CONIFEROUS)


def get_random_position(rows, cols):
    return randrange(rows), randrange(cols)


def randomize_forest_1(forest):
    forest_cpy = deepcopy(forest)
    rows_num, cols_num = get_forest_dimensions(forest_cpy)
    leafy_count, coniferous_count = get_tree_counts(forest_cpy)

    if leafy_count > coniferous_count:
        more_trees = LEAFY
        less_trees = CONIFEROUS
        less_trees_count = coniferous_count
    else:
        more_trees = CONIFEROUS
        less_trees = LEAFY
        less_trees_count = leafy_count

    for row_index, row in enumerate(forest_cpy):
        for col_index, _ in enumerate(row):
            forest_cpy[row_index][col_index] = more_trees

    for _ in range(less_trees_count):
        while True:
            random_row, random_col = get_random_position(rows_num, cols_num)
            if forest_cpy[random_row][random_col] != less_trees:
                forest_cpy[random_row][random_col] = less_trees
                break

    return forest_cpy


def randomize_forest_2(forest):
    rows, _ = get_forest_dimensions(forest)
    flat_forest = flatten_forest(forest)
    shuffle(flat_forest)
    return deflatten_forest(flat_forest, rows)
nilq/baby-python
python
from img_utils import img_utils as _lib
from .utils import u8


def darken_pixels(src_path: str, dst_path: str, amount: int, cutoff: int):
    """
    Darken Pixels

    Darkens all pixels in the image by a percentage, specified by `amount`.
    Any pixel that doesn't have a subpixel below the `cutoff` will be ignored.
    `amount` and `cutoff` are clamped between 0 and 255 (inclusive).

    ```python
    import img_utils

    img_utils.darken_pixels(
        src_path="in_file.jpg",
        dst_path="out_file.jpg",
        amount=80,
        cutoff=200,
    )
    ```

    will take `in_file.jpg` and lower each subpixel of the image by 80%,
    unless all the subpixels are above 200. The RGB pixel `100, 220, 220`
    will be turned into `20, 44, 44`, while `210, 220, 230` will be left alone.
    """
    _lib._darken_pixels(src_path, dst_path, u8(amount), u8(cutoff))
nilq/baby-python
python
import asyncio
import logging
import os
import socket
import uuid

import pika
import pika.adapters.asyncio_connection

from .subscription import QueueSubscriptionObject, ExchangeSubscriptionObject
from ..broker import Broker

#

L = logging.getLogger(__name__)

#


class AMQPBroker(Broker):
    '''
    The broker that uses the Advanced Message Queuing Protocol (AMQP);
    it can be used with e.g. RabbitMQ as a message queue.
    '''

    ConfigDefaults = {
        'url': 'amqp://username:password@localhost/virtualhost',
        'appname': 'asab.mom',
        'reconnect_delay': 10.0,
        'prefetch_count': 5,
        'exchange': 'amq.fanout',
        'reply_exchange': '',
    }

    def __init__(self, app, accept_replies=False, task_service=None, config_section_name="asab:mom:amqp", config=None):
        super().__init__(app, accept_replies, task_service, config_section_name, config)

        self.Origin = '{}#{}'.format(socket.gethostname(), os.getpid())

        self.Connection = None
        self.SubscriptionObjects = {}
        self.ReplyTo = None

        self.InboundQueue = asyncio.Queue(loop=app.Loop)
        self.OutboundQueue = asyncio.Queue(loop=app.Loop)

        self.SenderFuture = None

        self.Exchange = self.Config['exchange']
        self.ReplyExchange = self.Config['reply_exchange']

    async def finalize(self, app):
        await super().finalize(app)
        if self.SenderFuture is not None:
            self.SenderFuture.cancel()
            self.SenderFuture = None

    def _reconnect(self):
        if self.Connection is not None:
            if not (self.Connection.is_closing or self.Connection.is_closed):
                self.Connection.close()
            self.Connection = None

        if self.SenderFuture is not None:
            self.SenderFuture.cancel()
            self.SenderFuture = None

        parameters = pika.URLParameters(self.Config['url'])
        if parameters.client_properties is None:
            parameters.client_properties = dict()
        parameters.client_properties['application'] = self.Config['appname']

        self.SubscriptionObjects.clear()
        self.ReplyTo = None

        self.Connection = pika.adapters.asyncio_connection.AsyncioConnection(
            parameters=parameters,
            on_open_callback=self._on_connection_open,
            on_open_error_callback=self._on_connection_open_error,
            on_close_callback=self._on_connection_close
        )

    # Connection callbacks

    def _on_connection_open(self, connection):
        L.info("AMQP connected")
        asyncio.ensure_future(self.ensure_subscriptions(), loop=self.Loop)
        self.Connection.channel(on_open_callback=self._on_sending_channel_open)

    def _on_connection_close(self, connection, *args):
        try:
            code, reason = args
            L.warning("AMQP disconnected ({}): {}".format(code, reason))
        except ValueError:
            error, = args
            L.warning("AMQP disconnected: {}".format(error))
        self.Loop.call_later(float(self.Config['reconnect_delay']), self._reconnect)

    def _on_connection_open_error(self, connection, error_message=None):
        L.error("AMQP error: {}".format(error_message if error_message is not None else 'Generic error'))
        self.Loop.call_later(float(self.Config['reconnect_delay']), self._reconnect)

    def _on_sending_channel_open(self, channel):
        self.SenderFuture = asyncio.ensure_future(self._sender_future(channel), loop=self.Loop)

    async def ensure_subscriptions(self):
        if self.Connection is None:
            return
        if not self.Connection.is_open:
            return

        for s, pkwargs in self.Subscriptions.items():
            if s in self.SubscriptionObjects:
                continue
            if pkwargs.get('exchange', False):
                self.SubscriptionObjects[s] = ExchangeSubscriptionObject(self, s, **pkwargs)
            else:
                self.SubscriptionObjects[s] = QueueSubscriptionObject(self, s, **pkwargs)

    async def main(self):
        self._reconnect()
        while True:
            channel, method, properties, body = await self.InboundQueue.get()
            try:
                if self.AcceptReplies and (method.routing_key == self.ReplyTo):
                    await self.dispatch("reply", properties, body)
                else:
                    await self.dispatch(method.routing_key, properties, body)
            except BaseException:
                L.exception("Error when processing inbound message")
                channel.basic_nack(method.delivery_tag, requeue=False)
            else:
                channel.basic_ack(method.delivery_tag)

    async def publish(
        self, body,
        target: str = '',
        content_type: str = None,
        content_encoding: str = None,
        correlation_id: str = None,
        reply_to: str = None,
        exchange: str = None
    ):
        await self.OutboundQueue.put((
            exchange if exchange is not None else self.Exchange,  # Where to publish
            target,  # Routing key
            body,
            pika.BasicProperties(
                content_type=content_type,
                content_encoding=content_encoding,
                delivery_mode=1,
                correlation_id=correlation_id,
                reply_to=self.ReplyTo,
                message_id=uuid.uuid4().urn,  # id
                app_id=self.Origin,  # origin
                # headers = { }
            )
        ))

    async def reply(
        self, body,
        reply_to: str,
        content_type: str = None,
        content_encoding: str = None,
        correlation_id: str = None,
    ):
        await self.OutboundQueue.put((
            self.ReplyExchange,  # Where to publish
            reply_to,  # Routing key
            body,
            pika.BasicProperties(
                content_type=content_type,
                content_encoding=content_encoding,
                delivery_mode=1,
                correlation_id=correlation_id,
                message_id=uuid.uuid4().urn,  # id
                app_id=self.Origin,  # origin
                # headers = { }
            )
        ))

    async def _sender_future(self, channel):
        if self.AcceptReplies:
            self.ReplyTo = await self._create_exclusive_queue(channel, "~R@" + self.Origin)
        while True:
            exchange, routing_key, body, properties = await self.OutboundQueue.get()
            channel.basic_publish(exchange, routing_key, body, properties)

    async def _create_exclusive_queue(self, channel, queue_name):
        lock = asyncio.Event()

        def on_queue_declared(method):
            assert method.method.queue == queue_name
            self.SubscriptionObjects[queue_name] = QueueSubscriptionObject(self, queue_name)
            lock.set()  # unblock the waiter once the queue is declared (the original set/clear order would not block)

        channel.queue_declare(
            queue=queue_name,
            exclusive=True,
            auto_delete=True,
            callback=on_queue_declared,
        )

        await lock.wait()
        return queue_name
nilq/baby-python
python
from urllib import urlencode

from django import forms
from django.conf import settings
from django.contrib import admin
from django.core import validators
from django.core.urlresolvers import resolve
from django.utils.html import format_html
from django.utils.translation import ugettext

from olympia import amo
from olympia.access import acl
from olympia.amo.urlresolvers import reverse

from . import models


class AddonAdmin(admin.ModelAdmin):
    class Media:
        css = {
            'all': ('css/admin/l10n.css',)
        }
        js = ('js/admin/l10n.js',)

    exclude = ('authors',)
    list_display = ('__unicode__', 'type', 'status', 'average_rating')
    list_filter = ('type', 'status')

    fieldsets = (
        (None, {
            'fields': ('name', 'guid', 'default_locale', 'type', 'status'),
        }),
        ('Details', {
            'fields': ('summary', 'description', 'homepage', 'eula',
                       'privacy_policy', 'developer_comments', 'icon_type'),
        }),
        ('Support', {
            'fields': ('support_url', 'support_email'),
        }),
        ('Stats', {
            'fields': ('average_rating', 'bayesian_rating', 'total_ratings',
                       'text_ratings_count', 'weekly_downloads',
                       'total_downloads', 'average_daily_users'),
        }),
        ('Truthiness', {
            'fields': ('disabled_by_user', 'view_source', 'requires_payment',
                       'public_stats', 'is_experimental', 'external_software',
                       'dev_agreement'),
        }),
        ('Dictionaries', {
            'fields': ('target_locale', 'locale_disambiguation'),
        }))

    def queryset(self, request):
        return models.Addon.unfiltered


class FeatureAdmin(admin.ModelAdmin):
    raw_id_fields = ('addon',)
    list_filter = ('application', 'locale')
    list_display = ('addon', 'application', 'locale')


class FrozenAddonAdmin(admin.ModelAdmin):
    raw_id_fields = ('addon',)


class CompatOverrideRangeInline(admin.TabularInline):
    model = models.CompatOverrideRange
    # Exclude type since firefox only supports blocking right now.
    exclude = ('type',)


class CompatOverrideAdminForm(forms.ModelForm):

    def clean(self):
        if '_confirm' in self.data:
            raise forms.ValidationError('Click "Save" to confirm changes.')
        return self.cleaned_data


class CompatOverrideAdmin(admin.ModelAdmin):
    raw_id_fields = ('addon',)
    inlines = [CompatOverrideRangeInline]
    form = CompatOverrideAdminForm


class ReplacementAddonForm(forms.ModelForm):

    def clean_path(self):
        path = None
        try:
            path = self.data.get('path')
            site = settings.SITE_URL
            if models.ReplacementAddon.path_is_external(path):
                if path.startswith(site):
                    raise forms.ValidationError(
                        'Paths for [%s] should be relative, not full URLs '
                        'including the domain name' % site)
                validators.URLValidator()(path)
            else:
                path = ('/' if not path.startswith('/') else '') + path
                resolve(path)
        except forms.ValidationError as validation_error:
            # Re-raise the ValidationError about full paths for SITE_URL.
            raise validation_error
        except Exception:
            raise forms.ValidationError('Path [%s] is not valid' % path)
        return path


class ReplacementAddonAdmin(admin.ModelAdmin):
    list_display = ('guid', 'path', 'guid_slug', '_url')
    form = ReplacementAddonForm

    def _url(self, obj):
        guid_param = urlencode({'guid': obj.guid})
        return format_html(
            '<a href="{}">Test</a>',
            reverse('addons.find_replacement') + '?%s' % guid_param)

    def guid_slug(self, obj):
        try:
            slug = models.Addon.objects.get(guid=obj.guid).slug
        except models.Addon.DoesNotExist:
            slug = ugettext(u'- Add-on not on AMO -')
        return slug

    def has_module_permission(self, request):
        # If one can see the changelist, then they have access to the module.
        return self.has_change_permission(request)

    def has_change_permission(self, request, obj=None):
        # If an obj is passed, then we're looking at the individual change
        # page for a replacement addon, otherwise we're looking at the list.
        # When looking at the list, we also allow users with Addons:Edit -
        # they won't be able to make any changes but they can see the list.
        if obj is not None:
            return super(ReplacementAddonAdmin, self).has_change_permission(
                request, obj=obj)
        else:
            return (
                acl.action_allowed(request, amo.permissions.ADDONS_EDIT) or
                super(ReplacementAddonAdmin, self).has_change_permission(
                    request, obj=obj))


admin.site.register(models.DeniedGuid)
admin.site.register(models.Addon, AddonAdmin)
admin.site.register(models.FrozenAddon, FrozenAddonAdmin)
admin.site.register(models.CompatOverride, CompatOverrideAdmin)
admin.site.register(models.ReplacementAddon, ReplacementAddonAdmin)
nilq/baby-python
python
def ext_gcd(p, q):
    if p == 0:
        return q, 0, 1
    else:
        # gcd, s_i, t_i
        gcd, u, v = ext_gcd(q % p, p)
        return gcd, v - (q // p) * u, u


p = 240
q = 46
gcd, u, v = ext_gcd(p, q)

print("[+] GCD: {}".format(gcd))
print("[+] u,v: {},{}".format(u, v))
print(f"\n[*] FLAG: crypto{{{u},{v}}}")
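# --- Added sanity check (not in the original) ---
# The returned coefficients satisfy Bezout's identity: u*p + v*q == gcd(p, q).
assert u * p + v * q == gcd  # (-9)*240 + 47*46 == 2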
nilq/baby-python
python
# Generated by Django 2.2.6 on 2019-11-21 17:17

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('selections', '0009_auto_20190529_0937'),
    ]

    operations = [
        migrations.AlterField(
            model_name='selection',
            name='is_no_target',
            field=models.BooleanField(default=False, verbose_name='This fragment does not contain a target'),
        ),
        migrations.AlterField(
            model_name='selection',
            name='tense',
            field=models.CharField(blank=True, max_length=200, verbose_name='Tense'),
        ),
    ]
nilq/baby-python
python
import urllib
import time


def main(request, response):
    index = request.request_path.index("?")
    args = request.request_path[index + 1:].split("&")
    headersSent = 0

    for arg in args:
        if arg.startswith("ignored"):
            continue
        elif arg.endswith("ms"):
            time.sleep(float(arg[0:-2]) / 1E3)
        elif arg.startswith("redirect:"):
            return (302, "WEBPERF MARKETING"), [("Location", urllib.unquote(arg[9:]))], "TEST"
        elif arg.startswith("mime:"):
            response.headers.set("Content-Type", urllib.unquote(arg[5:]))
        elif arg.startswith("send:"):
            text = urllib.unquote(arg[5:])
            if headersSent == 0:
                response.write_status_headers()
                headersSent = 1
            response.writer.write_content(text)
        # else:
        #     error " INVALID ARGUMENT %s" % arg
nilq/baby-python
python
import dotenv
from pathlib import Path

from .exceptions import EnvKeyNotFoundError, EnvNotFoundError

BASE_PATH = Path(__file__).resolve().parent.parent

if not (ENV := dotenv.dotenv_values(BASE_PATH / '.env')):
    raise EnvNotFoundError()

if not (BOT_CLIENT_TOKEN := ENV.get((key := 'BOT_CLIENT_TOKEN'))):
    raise EnvKeyNotFoundError(key)

DISCORD_API_ROOT = 'https://discord.com/api/v8/'
DISCORD_OAUTH_ROOT = DISCORD_API_ROOT + 'oauth2/'
DATABASE_PATH = BASE_PATH / 'db.sqlite3'
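# --- Added, hypothetical usage (not part of the original) ---
# The importing module's name is an assumption; Discord bot tokens are sent
# with a "Bot " prefix in the Authorization header.
from settings import BOT_CLIENT_TOKEN, DISCORD_API_ROOT  # module name assumed

headers = {"Authorization": f"Bot {BOT_CLIENT_TOKEN}"}
url = DISCORD_API_ROOT + "users/@me"  # e.g. fetch the bot's own user object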
nilq/baby-python
python
from django.db import models


class Customer(models.Model):
    id = models.AutoField(primary_key=True, null=False)
    name = models.CharField(max_length=200, null=False)
    keyAPI = models.CharField(max_length=200, null=False)
    pathTrainingDataSet = models.CharField(max_length=1000, null=True)
    status = models.BooleanField(default=1, null=False)

    class Meta:
        db_table = "Customer"


class User(models.Model):
    id = models.AutoField(primary_key=True, null=False)
    identificationProfileId = models.CharField(max_length=200, null=False)
    pathNN = models.CharField(max_length=1000, null=True)
    status = models.BooleanField(default=1, null=False)
    idCostumer = models.ForeignKey(Customer, on_delete=models.DO_NOTHING)

    class Meta:
        db_table = "User"


# class Choice(models.Model):
#     question = models.ForeignKey(Question, on_delete=models.CASCADE)
#     choice_text = models.CharField(max_length=200)
#     votes = models.IntegerField(default=0)
#     rating = models.CharField(max_length=400, default='some string')
#
#     def __str__(self):
#         return self.choice_text

# Create your models here.
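# --- Added, hypothetical ORM usage of the models above (values made up) ---
customer = Customer.objects.create(name="Acme", keyAPI="secret-key")
user = User.objects.create(
    identificationProfileId="profile-123",
    idCostumer=customer,  # note: the model spells the field 'idCostumer'
)
print(user.idCostumer.name)  # "Acme"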
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import configparser
import nbformat

from .static_text import Common, EvasionAttack

# Type of printing.
OK = 'ok'          # [*]
NOTE = 'note'      # [+]
FAIL = 'fail'      # [-]
WARNING = 'warn'   # [!]
NONE = 'none'      # No label.


# Create report.
class IpynbReport:
    def __init__(self, utility):
        self.utility = utility
        self.report_util = None
        # NOTE: self.lang ('en' or 'ja') and self.report_util are expected to
        # be set externally before the create_* methods are called.

        # Read config file.
        config = configparser.ConfigParser()
        self.file_name = os.path.basename(__file__)
        self.full_path = os.path.dirname(os.path.abspath(__file__))
        self.root_path = os.path.join(self.full_path, '../')
        config.read(os.path.join(self.root_path, 'config.ini'))

        # model/dataset path.
        self.model_path = ''
        self.dataset_path = ''
        self.label_path = ''
        self.dataset_num = 0

    # Create common part.
    def create_common(self, nb):
        self.utility.print_message(OK, 'Creating common part...')

        # Introduction.
        if self.lang == 'en':
            nb['cells'] = [
                nbformat.v4.new_markdown_cell(Common.md_report_title.value),
                nbformat.v4.new_markdown_cell(Common.md_1_1_title.value),
                nbformat.v4.new_markdown_cell(Common.md_1_1_text.value),
                nbformat.v4.new_markdown_cell(Common.md_1_2_title.value),
                nbformat.v4.new_markdown_cell(Common.md_1_2_text.value)
            ]
        else:
            nb['cells'] = [
                nbformat.v4.new_markdown_cell(Common.md_report_title.value),
                nbformat.v4.new_markdown_cell(Common.md_1_1_title_ja.value),
                nbformat.v4.new_markdown_cell(Common.md_1_1_text_ja.value),
                nbformat.v4.new_markdown_cell(Common.md_1_2_title_ja.value),
                nbformat.v4.new_markdown_cell(Common.md_1_2_text_ja.value)
            ]

        # Preparation
        if self.lang == 'en':
            nb['cells'].extend([nbformat.v4.new_markdown_cell(Common.md_2_title.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_text.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_1_title.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_1_text.value),
                                nbformat.v4.new_code_cell(Common.cd_2_1_code.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_2_title.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_2_text.value),
                                nbformat.v4.new_code_cell(Common.cd_2_2_code.value.format(self.dataset_path,
                                                                                          self.dataset_num,
                                                                                          self.label_path)),
                                nbformat.v4.new_markdown_cell(Common.md_2_3_title.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_3_text.value),
                                nbformat.v4.new_code_cell(Common.cd_2_3_code.value.format(self.model_path)),
                                nbformat.v4.new_markdown_cell(Common.md_2_4_title.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_4_text.value),
                                nbformat.v4.new_code_cell(Common.cd_2_4_code.value),
                                ])
        else:
            nb['cells'].extend([nbformat.v4.new_markdown_cell(Common.md_2_title_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_text_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_1_title_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_1_text_ja.value),
                                nbformat.v4.new_code_cell(Common.cd_2_1_code_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_2_title_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_2_text_ja.value),
                                nbformat.v4.new_code_cell(Common.cd_2_2_code_ja.value.format(self.dataset_path,
                                                                                             self.dataset_num,
                                                                                             self.label_path)),
                                nbformat.v4.new_markdown_cell(Common.md_2_3_title_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_3_text_ja.value),
                                nbformat.v4.new_code_cell(Common.cd_2_3_code_ja.value.format(self.model_path)),
                                nbformat.v4.new_markdown_cell(Common.md_2_4_title_ja.value),
                                nbformat.v4.new_markdown_cell(Common.md_2_4_text_ja.value),
                                nbformat.v4.new_code_cell(Common.cd_2_4_code_ja.value),
                                ])

        self.utility.print_message(OK, 'Done creating common part.')
        return nb

    # Create evasion (FGSM) part.
    def create_evasion_fgsm(self, nb, aes_path):
        self.utility.print_message(OK, 'Creating Evasion (FGSM) part...')

        # FGSM.
        if self.lang == 'en':
            nb['cells'].extend([nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_title.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_text.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_1_title.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_1_text.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_2_title.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_2_code.value.format(aes_path)),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_3_title.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_3_code.value.format(self.dataset_num)),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_4_title.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_4_code.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_5_title.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_5_code.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_6_title.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_6_code.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_7_title.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_7_text.value),
                                ])
        else:
            nb['cells'].extend([nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_title_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_text_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_1_title_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_1_text_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_2_title_ja.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_2_code_ja.value.format(aes_path)),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_3_title_ja.value),
                                nbformat.v4.new_code_cell(
                                    EvasionAttack.cd_ae_fgsm_3_code_ja.value.format(self.dataset_num)),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_4_title_ja.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_4_code_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_5_title_ja.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_5_code_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_6_title_ja.value),
                                nbformat.v4.new_code_cell(EvasionAttack.cd_ae_fgsm_6_code_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_7_title_ja.value),
                                nbformat.v4.new_markdown_cell(EvasionAttack.md_ae_fgsm_7_text_ja.value),
                                ])

        self.utility.print_message(OK, 'Done creating Evasion (FGSM) part.')
        return nb

    # Create report.
    def create_report(self):
        self.utility.print_message(NOTE, 'Creating report...')
        nb = nbformat.v4.new_notebook()

        # Report Setting.
        self.model_path = self.report_util.template_target['model_path']
        self.dataset_path = self.report_util.template_target['dataset_path']
        self.label_path = self.report_util.template_target['label_path']
        self.dataset_num = self.report_util.template_target['dataset_num']

        # Create common part.
        nb = self.create_common(nb)

        # Create replay part.
        report_name = ''
        report_full_path = ''
        if self.report_util.template_data_poisoning['exist']:
            self.utility.print_message(WARNING, 'Not implemented.')
        elif self.report_util.template_model_poisoning['exist']:
            self.utility.print_message(WARNING, 'Not implemented.')
        elif self.report_util.template_evasion['exist']:
            if self.report_util.template_evasion['fgsm']['exist']:
                # Create FGSM.
                report_name = 'evasion_fgsm.ipynb'
                nb = self.create_evasion_fgsm(nb, self.report_util.template_evasion['fgsm']['aes_path'])
                report_full_path = os.path.join(self.report_util.report_path, report_name)
                with open(report_full_path, 'w') as fout:
                    nbformat.write(nb, fout)
                self.report_util.template_evasion['fgsm']['ipynb_path'] = report_full_path
            if self.report_util.template_evasion['cnw']['exist']:
                # Create C&W.
                self.utility.print_message(WARNING, 'Not implemented.')
            if self.report_util.template_evasion['jsma']['exist']:
                # Create JSMA.
                self.utility.print_message(WARNING, 'Not implemented.')
        elif self.report_util.template_exfiltration['exist']:
            self.utility.print_message(WARNING, 'Not implemented.')

        self.utility.print_message(NOTE, 'Done creating report.')
        return self.report_util, report_name
nilq/baby-python
python
""" pygments.lexers.email ~~~~~~~~~~~~~~~~~~~~~ Lexer for the raw E-mail. :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, DelegatingLexer, bygroups from pygments.lexers.mime import MIMELexer from pygments.token import Text, Keyword, Name, String, Number, Comment from pygments.util import get_bool_opt __all__ = ["EmailLexer"] class EmailHeaderLexer(RegexLexer): """ Sub-lexer for raw E-mail. This lexer only process header part of e-mail. .. versionadded:: 2.5 """ def __init__(self, **options): super().__init__(**options) self.highlight_x = get_bool_opt(options, "highlight-X-header", False) def get_x_header_tokens(self, match): if self.highlight_x: # field yield match.start(1), Name.Tag, match.group(1) # content default_actions = self.get_tokens_unprocessed( match.group(2), stack=("root", "header")) yield from default_actions else: # lowlight yield match.start(1), Comment.Special, match.group(1) yield match.start(2), Comment.Multiline, match.group(2) tokens = { "root": [ (r"^(?:[A-WYZ]|X400)[\w\-]*:", Name.Tag, "header"), (r"^(X-(?:\w[\w\-]*:))([\s\S]*?\n)(?![ \t])", get_x_header_tokens), ], "header": [ # folding (r"\n[ \t]", Text.Whitespace), (r"\n(?![ \t])", Text.Whitespace, "#pop"), # keywords (r"\bE?SMTPS?\b", Keyword), (r"\b(?:HE|EH)LO\b", Keyword), # mailbox (r"[\w\.\-\+=]+@[\w\.\-]+", Name.Label), (r"<[\w\.\-\+=]+@[\w\.\-]+>", Name.Label), # domain (r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function), # IPv4 ( r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0" r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)", Number.Integer, ), # IPv6 (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex), (r"(?<=\b):((:[0-9a-fA-F]{1,4}){1,7}|:)(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}(?=\b)", Number.Hex), (r"(?<=\b)[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})(?=\b)", Number.Hex), (r"(?<=\b)fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}(?=\b)", Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)", Number.Hex), ( r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}" r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" r"[0-9])(?=\b)", Number.Hex, ), ( r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-" r"9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-" r"9])(?=\b)", Number.Hex, ), # Date time ( r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3[" r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+(" r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])" r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-" r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)", Name.Decorator, ), # RFC-2047 encoded string ( r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\" r"\]^_`{|}~]+)(\?=)", bygroups( String.Affix, Name.Constant, String.Affix, Keyword.Constant, String.Affix, Number.Hex, String.Affix ) ), # others (r'[\s]+', Text.Whitespace), (r'[\S]', Text), ], } class EmailLexer(DelegatingLexer): """ Lexer for raw E-mail. Additional options accepted: `highlight-X-header` Highlight the fields of ``X-`` user-defined email header. 
(default: ``False``). .. versionadded:: 2.5 """ name = "E-mail" aliases = ["email", "eml"] filenames = ["*.eml"] mimetypes = ["message/rfc822"] def __init__(self, **options): super().__init__(EmailHeaderLexer, MIMELexer, Comment, **options)
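# --- Added usage sketch (standard pygments API; the sample message is made up) ---
from pygments import highlight
from pygments.formatters import TerminalFormatter

raw_email = (
    "From: alice@example.com\n"
    "To: bob@example.com\n"
    "Date: Mon, 01 Mar 2021 12:00:00 +0000\n"
    "Subject: hello\n"
    "\n"
    "body text\n"
)

# EmailLexer delegates the header to EmailHeaderLexer and the body to MIMELexer.
print(highlight(raw_email, EmailLexer(), TerminalFormatter()))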
nilq/baby-python
python
# import dota_utils as util
import os
# import cv2
import json
# from PIL import Image
import xmltodict
import xml.etree.ElementTree as ET
# from ShipRSImageNet_devkit import ShipRSImageNet_utils as util
# from collections import OrderedDict

wordname_50 = ['Other Ship', 'Other Warship', 'Submarine', 'Other Aircraft Carrier', 'Enterprise', 'Nimitz', 'Midway',
               'Ticonderoga', 'Other Destroyer', 'Atago DD', 'Arleigh Burke DD', 'Hatsuyuki DD', 'Hyuga DD',
               'Asagiri DD', 'Other Frigate', 'Perry FF', 'Patrol', 'Other Landing', 'YuTing LL', 'YuDeng LL',
               'YuDao LL', 'YuZhao LL', 'Austin LL', 'Osumi LL', 'Wasp LL', 'LSD 41 LL', 'LHA LL', 'Commander',
               'Other Auxiliary Ship', 'Medical Ship', 'Test Ship', 'Training Ship', 'AOE', 'Masyuu AS',
               'Sanantonio AS', 'EPF', 'Other Merchant', 'Container Ship', 'RoRo', 'Cargo', 'Barge', 'Tugboat',
               'Ferry', 'Yacht', 'Sailboat', 'Fishing Vessel', 'Oil Tanker', 'Hovercraft', 'Motorboat', 'Dock']

# An earlier, commented-out variant of wordname_50 existed in the source; it
# differed only in using 'Frigate' instead of 'Other Frigate'.


def ShipImageNet2COCOTrain(filenames, destfile, cls_names, level_num):
    # set difficult to filter: '2', '1', or do not filter: '-1'
    # imageparent = os.path.join(srcpath, 'JPEGImages')
    # labelparent = os.path.join(srcpath, 'Annotations_v2')
    if level_num == 3:
        level_class = 'level_3'
    elif level_num == 2:
        level_class = 'level_2'
    elif level_num == 1:
        level_class = 'level_1'
    else:
        level_class = 'level_0'

    data_dict = {}
    data_dict['images'] = []
    data_dict['categories'] = []
    data_dict['annotations'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(labelparent)
        for file in filenames:
            doc = xmltodict.parse(open(file).read())
            tree = ET.parse(file)
            root = tree.getroot()

            single_image = {}
            single_image['file_name'] = str(doc['annotation']['filename'])
            single_image['id'] = image_id
            single_image['width'] = int(doc['annotation']['size']['width'])
            single_image['height'] = int(doc['annotation']['size']['height'])
            # print(single_image)
            data_dict['images'].append(single_image)

            # annotations
            for obj in root.iter('object'):
                single_obj = {}
                single_obj['area'] = float(obj.find('Ship_area').text)
                single_obj['category_id'] = int(obj.find(level_class).text)
                single_obj['segmentation'] = []
                x1 = float(obj.find('polygon').find("x1").text)
                y1 = float(obj.find('polygon').find("y1").text)
                x2 = float(obj.find('polygon').find("x2").text)
                y2 = float(obj.find('polygon').find("y2").text)
                x3 = float(obj.find('polygon').find("x3").text)
                y3 = float(obj.find('polygon').find("y3").text)
                x4 = float(obj.find('polygon').find("x4").text)
                y4 = float(obj.find('polygon').find("y4").text)
                single_obj['segmentation'] = x1, y1, x2, y2, x3, y3, x4, y4
                single_obj['iscrowd'] = 0
                xmin = int(obj.find('bndbox').find("xmin").text)
                ymin = int(obj.find('bndbox').find("ymin").text)
                xmax = int(obj.find('bndbox').find("xmax").text)
                ymax = int(obj.find('bndbox').find("ymax").text)
                width, height = xmax - xmin, ymax - ymin
                # compute the rotation angle of the rotated bounding box
                # roted_box = util.polygonToRotRectangle([x1, y1, x2, y2, x3, y3, x4, y4])
                # xcenter, ycenter, width, height, angle = roted_box
                single_obj['bbox'] = xmin, ymin, width, height
                single_obj['image_id'] = image_id
                data_dict['annotations'].append(single_obj)
                single_obj['id'] = inst_count
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)
    print('Total images:', image_id - 1)  # was labelled 'Total Instances' but counted images


def ShipImageNet2COCOTest(filenames, destfile, cls_names):
    # imageparent = os.path.join(srcpath, 'JPEGImages')
    data_dict = {}
    data_dict['images'] = []
    data_dict['categories'] = []
    for idex, name in enumerate(cls_names):
        single_cat = {'id': idex + 1, 'name': name, 'supercategory': name}
        data_dict['categories'].append(single_cat)

    image_id = 1
    with open(destfile, 'w') as f_out:
        # filenames = util.GetFileFromThisRootDir(labelparent)
        for file in filenames:
            doc = xmltodict.parse(open(file).read())
            single_image = {}
            single_image['file_name'] = str(doc['annotation']['filename'])
            single_image['id'] = image_id
            single_image['width'] = int(doc['annotation']['size']['width'])
            single_image['height'] = int(doc['annotation']['size']['height'])
            data_dict['images'].append(single_image)
            image_id = image_id + 1
        json.dump(data_dict, f_out)


def get_filenames(rootdir, file_dir, set_name):
    dataset_name = set_name + '.txt'
    list_file = os.path.join(file_dir, dataset_name)  # fixed: used the global text_dir instead of the file_dir parameter
    filenames = list()
    with open(list_file, "rb") as f:
        for line in f:
            file_name = str(line.strip(), encoding="utf-8")
            # print(file_name)
            file_xml = file_name.replace('.bmp', '.xml')
            annotation_path = os.path.join(rootdir, file_xml)
            filenames.append(annotation_path)
    return filenames


if __name__ == '__main__':
    rootdir = '/home/ssd/dataset/ShipRSImageNet/VOC_Format/Annotations/'
    text_dir = '/home/ssd/dataset/ShipRSImageNet/VOC_Format/ImageSets/'
    out_dir = '/home/zzn/Documents/zhangzhn_workspace/pycharm/ship_dataset/COCO_Format/'
    level_num = 0
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    train_filenames = get_filenames(rootdir, text_dir, 'train')
    val_filenames = get_filenames(rootdir, text_dir, 'val')
    test_filenames = get_filenames(rootdir, text_dir, 'test')
    # print(train_filenames)
    # print('\n')

    train_json_file_name = "{}ShipRSImageNet_bbox_train_level_{}.json".format(out_dir, level_num)
    val_json_file_name = "{}ShipRSImageNet_bbox_val_level_{}.json".format(out_dir, level_num)
    test_json_file_name = "{}ShipRSImageNet_bbox_test_level_{}.json".format(out_dir, level_num)

    ShipImageNet2COCOTrain(train_filenames, train_json_file_name, wordname_50, level_num)
    ShipImageNet2COCOTrain(val_filenames, val_json_file_name, wordname_50, level_num)
    ShipImageNet2COCOTest(test_filenames, test_json_file_name, wordname_50)
    print('Finished')
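# --- Added illustration (values made up): shape of the JSON written above ---
example_data_dict = {
    "images": [{"file_name": "000001.bmp", "id": 1, "width": 930, "height": 930}],
    "categories": [{"id": 1, "name": "Other Ship", "supercategory": "Other Ship"}],
    "annotations": [{
        "id": 1, "image_id": 1, "category_id": 1, "iscrowd": 0, "area": 1200.0,
        "bbox": (40, 50, 30, 40),  # xmin, ymin, width, height
        "segmentation": (40.0, 50.0, 70.0, 50.0, 70.0, 90.0, 40.0, 90.0),  # x1,y1..x4,y4
    }],
}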
nilq/baby-python
python
from draw2d import Viewer, Text, Line, Rectangle, Frame, Point, Circle
import math, time, random

viewer = Viewer(600, 600)

W = 1.0
F = viewer.frame(0., W, 0., W)

F.add(Text("North", anchor_x="center", anchor_y="top", color=(0.2, 0.2, 1.0)).move_to(0.5, 0.9))
F.add(Text("South", anchor_x="center", anchor_y="bottom", color=(1.0, 1.0, 0.1)).move_to(0.5, 0.1))
F.add(Text("East", anchor_x="right", anchor_y="center", color=(0.2, 1.0, 1.0)).move_to(0.9, 0.5))
F.add(Text("West", anchor_x="left", anchor_y="center", color=(1.0, 0.2, 0.1)).move_to(0.1, 0.5))

fly = Frame()
fly.add(Circle(radius=0.01).color(1, 1, 1))
label = Text("").move_to(0.01, 0.01)
vlabel = Text("", rotation=0.0, anchor_x="left", anchor_y="center").move_to(0.02, 0.0)
fly.add(label)
fly.add(vlabel)
F.add(fly, "fly")

x, y = random.random(), random.random()
vx, vy = 0.0, 0.0
vmax = 0.5
r = random.random()
omega = 0.0
max_omega = 0.1
tau = 0.1

while True:
    x += vx * tau
    y += vy * tau
    r += omega * tau

    # bounce off the walls with some damping
    if x < 0.0 or x > W:
        vx = -vx * 0.8
    if y < 0.0 or y > W:
        vy = -vy * 0.8
    x = max(0.0, min(W, x))
    y = max(0.0, min(W, y))

    ax, ay = (2 * random.random() - 1) * vmax / 10, (2 * random.random() - 1) * vmax / 10
    vx += ax * tau  # fixed: the original applied ay to vx
    vy += ay * tau
    vx = max(-vmax, min(vmax, vx))
    vy = max(-vmax, min(vmax, vy))

    omega += (2 * random.random() - 1) * max_omega / 10
    omega = max(-max_omega, min(max_omega, omega))  # fixed: the original had the clamp bounds swapped

    fly.move_to(x, y).rotate_to(r)
    label.Text = "[xy: %.3f:%.3f]" % (x, y)
    vlabel.Text = "[vxy: %.3f:%.3f]" % (vx, vy)
    viewer.render()
    time.sleep(tau)
nilq/baby-python
python
class Student():
    # Class variables (shared by all instances)
    # name = ''
    sum = 0
    age = 0

    def __init__(self, name, age):
        # Instance variables
        self.name = name
        self.age = age
        self.__score = 0
        # print(name)  # xiaoming
        # print(age)   # 18
        print(Student.age)
        print(self.__class__.age)
        self.__class__.sum += 1
        print('Current student count: ' + str(self.__class__.sum))

    def say(self):
        print('my name is ' + self.name + ' my age is ' + str(self.age))
        self.__score = 10
        self.__dohomework()

    # Class method (cls) -- callable from both instances and the class
    @classmethod
    def plus_sum(cls):
        print(cls.sum)

    # Static method -- callable from both instances and the class
    @staticmethod
    def add(x, y):
        print(Student.sum + x + y)

    def __dohomework(self):
        print('homework')

# public: accessible from outside
# private: a leading __ makes a name private (via name mangling)
# Calling a private method from outside raises an error, but assigning to a
# "private" attribute from outside does not fail: because of Python's dynamic
# nature, it simply adds a new, unmangled attribute to the instance.
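# --- Added demonstration of the points above (not in the original) ---
s = Student('xiaoming', 18)  # prints the class-level age twice, then the running count
s.say()                      # instance method; also calls the private __dohomework
Student.plus_sum()           # class method invoked on the class
s.add(1, 2)                  # static method invoked on an instance

s.__score = 99               # does NOT touch the real attribute...
print(s._Student__score)     # ...which lives under its name-mangled name (10 after say())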
nilq/baby-python
python
import os

import pytest
from ci_framework import FlopyTestSetup, base_test_dir

import flopy

base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)

pthtest = os.path.join("..", "examples", "data", "swtv4_test")

swtv4_exe = "swtv4"
isswtv4 = flopy.which(swtv4_exe)
runmodel = False
verbose = False

swtdir = [
    "1_box", "1_box",
    "2_henry", "2_henry", "2_henry", "2_henry", "2_henry", "2_henry",
    "3_elder", "4_hydrocoin", "5_saltlake",
    "6_rotation", "6_rotation",
    "7_swtv4_ex", "7_swtv4_ex", "7_swtv4_ex", "7_swtv4_ex", "7_swtv4_ex",
    "7_swtv4_ex", "7_swtv4_ex",
]

subds = [
    "case1", "case2",
    "1_classic_case1", "2_classic_case2", "3_VDF_no_Trans",
    "4_VDF_uncpl_Trans", "5_VDF_DualD_Trans", "6_age_simulation",
    "", "", "",
    "1_symmetric", "2_asymmetric",
    "case1", "case2", "case3", "case4", "case5", "case6", "case7",
]


def test_seawat_array_format():
    test_setup = FlopyTestSetup(verbose=True)

    d = "2_henry"
    subds = ["1_classic_case1"]
    for subd in subds:
        pth = os.path.join(pthtest, d, subd)
        model_ws = os.path.join(
            f"{base_dir}_test_seawat_array_format_{d}-{subd}"
        )
        test_setup.add_test_dir(model_ws)

        namfile = "seawat.nam"
        if subd == "6_age_simulation":
            namfile = "henry_mod.nam"
        m = flopy.seawat.Seawat.load(namfile, model_ws=pth, verbose=verbose)
        m.change_model_ws(model_ws, reset_external=True)
        m.bcf6.hy[0].fmtin = "(BINARY)"
        m.btn.prsity[0].fmtin = "(BINARY)"
        m.write_input()
        if isswtv4 is not None and runmodel:
            success, buff = m.run_model(silent=False)
            assert success, f"{m.name} did not run"
    return


@pytest.mark.parametrize(
    "d, subd",
    zip(swtdir, subds),
)
def test_swtv4(d, subd):
    run_swtv4(d, subd)
    return


def run_swtv4(d, subd):
    test_setup = FlopyTestSetup(verbose=True)
    model_ws = os.path.join(f"{base_dir}_test_swtv4_{d}-{subd}")
    test_setup.add_test_dir(model_ws)

    # set up paths
    pth = os.path.join(pthtest, d, subd)
    namfile = "seawat.nam"
    if subd == "6_age_simulation":
        namfile = "henry_mod.nam"

    # load the existing model
    m = flopy.seawat.swt.Seawat.load(namfile, model_ws=pth, verbose=verbose)

    # change working directory
    m.change_model_ws(model_ws)

    # write input files
    m.write_input()

    # run the model
    if isswtv4 is not None and runmodel:
        success, buff = m.run_model(silent=False)
        assert success, f"{m.name} did not run"


if __name__ == "__main__":
    for d, subd in zip(swtdir, subds):
        run_swtv4(d, subd)
    test_seawat_array_format()
nilq/baby-python
python
from ursina import *

from model.pion import PionBlanc, PionNoir


class VuePion(Entity):
    def __init__(self, position, qubic, *args, **kwargs):
        self.qubic = qubic
        super().__init__(
            position=position,
            *args, **kwargs
        )


class VuePionFactory:
    def __init__(self, qubic, pion='Classic'):
        """
        Args:
            pion: the piece type (the skin)
            qubic: the qubic board
        """
        super().__init__()
        pion_types = {'Classic': self.create_classic}
        self.create_pion = pion_types.get(pion)
        self.qubic = qubic

    def create_classic(self, position, **kwargs):
        vp = VuePion(position, self.qubic,
                     model='classic',
                     origin=(0, -0.5),
                     # texture='classic',
                     **kwargs)
        vp.scale = 0.5
        vp.y = vp.y * vp.scale[1]
        if self.qubic.get_pion(position) == PionBlanc:
            vp.color = color.white
        elif self.qubic.get_pion(position) == PionNoir:
            vp.color = color.dark_gray
        else:
            vp.color = color.black50
        return vp
nilq/baby-python
python
import db_handler

ZONE_MAPPING = {
    27721: 3, 27767: 9, -2: 7, 45041: 8, 27723: 3, -6: 5, 27724: 5, 115_092: 5,
    33130: 5, 27770: 2, 27726: 5, 61204: 4, 117_928: 4, 30754: 9, 35673: 8,
    27774: 8, 27775: 8, 110_924: 8, 130_226: 12, 27779: 12, 33401: 12, 27730: 3,
    -7: 3, 27781: 7, 30407: 12, 27783: 12, 27784: 11, 104_884: 1, 27746: 3,
    57333: 10, 29349: 7, 29192: 7, 122_767: 3, 27790: 2, 27791: 12, 30913: 9,
    27745: 2, 27792: 9, 29725: 12, 27788: 11, 27747: 1, 27796: 7, 27748: 6,
    27750: 2, 39796: 6, 48149: 4, 27753: 6, 116_362: 7, 27814: 8, 27754: 6,
    111_197: 11, 29439: 11, 27804: 11, 27805: 11, 27807: 10, 97579: 2,
    105_249: 4, 52963: 4, 27757: 4, 45984: 10, 46497: 7, 109_971: 1, 27759: 1,
    27760: 1, 29586: 10, 102_613: 10, 29241: 5, 27764: 2, 27742: 3, 27812: 11,
    128_919: 7, 27766: 3, 27816: 7, 44342: 1, 27818: 9, 46134: 1,
}


def get_zones(struct, year=2020):
    data = db_handler.get_data_object_from_db(year, struct)
    clubs = [
        c for c in data.get_district_clubs(include_officers=False)
        if not c.is_closed
    ]
    clubs.sort(key=lambda x: x.name)
    d = {}
    for club in clubs:
        z_id = int(input(f"{club.name}: "))
        d[club.id] = z_id
    insert_zone_mapping(d, struct, year)


def insert_zone_mapping(mapping, struct, year=2020):
    data = db_handler.get_data_object_from_db(year, struct)
    tc = data.db.tables["clubzone"]
    vals = [{"year": year, "club_id": k, "zone_id": 45 + v}
            for (k, v) in mapping.items()]  # fixed: year was hard-coded to 2020, ignoring the parameter
    data.db.conn.execute(tc.insert(vals))


get_zones("410W")
# insert_zone_mapping("410W")
nilq/baby-python
python
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from types import SimpleNamespace
from typing import Dict

from playwright.connection import ChannelOwner


class BrowserServer(ChannelOwner):

    Events = SimpleNamespace(Close="close")

    def __init__(
        self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
    ) -> None:
        super().__init__(parent, type, guid, initializer)
        self._channel.on("close", lambda _: self.emit(BrowserServer.Events.Close))

    @property
    def pid(self) -> str:
        return self._initializer["pid"]

    @property
    def wsEndpoint(self) -> str:
        return self._initializer["wsEndpoint"]

    async def kill(self) -> None:
        await self._channel.send("kill")

    async def close(self) -> None:
        await self._channel.send("close")
nilq/baby-python
python
# -*- test-case-name: mimic.test.test_cinder -*-
"""
Defines a mock for Cinder
"""

import json
from uuid import uuid4

from six import text_type
from zope.interface import implementer

from twisted.plugin import IPlugin

from mimic.rest.mimicapp import MimicApp
from mimic.catalog import Entry
from mimic.catalog import Endpoint
from mimic.imimic import IAPIMock


@implementer(IAPIMock, IPlugin)
class CinderApi(object):
    """
    Rest endpoints for mocked Cinder Api.
    """

    def __init__(self, regions=["DFW", "ORD", "IAD"]):
        """
        Create a CinderApi.
        """
        self._regions = regions

    def catalog_entries(self, tenant_id):
        """
        List catalog entries for the Cinder API.
        """
        return [
            Entry(
                tenant_id, "volume", "cloudBlockStorage",
                [
                    Endpoint(tenant_id, region, text_type(uuid4()), prefix="v2")
                    for region in self._regions
                ]
            )
        ]

    def resource_for_region(self, region, uri_prefix, session_store):
        """
        Get an :obj:`twisted.web.iweb.IResource` for the given URI prefix;
        implement :obj:`IAPIMock`.
        """
        return CinderMock(self, uri_prefix, session_store, region).app.resource()


class CinderMock(object):
    """
    Cinder Mock (the original docstring said "DNS Mock", a copy-paste slip)
    """

    def __init__(self, api_mock, uri_prefix, session_store, name):
        """
        Create a Cinder region with a given URI prefix
        """
        self.uri_prefix = uri_prefix
        self._api_mock = api_mock
        self._session_store = session_store
        self._name = name

    app = MimicApp()

    @app.route('/v2/<string:tenant_id>/volumes', methods=['GET'])
    def get_volumes(self, request, tenant_id):
        """
        Lists summary information for all Block Storage volumes that the
        tenant can access.
        http://developer.openstack.org/api-ref-blockstorage-v2.html#getVolumesSimple
        """
        request.setResponseCode(200)
        return json.dumps({'volumes': []})
nilq/baby-python
python
import unittest

import pandas as pd
from sklearn.metrics import classification_report

import tests.test_utils as t
from nlu import *


class SentimentTrainingTests(unittest.TestCase):

    def test_sentiment_training(self):
        # sentiment dataset
        df_train = self.load_sentiment_dl_dataset()  # '/home/loan/Documents/freelancework/jsl/nlu/4realnlugit/tests/datasets/sentiment_dl/AllProductReviews.csv'
        print(df_train.columns)

        # convert int to str labels so our model predicts strings, not numbers
        # the text data to use for classification should be in a column named 'text'
        df_train['text'] = df_train['text_data']
        # the label column must have name 'y' and be of type str
        df_train['y'] = df_train['Sentiment'].astype(str)
        df_train.y = df_train.y.astype(str)
        df_train.y = df_train.y.str.replace('-1', 'negative')
        df_train.y = df_train.y.str.replace('1', 'positive')
        df_train = df_train.iloc[0:100]

        pipe = nlu.load('train.sentiment', verbose=True)
        pipe = pipe.fit(df_train)
        df = pipe.predict(df_train)
        print(df)
        print(df.columns)
        for c in df.columns:
            print(df[c])
        # print(df[['sentiment', 'sentiment_confidence']])
        # print(df.sentiment.value_counts())
        # print(df.sentiment_confidence.value_counts())

    def test_sentiment_training_with_custom_embeds_document_level(self):
        # sentiment dataset
        df_train = self.load_sentiment_dl_dataset()
        # the text data to use for classification should be in a column named 'text'
        df_train['text'] = df_train['text_data']
        # the label column must have name 'y' and be of type str
        df_train['Sentiment'] = df_train['Sentiment']
        df_train['y'] = df_train['Sentiment'].astype(str)
        df_train.y = df_train.y.str.replace('-1', 'negative')
        df_train.y = df_train.y.str.replace('1', 'positive')
        # df_train = df_train.iloc[0:4000]

        pipe = nlu.load('use train.sentiment', verbose=True)
        pipe = pipe.fit(df_train)
        # df = fitted_pipe.predict(' I love NLU!')
        df = pipe.predict(df_train.iloc[0:500], output_level='document')
        for c in df.columns:
            print(df[c])
        # print(df)
        # print(df.columns)
        # print(df[['sentiment', 'sentiment_confidence']])
        # print(df.sentiment.value_counts())
        # print(df.sentiment_confidence.value_counts())

    # TODO test whether the bad performance persists in Spark NLP with non-USE sentence embeddings
    def test_sentiment_training_with_custom_embeds_sentence_level(self):
        # sentiment dataset
        df_train = self.load_sentiment_dl_dataset()
        # the text data to use for classification should be in a column named 'text'
        df_train['text'] = df_train['text_data']
        # the label column must have name 'y' and be of type str
        df_train['Sentiment'] = df_train['Sentiment']
        df_train['y'] = df_train['Sentiment'].astype(str)
        df_train.y = df_train.y.str.replace('-1', 'negative')
        df_train.y = df_train.y.str.replace('1', 'positive')
        # df_train = df_train.iloc[0:4000]

        pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.sentiment', verbose=True)
        pipe.print_info()
        pipe['sentiment_dl'].setMaxEpochs(1)
        pipe = pipe.fit(df_train)
        # df = fitted_pipe.predict(' I love NLU!')
        df = pipe.predict(df_train.iloc[0:50], output_level='sentence')

        s_path = 'saved_models/training_custom_embeds'
        pipe.save(s_path)
        hdd_pipe = nlu.load(path=s_path)
        print(hdd_pipe.predict("YESSSSSSSSSSSSSSSSSSSSSSSSSSSSSsss"))

        for c in df.columns:
            print(df[c])
        # print(df.columns)
        # print(df)
        # print(df[['sentiment', 'sentiment_confidence']])
        # print(df.sentiment.value_counts())
        # print(df.sentiment_confidence.value_counts())

    def load_sentiment_dl_dataset(self):
        output_file_name = 'stock.csv'
        output_folder = 'sentiment/'
        data_url = 'http://ckl-it.de/wp-content/uploads/2020/12/stock_data.csv'
        return pd.read_csv(t.download_dataset(data_url, output_file_name, output_folder),
                           error_bad_lines=False).iloc[0:100]


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
import ntpath
import os
import sys
import tempfile
import unittest
from itertools import count

try:
    from unittest.mock import Mock, patch, call, mock_open
except ImportError:
    from mock import Mock, patch, call, mock_open

from flask import Flask, render_template_string, Blueprint
import six

import flask_s3
from flask_s3 import FlaskS3


class FlaskStaticTest(unittest.TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True

        @self.app.route('/<url_for_string>')
        def a(url_for_string):
            return render_template_string(url_for_string)

    def test_jinja_url_for(self):
        """ Tests that the jinja global gets assigned correctly. """
        self.assertNotEqual(self.app.jinja_env.globals['url_for'],
                            flask_s3.url_for)
        # then we initialise the extension
        FlaskS3(self.app)
        self.assertEquals(self.app.jinja_env.globals['url_for'],
                          flask_s3.url_for)

    # Temporarily commented out
    """
    def test_config(self):
        ""
        Tests configuration vars exist.
        ""
        FlaskS3(self.app)
        defaults = ('S3_USE_HTTP', 'USE_S3',
                    'USE_S3_DEBUG', 'S3_BUCKET_DOMAIN',
                    'S3_CDN_DOMAIN', 'S3_USE_CACHE_CONTROL',
                    'S3_HEADERS', 'S3_URL_STYLE')
        for default in defaults:
            self.assertIn(default, self.app.config)
    """


class UrlTests(unittest.TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.app.config['FLASKS3_BUCKET_NAME'] = 'foo'
        self.app.config['FLASKS3_USE_HTTPS'] = True
        self.app.config['FLASKS3_BUCKET_DOMAIN'] = 's3.amazonaws.com'
        self.app.config['FLASKS3_CDN_DOMAIN'] = ''
        self.app.config['FLASKS3_OVERRIDE_TESTING'] = True

        @self.app.route('/<url_for_string>')
        def a(url_for_string):
            return render_template_string(url_for_string)

        @self.app.route('/')
        def b():
            return render_template_string("{{url_for('b')}}")

        bp = Blueprint('admin', __name__, static_folder='admin-static')

        @bp.route('/<url_for_string>')
        def c():
            return render_template_string("{{url_for('b')}}")

        self.app.register_blueprint(bp)

    def client_get(self, ufs):
        FlaskS3(self.app)
        client = self.app.test_client()
        if six.PY3:
            return client.get('/%s' % ufs)
        elif six.PY2:
            return client.get('/{}'.format(ufs))

    def test_required_config(self):
        """
        Tests that ValueError is raised if the bucket address is not provided.
        """
        raises = False
        del self.app.config['FLASKS3_BUCKET_NAME']
        try:
            ufs = "{{url_for('static', filename='bah.js')}}"
            self.client_get(ufs)
        except ValueError:
            raises = True
        self.assertTrue(raises)

    def test_url_for(self):
        """
        Tests that the correct url is formed for a static asset in self.app.
        """
        # non static endpoint url_for in template
        self.assertEquals(self.client_get('').data, six.b('/'))
        # static endpoint url_for in template
        ufs = "{{url_for('static', filename='bah.js')}}"
        exp = 'https://foo.s3.amazonaws.com/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_per_url_scheme(self):
        """
        Tests that if _scheme is passed in the url_for arguments, that
        scheme is used instead of the configuration scheme.
        """
        # check _scheme overridden per url
        ufs = "{{url_for('static', filename='bah.js', _scheme='http')}}"
        exp = 'http://foo.s3.amazonaws.com/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_handles_special_args(self):
        """
        Tests that if any special arguments are passed, they are ignored and
        removed from the generated url. As of this writing these are the
        special args: _external, _anchor, _method (from flask's url_for)
        """
        # check _external, _anchor, and _method are ignored, and not added
        # to the url
        ufs = "{{url_for('static', filename='bah.js',\
               _external=True, _anchor='foobar', _method='GET')}}"
        exp = 'https://foo.s3.amazonaws.com/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_debug(self):
        """Tests Flask-S3 behaviour in debug mode."""
        self.app.debug = True
        # static endpoint url_for in template
        ufs = "{{url_for('static', filename='bah.js')}}"
        exp = '/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_debug_override(self):
        """Tests Flask-S3 behaviour in debug mode with USE_S3_DEBUG
        turned on."""
        self.app.debug = True
        self.app.config['FLASKS3_DEBUG'] = True
        ufs = "{{url_for('static', filename='bah.js')}}"
        exp = 'https://foo.s3.amazonaws.com/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_blueprint(self):
        """
        Tests that the correct url is formed for a static asset in a blueprint.
        """
        # static endpoint url_for in template
        ufs = "{{url_for('admin.static', filename='bah.js')}}"
        exp = 'https://foo.s3.amazonaws.com/admin-static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_cdn_domain(self):
        self.app.config['FLASKS3_CDN_DOMAIN'] = 'foo.cloudfront.net'
        ufs = "{{url_for('static', filename='bah.js')}}"
        exp = 'https://foo.cloudfront.net/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_url_style_path(self):
        """Tests that the URL returned uses the path style."""
        self.app.config['FLASKS3_URL_STYLE'] = 'path'
        ufs = "{{url_for('static', filename='bah.js')}}"
        exp = 'https://s3.amazonaws.com/foo/static/bah.js'
        self.assertEquals(self.client_get(ufs).data, six.b(exp))

    def test_url_for_url_style_invalid(self):
        """Tests that an exception is raised for invalid URL styles."""
        self.app.config['FLASKS3_URL_STYLE'] = 'balderdash'
        ufs = "{{url_for('static', filename='bah.js')}}"
        self.assertRaises(ValueError, self.client_get, six.b(ufs))


class S3TestsWithCustomEndpoint(unittest.TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.app.config['FLASKS3_BUCKET_NAME'] = 'thebucket'
        self.app.config['FLASKS3_REGION'] = 'theregion'
        self.app.config['AWS_ACCESS_KEY_ID'] = 'thekeyid'
        self.app.config['AWS_SECRET_ACCESS_KEY'] = 'thesecretkey'
        self.app.config['FLASKS3_ENDPOINT_URL'] = 'https://minio.local:9000/'

    @patch('flask_s3.boto3')
    def test__custom_endpoint_is_passed_to_boto(self, mock_boto3):
        flask_s3.create_all(self.app)
        mock_boto3.client.assert_called_once_with(
            "s3",
            region_name='theregion',
            aws_access_key_id='thekeyid',
            aws_secret_access_key='thesecretkey',
            endpoint_url='https://minio.local:9000/')


class S3Tests(unittest.TestCase):
    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.app.config['FLASKS3_BUCKET_NAME'] = 'foo'
        self.app.config['FLASKS3_USE_CACHE_CONTROL'] = True
        self.app.config['FLASKS3_CACHE_CONTROL'] = 'cache instruction'
        self.app.config['FLASKS3_CACHE_CONTROL'] = '3600'
        self.app.config['FLASKS3_HEADERS'] = {
            'Expires': 'Thu, 31 Dec 2037 23:59:59 GMT',
            'Content-Encoding': 'gzip',
        }
        self.app.config['FLASKS3_ONLY_MODIFIED'] = False

    def test__bp_static_url(self):
        """ Tests _bp_static_url """
        bps = [Mock(static_url_path='/foo', url_prefix=None),
               Mock(static_url_path=None, url_prefix='/pref'),
               Mock(static_url_path='/b/bar', url_prefix='/pref'),
               Mock(static_url_path=None, url_prefix=None)]
        expected = [six.u('/foo'), six.u('/pref'), six.u('/pref/b/bar'), six.u('')]
        self.assertEquals(expected, [flask_s3._bp_static_url(x) for x in bps])

    def test__cache_config(self):
        """ Test that cache headers are set correctly. """
        new_app = Flask("test_cache_param")
        new_app.config['FLASKS3_USE_CACHE_CONTROL'] = True
        new_app.config['FLASKS3_CACHE_CONTROL'] = '3600'
        flask_s3.FlaskS3(new_app)
        expected = {'Cache-Control': '3600'}
        self.assertEqual(expected, new_app.config['FLASKS3_HEADERS'])

    @patch('os.walk')
    @patch('os.path.isdir')
    def test__gather_files(self, path_mock, os_mock):
        """ Tests the _gather_files function """
        self.app.static_folder = '/home'
        self.app.static_url_path = '/static'

        bp_a = Mock(static_folder='/home/bar', static_url_path='/a/bar',
                    url_prefix=None)
        bp_b = Mock(static_folder='/home/zoo', static_url_path='/b/bar',
                    url_prefix=None)
        bp_c = Mock(static_folder=None)

        self.app.blueprints = {'a': bp_a, 'b': bp_b, 'c': bp_c}

        dirs = {'/home': [('/home', None, ['.a'])],
                '/home/bar': [('/home/bar', None, ['b'])],
                '/home/zoo': [('/home/zoo', None, ['c']),
                              ('/home/zoo/foo', None, ['d', 'e'])]}
        os_mock.side_effect = dirs.get
        path_mock.return_value = True

        expected = {('/home/bar', six.u('/a/bar')): ['/home/bar/b'],
                    ('/home/zoo', six.u('/b/bar')): ['/home/zoo/c',
                                                     '/home/zoo/foo/d',
                                                     '/home/zoo/foo/e']}
        actual = flask_s3._gather_files(self.app, False)
        self.assertEqual(expected, actual)

        expected[('/home', six.u('/static'))] = ['/home/.a']
        actual = flask_s3._gather_files(self.app, True)
        self.assertEqual(expected, actual)

    @patch('os.walk')
    @patch('os.path.isdir')
    def test__gather_files_no_blueprints_no_files(self, path_mock, os_mock):
        """
        Tests that _gather_files works when there are no blueprints and
        no files available in the static folder
        """
        self.app.static_folder = '/foo'
        dirs = {'/foo': [('/foo', None, [])]}
        os_mock.side_effect = dirs.get
        path_mock.return_value = True

        actual = flask_s3._gather_files(self.app, False)
        self.assertEqual({}, actual)

    @patch('os.walk')
    @patch('os.path.isdir')
    def test__gather_files_bad_folder(self, path_mock, os_mock):
        """
        Tests _gather_files when the static folder is not a valid folder
        """
        self.app.static_folder = '/bad'
        dirs = {'/bad': []}
        os_mock.side_effect = dirs.get
        path_mock.return_value = False

        actual = flask_s3._gather_files(self.app, False)
        self.assertEqual({}, actual)

    @patch('os.path.splitdrive', side_effect=ntpath.splitdrive)
    @patch('os.path.join', side_effect=ntpath.join)
    def test__path_to_relative_url_win(self, join_mock, split_mock):
        """ Tests _path_to_relative_url on a Windows system """
        input_ = [r'C:\foo\bar\baz.css',
                  r'C:\foo\bar.css',
                  r'\foo\bar.css']
        expected = ['/foo/bar/baz.css', '/foo/bar.css', '/foo/bar.css']
        for in_, exp in zip(input_, expected):
            actual = flask_s3._path_to_relative_url(in_)
            self.assertEquals(exp, actual)

    @unittest.skipIf(sys.version_info < (3, 0),
                     "not supported in this version")
    @patch('flask_s3.boto3')
    @patch("{}.open".format("builtins"), mock_open(read_data='test'))
    def test__write_files(self, key_mock):
        """ Tests _write_files """
        static_url_loc = '/foo/static'
        static_folder = '/home/z'
        assets = ['/home/z/bar.css', '/home/z/foo.css']
        exclude = ['/foo/static/foo.css', '/foo/static/foo/bar.css']
        # we expect foo.css to be excluded and not uploaded
        expected = [call(bucket=None, name=six.u('/foo/static/bar.css')),
                    call().set_metadata('Cache-Control', 'cache instruction'),
                    call().set_metadata('Expires',
                                        'Thu, 31 Dec 2037 23:59:59 GMT'),
                    call().set_metadata('Content-Encoding', 'gzip'),
                    call().set_contents_from_filename('/home/z/bar.css')]
        flask_s3._write_files(key_mock, self.app, static_url_loc,
                              static_folder, assets, None, exclude)
        self.assertLessEqual(expected, key_mock.mock_calls)

    @patch('flask_s3.boto3')
    def test__write_only_modified(self, key_mock):
        """ Test that we only upload files that have changed """
        self.app.config['FLASKS3_ONLY_MODIFIED'] = True
        static_folder = tempfile.mkdtemp()
        static_url_loc = static_folder
        filenames = [os.path.join(static_folder, f)
                     for f in ['foo.css', 'bar.css']]
        expected = []

        data_iter = count()

        for filename in filenames:
            # Write distinct data into each file
            with open(filename, 'wb') as f:
                if six.PY3:
                    data = str(next(data_iter))  # fixed: was str(data_iter), which stringified the iterator itself
                    f.write(data.encode())
                else:
                    data = str(data_iter.next())
                    f.write(data)

            # We expect each file to be uploaded
            expected.append(call.put_object(ACL='public-read',
                                            Bucket=None,
                                            Key=filename.lstrip("/"),
                                            Body=data,
                                            Metadata={},
                                            Expires='Thu, 31 Dec 2037 23:59:59 GMT',
                                            ContentEncoding='gzip'))

        files = {(static_url_loc, static_folder): filenames}
        hashes = flask_s3._upload_files(key_mock, self.app, files, None)

        # All files are uploaded and hashes are returned
        self.assertLessEqual(len(expected), len(key_mock.mock_calls))
        self.assertEquals(len(hashes), len(filenames))

        # We now modify the second file
        with open(filenames[1], 'wb') as f:
            data = str(next(data_iter))
            if six.PY2:
                f.write(data)
            else:
                f.write(data.encode())

        # We expect only this file to be uploaded
        expected.append(call.put_object(ACL='public-read',
                                        Bucket=None,
                                        Key=filenames[1].lstrip("/"),
                                        Body=data,
                                        Metadata={},
                                        Expires='Thu, 31 Dec 2037 23:59:59 GMT',
                                        ContentEncoding='gzip'))

        new_hashes = flask_s3._upload_files(key_mock, self.app, files, None,
                                            hashes=dict(hashes))

        # import pprint
        # pprint.pprint(zip(expected, key_mock.mock_calls))
        self.assertEquals(len(expected), len(key_mock.mock_calls))

    @patch('flask_s3.boto3')
    def test_write_binary_file(self, key_mock):
        """ Tests _write_files with a binary file """
        self.app.config['FLASKS3_ONLY_MODIFIED'] = True
        static_folder = tempfile.mkdtemp()
        static_url_loc = static_folder
        filenames = [os.path.join(static_folder, 'favicon.ico')]

        for filename in filenames:
            # Write binary data into the file
            with open(filename, 'wb') as f:
                f.write(bytearray([120, 3, 255, 0, 100]))

        flask_s3._write_files(key_mock, self.app, static_url_loc,
                              static_folder, filenames, None)

        expected = {
            'ACL': 'public-read',
            'Bucket': None,
            'Metadata': {},
            'ContentEncoding': 'gzip',
            'Body': b'x\x03\xff\x00d',
            'Key': filenames[0][1:],
            'Expires': 'Thu, 31 Dec 2037 23:59:59 GMT'}

        name, args, kwargs = key_mock.mock_calls[0]
        self.assertEquals(expected, kwargs)

    def test_static_folder_path(self):
        """ Tests _static_folder_path """
        inputs = [('/static', '/home/static', '/home/static/foo.css'),
                  ('/foo/static', '/home/foo/s', '/home/foo/s/a/b.css'),
                  ('/bar/', '/bar/', '/bar/s/a/b.css')]
        expected = [six.u('/static/foo.css'),
                    six.u('/foo/static/a/b.css'),
                    six.u('/bar/s/a/b.css')]
        for i, e in zip(inputs, expected):
            self.assertEquals(e, flask_s3._static_folder_path(*i))

    @patch('flask_s3.boto3')
    def test__bucket_acl_not_set(self, mock_boto3):
        flask_s3.create_all(self.app, put_bucket_acl=False)
        self.assertFalse(mock_boto3.client().put_bucket_acl.called,
                         "put_bucket_acl was called!")

    @patch('flask_s3._write_files')
    def test__upload_uses_prefix(self, mock_write_files):
        s3_mock = Mock()
        local_path = '/local_path/static'
        file_paths = ['/local_path/static/file1', '/local_path/static/file2']
        files = {(local_path, '/static'): file_paths}
        flask_s3._upload_files(s3_mock, self.app, files, 's3_bucket')
        expected_call =
call( s3_mock, self.app, '/static', local_path, file_paths, 's3_bucket', hashes=None) self.assertEquals(mock_write_files.call_args_list, [expected_call]) for supported_prefix in ['foo', '/foo', 'foo/', '/foo/']: mock_write_files.reset_mock() self.app.config['FLASKS3_PREFIX'] = supported_prefix flask_s3._upload_files(s3_mock, self.app, files, 's3_bucket') expected_call = call(s3_mock, self.app, '/foo/static', local_path, file_paths, 's3_bucket', hashes=None) self.assertEquals(mock_write_files.call_args_list, [expected_call]) @patch('flask_s3.current_app') def test__url_for_uses_prefix(self, mock_current_app): bucket_path = 'foo.s3.amazonaws.com' flask_s3.FlaskS3(self.app) mock_current_app.config = self.app.config mock_bind = mock_current_app.url_map.bind flask_s3.url_for('static', **{'filename': 'test_file.txt'}) self.assertEqual(mock_bind.call_args_list, [call(bucket_path, url_scheme='https')]) for supported_prefix in ['bar', '/bar', 'bar/', '/bar/']: mock_bind.reset_mock() self.app.config['FLASKS3_PREFIX'] = supported_prefix flask_s3.url_for('static', **{'filename': 'test_file.txt'}) expected_path = '%s/%s' % (bucket_path, 'bar') self.assertEqual(mock_bind.call_args_list, [call(expected_path, url_scheme='https')]) if __name__ == '__main__': unittest.main()
# Copyright (c) 2018, Ioannis Tziakos # All rights reserved. # # Plugin hooks are inspired by the current implementations found in # the tox.venv module and adapted to support edm. import subprocess import os import re import sys from tox import hookimpl, exception from tox.venv import VirtualEnv COMMAND_FAILED = ( "command failed but result from testenv is ignored\ncmd: {}") def env_exists(edm, envname): try: subprocess.check_call([str(edm), 'envs', 'exists', envname]) except subprocess.CalledProcessError: return False else: return True @hookimpl def tox_testenv_create(venv, action): name = venv.envconfig.basepython m = re.match(r"python(\d)\.(\d)", name) if m: version = "%s.%s" % m.groups() else: raise exception.UnsupporterInterpreter( 'TOX-EDM cannot infer version from {!r}'.format(name)) edm = venv.getcommandpath('edm', venv=False) action.venv.envconfig.whitelist_externals.append( os.path.dirname(edm)) if action.activity == 'recreate': action.popen([ edm, 'envs', 'create', action.venvname, '--force', '--version', version]) elif not env_exists(edm, action.venvname): action.popen([ edm, 'envs', 'create', action.venvname, '--version', version]) prefix = action.popen( [edm, 'prefix', '-e', action.venvname], redirect=False, returnout=True) prefix = prefix.strip() # The envbindir will be used to find the environment python # So we have to make sure that it has the right value. action.venv.envconfig.envbindir = prefix action.venv.envconfig.whitelist_externals.append(prefix) return True @hookimpl def tox_testenv_install_deps(venv, action): deps = venv._getresolvedeps() name = action.venvname if len(deps) > 0: edm = venv.getcommandpath('edm', venv=False) depinfo = " ".join(map(str, deps)) action.setactivity("installdeps", "%s" % depinfo) args = [edm, 'install', '-e', name, '-y'] + map(str, deps) action.popen(args) return True @hookimpl def tox_runenvreport(venv, action): edm = venv.getcommandpath('edm', venv=True) output = action.popen([ edm, 'run', '-e', action.venvname, '--', 'pip', 'freeze']) output = output.split("\n\n")[-1] return output.strip().splitlines() @hookimpl def tox_runtest_pre(venv): return True @hookimpl def tox_runtest_post(venv): return True @hookimpl def tox_runtest(venv, redirect): session = venv.session envconfig = venv.envconfig action = session.newaction(venv, "runtests") with action: venv.status = 0 session.make_emptydir(envconfig.envtmpdir) envconfig.envtmpdir.ensure(dir=1) env = venv._getenv(testcommand=True) cwd = envconfig.changedir edm = venv.getcommandpath('edm', venv=True) action.setactivity( "runtests", "PYTHONHASHSEED={!r}".format( env.get("PYTHONHASHSEED"))) for i, argv in enumerate(envconfig.commands): message = "commands[%s] | %s" % ( i, ' '.join([str(x) for x in argv])) action.setactivity("runtests", message) ignore_return = argv[0].startswith("-") if ignore_return: if argv[0] == "-": del argv[0] else: argv[0] = argv[0].lstrip("-") argv = [edm, 'run', '-e', action.venvname, '--'] + argv try: action.popen( argv, cwd=cwd, env=env, redirect=redirect, ignore_ret=ignore_return) except exception.InvocationError as error: if envconfig.ignore_outcome: session.report.warning(COMMAND_FAILED.format(error)) venv.status = "ignored failed command" continue # keep processing commands session.report.error(str(error)) venv.status = "commands failed" if not envconfig.ignore_errors: break # Don't process remaining commands except KeyboardInterrupt: venv.status = "keyboardinterrupt" session.report.error(venv.status) raise return True @hookimpl def 
tox_get_python_executable(envconfig): venv = VirtualEnv(envconfig=envconfig) edm = venv.getcommandpath('edm', venv=False) if env_exists(edm, envconfig.envname): executable = subprocess.check_output([ str(edm), 'run', '-e', envconfig.envname, '--', 'python', '-c', "import sys; sys.stdout.write(sys.executable)"]) executable = executable.strip() if sys.platform.startswith('win'): # Make sure that we always have the right bin directory envconfig.envbindir = os.path.join( os.path.dirname(executable), 'Scripts') return os.path.abspath(executable) else: return None
# Generated by Django 2.1.1 on 2018-09-23 18:35 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('backend', '0002_song'), ] operations = [ migrations.AlterModelOptions( name='song', options={'ordering': ['position']}, ), migrations.AddField( model_name='song', name='position', field=models.IntegerField(default=0), ), ]
#!/usr/bin/env python import dfl.dynamic_system import dfl.dynamic_model as dm import numpy as np import matplotlib.pyplot as plt from scipy import signal m = 1.0 k11 = 0.2 k13 = 2.0 b1 = 3.0 class Plant1(dfl.dynamic_system.DFLDynamicPlant): def __init__(self): self.n_x = 2 self.n_eta = 2 self.n_u = 1 self.n = self.n_x + self.n_eta # User defined matrices for DFL self.A_cont_x = np.array([[0.0, 1.0], [0.0, 0.0]]) self.A_cont_eta = np.array([[0.0, 0.0], [-1/m,-1/m]]) self.B_cont_x = np.array([[0.0],[1.0]]) # Limits for inputs and states self.x_min = np.array([-2.0,-2.0]) self.x_max = np.array([2.0 ,2.0]) self.u_min = np.array([-2.5]) self.u_max = np.array([ 2.5]) # Hybrid model self.P = np.array([[1, 1]]) self.A_cont_eta_hybrid = self.A_cont_eta.dot(np.linalg.pinv(self.P)) # functions defining constituitive relations for this particular system @staticmethod def phi_c1(q): e = k11*q + k13*q**3 return e @staticmethod def phi_r1(f): # e = b1*np.sign(f)*np.abs(f)*np.abs(f) e = b1*np.sign(f)*f**2 return e @staticmethod def phi_rc(q,v): return 5*v*np.abs(q) # nonlinear state equations def f(self,t,x,u): x_dot = np.zeros(x.shape) q,v = x[0],x[1] x_dot[0] = v x_dot[1] = -self.phi_r1(v) -self.phi_c1(q) + u return x_dot # nonlinear observation equations @staticmethod def g(t,x,u): return dm.Koopman.gkoop1(x) # auxiliary variables (outputs from nonlinear elements) def phi(self,t,x,u): ''' outputs the values of the auxiliary variables ''' q,v = x[0],x[1] eta = np.zeros(self.n_eta) eta[0] = self.phi_c1(q) eta[1] = self.phi_r1(v) return eta ########################################################################################### #Dummy forcing laws def zero_u_func(y,t): return 1 def rand_u_func(y,t): return np.random.normal(0.0,0.3) def sin_u_func(y,t): return 0.5*signal.square(3 * t) # return np.sin(3*t) if __name__== "__main__": driving_fun = sin_u_func plant1 = Plant1() x_0 = np.zeros(plant1.n_x) fig, axs = plt.subplots(2, 1) tru = dm.GroundTruth(plant1) data = tru.generate_data_from_random_trajectories() t, u, x_tru, y_tru = tru.simulate_system(x_0, driving_fun, 10.0) axs[0].plot(t, x_tru[:,0], 'k-', label='Ground Truth') koo = dm.Koopman(plant1, observable='filippos') koo.learn(data) _, _, x_koo, y_koo = koo.simulate_system(x_0, driving_fun, 10.0) axs[0].plot(t, x_koo[:,0], 'g-.', label='Koopman') dfl = dm.DFL(plant1) dfl.learn(data) _, _, x_dfl, y_dfl = dfl.simulate_system(x_0, driving_fun, 10.0) axs[0].plot(t, x_dfl[:,0], 'r-.', label='DFL') lrn = dm.L3(plant1, 2, ac_filter=False) lrn.learn(data) _, _, x_lrn, y_lrn = lrn.simulate_system(x_0, driving_fun, 10.0) axs[0].plot(t, x_lrn[:,0], 'b-.', label='L3') axs[0].legend() axs[1].plot(t, u, 'k') axs[1].set_xlabel('time') axs[0].set_ylabel('q') axs[1].set_ylabel('u') plt.show()
# RUN this file for an example adventure. # THEN go to 02_my_adventure.py to make your own! from random import randint def startGame(): print("This is an adventure game.") input("Press enter to continue the text.") print("When you see this you will need to respond. Here type 'ok'. Then press enter.") input("> ") input("Ready? ...") startRoom() def startRoom(): input("You are in a big empty room.") input("There are four doors.") input("Which door do you enter?") print("Type 1, 2, 3, or 4 then press enter.") door = input("> ") if door == "1": input("You walk through door 1.") emptyRoom() elif door == "2": input("You walk through door 2.") mathTrap() elif door == "3": input("You walk through door 3.") library() elif door == "4": pit() else: input("that's not a door, try again.") print() startRoom() def emptyRoom(): input("It is an empty room.") input("But you hear a mysterious voice.") input("It whispers:") input('"The password is...password..."') input("...") input("Whatever. Press enter leave back to the main room.") startRoom() def mathTrap(): input("OH NO it is a math trap.") num1 = randint(1, 99) num2 = randint(1, 99) stringNum1 = str(num1) stringNum2 = str(num2) print("Answer the math question correctly to escape:") answer = input(stringNum1 + " + " + stringNum2 + " = ") if (int(answer) == num1 + num2): input("CORRECT!") input("You escape back to the main room.") startRoom() else: input("INCORRECT!") gameOver() def library(): input("You are in a library.") input("The librarian glares at you.") input("'What is the password?' she asks.") print("What do you say?") password = input("> ") if password == "password": input("'How did you know?? Okay then...'") input("She pulls a book out of a shelf, then the shelf moves...") secretPassage() else: input("'Incorrect!!' she screams, then kicks you out.") startRoom() def pit(): input("What is in door 4???") print("Guess!") input("Your guess: ") input("Nope, it's just a bottomless pit. Sorry.") gameOver() def secretPassage(): input("You enter a secret passageway.") input("and there is cake!") win() def win(): input("You win!!") print("congrats :D") def gameOver(): print("Game Over!") startGame()
from src.preprocessing.data_filter import DataFilter
from src.preprocessing.dataset import Article, Sentence, Token


class ThreeSentenceDataFilter(DataFilter):
    def __init__(self, total_sentence_limit=None, *args, **kwargs):
        self.article = None
        self.sentence = None
        self.last_entity = None
        self.total_sentence_count = 0
        self.total_sentence_limit = total_sentence_limit
        super().__init__(*args, **kwargs)

    def filter_articles(self):
        missing_ids = 0
        wrong_title_spans = 0
        for article in self.articles.copy():
            if article.is_valid():
                wikidata_json = self.page_id_to_wikidata_id.get(int(article.doc_id), None)
                if wikidata_json is not None and wikidata_json['id'] is not None:
                    nkjp_class = self.entity_id_to_nkjp_class.get(wikidata_json['id'], None)
                    nkjp_specific_class = self.entity_id_to_nkjp_specific_class.get(wikidata_json['id'], None)
                    if nkjp_class is not None:
                        article.annotate_title(wikidata_json['title'], nkjp_class, nkjp_specific_class)
                        if article.title_annotation_error:
                            wrong_title_spans += 1
                else:
                    article.title_annotation_error = True
                    missing_ids += 1
            if not article.is_valid():
                self.articles.remove(article)

    def set_up(self):
        pass

    def process_line(self, line: str):
        if self.total_sentence_limit is not None and self.total_sentence_limit <= self.total_sentence_count:
            return
        columns = line[:-1].split('\t')
        if len(columns) == 7:
            article_no, token, lemma, space, tags, entity, entity_wikidata_id = columns
            if self.article is None or article_no != self.article.doc_id:
                if self.article is not None:
                    self.articles.add(self.article)
                self.article = Article(article_no, sentence_limit=3)
                self.total_sentence_count += 3
            if self.sentence is None:
                self.sentence = Sentence()
                self.article.add_next_sentence(self.sentence)
            token = Token(token, lemma, space, tags, entity, entity_wikidata_id)
            self.sentence.tokens.append(token)
            if entity_wikidata_id != '_':
                entity_wikidata_id = int(entity_wikidata_id[1:])
                token.nkjp_class = self.entity_id_to_nkjp_class.get(entity_wikidata_id)
                token.specific_nkjp_class = self.entity_id_to_nkjp_specific_class.get(entity_wikidata_id)
                if token.nkjp_class is not None:
                    token.start_tag = 'B' if self.last_entity != entity else 'I'
            self.last_entity = entity
        elif len(columns) != 1:
            print('Invalid number of columns: %d' % len(columns))
            print(columns)
        else:
            # we reached a blank line - meaning the sentence is over
            self.sentence = None


def process(base_dir):
    ThreeSentenceDataFilter(
        None,
        'data/unfiltered_datasets/poleval',
        'data/training_datasets/wikipedia_three_sentences',
        base_dir)\
        .filter_data_and_save()


if __name__ == '__main__':
    process(r'C:\Users\piotrek\Desktop\inf\magisterka\ner')
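# A hypothetical example of the tab-separated input process_line() expects; the
# seven columns are (article_no, token, lemma, space, tags, entity,
# entity_wikidata_id), a bare newline marks a sentence boundary, and an id such
# as "Q270" is parsed as wikidata id 270. All values below are made up:
sample_lines = [
    "12\tWarszawa\tWarszawa\t1\tsubst:sg:nom:f\tWarszawa\tQ270\n",
    "12\tjest\tby\u0107\t1\tfin:sg:ter:imperf\t_\t_\n",
    "\n",  # sentence boundary
]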
import pytest from pyvipr.examples_models.lopez_embedded import model from pyvipr.pysb_viz.static_viz import PysbStaticViz @pytest.fixture def viz_model(): viz = PysbStaticViz(model) return viz def test_viz_exists(viz_model): assert viz_model def test_graphs(viz_model): g_sp = viz_model.species_graph() g_rxn_bi = viz_model.sp_rxns_bidirectional_graph(two_edges=True) g_rxn = viz_model.sp_rxns_graph() g_rules = viz_model.sp_rules_graph() g_proj_sp = viz_model.projected_graph(g_rxn_bi, 'species_from_bireactions', viz_model.model.reactions_bidirectional) g_proj_birxns = viz_model.projected_graph(g_rxn_bi, 'bireactions') g_proj_rules = viz_model.projected_graph(g_rules, 'rules') n_species = len(viz_model.model.species) assert len(g_sp.nodes()) == n_species assert len(g_rxn_bi.nodes()) == n_species + len(viz_model.model.reactions_bidirectional) assert len(g_rxn.nodes()) == n_species + len(viz_model.model.reactions) assert len(g_rules.nodes()) == n_species + len(viz_model.model.rules) assert len(g_proj_sp.nodes()) == n_species assert len(g_proj_birxns.nodes()) == len(viz_model.model.reactions_bidirectional) assert len(g_proj_rules.nodes()) == len(viz_model.model.rules) def test_wrong_projection(viz_model): with pytest.raises(ValueError): viz_model._projections_view('wrong_projection') def test_no_compartments(viz_model): with pytest.raises(ValueError): viz_model.compartments_data_graph()
# Generated by Django 2.1.5 on 2019-01-31 18:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ipam', '0023_change_logging'), ] operations = [ migrations.AlterField( model_name='vrf', name='rd', field=models.CharField(blank=True, max_length=21, null=True, unique=True), ), ]
from itertools import product with open("day-04.txt") as f: numbers_str, *boards_str = f.read().rstrip().split("\n\n") numbers = [int(n) for n in numbers_str.split(",")] boards = {} for b, board_str in enumerate(boards_str): boards[b] = {} for r, row in enumerate(board_str.splitlines()): for c, number in enumerate(map(int, row.split())): boards[b][number] = r, c boards_rows = [[set() for _ in range(5)] for _ in range(len(boards))] boards_cols = [[set() for _ in range(5)] for _ in range(len(boards))] for number, (b, board) in product(numbers, boards.items()): if number not in board: continue row, col = board.pop(number) boards_rows[b][row].add(number) boards_cols[b][col].add(number) if len(boards_rows[b][row]) == 5 or len(boards_cols[b][col]) == 5: winning_board = b called_number = number break else: print("No winning board found") exit(1) print(sum(boards[winning_board]) * called_number)
import gffutils
import pyfaidx


def select_annotation_type(db, fasta, selectionAnnotationType):
    """ Return the list of GFF3 features of the selected type (e.g. mRNA) from the database. """
    countFeature = db.count_features_of_type(selectionAnnotationType)
    featureList = [None] * countFeature
    for i, feature in enumerate(db.features_of_type(selectionAnnotationType)):
        featureList[i] = feature
    # Drop any slots left unfilled if the iterator yielded fewer features than counted.
    featureList = list(filter(None.__ne__, featureList))
    return featureList


def variant_position_within(coordsVar, coordsInterval):
    """ Check whether coordsVar lies within coordsInterval. Return 1 if it does, 0 otherwise. """
    if (coordsVar.CHROM == coordsInterval.seqid
            and coordsInterval.start <= coordsVar.POS <= coordsInterval.end):
        return 1
    return 0
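# A minimal usage sketch, assuming a gffutils database has already been built;
# the file names here are hypothetical:
if __name__ == "__main__":
    db = gffutils.FeatureDB("annotation.gff3.db")
    fasta = pyfaidx.Fasta("genome.fa")
    mrnas = select_annotation_type(db, fasta, "mRNA")
    print("%d mRNA features loaded" % len(mrnas))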
import socket

import win32.lib.win32serviceutil as win32serviceutil
import win32.servicemanager as servicemanager
import win32.win32event as win32event
import win32.win32service as win32service


class SMWinServiceBase(win32serviceutil.ServiceFramework):
    """Skeleton base class for a Windows service; subclasses override
    start(), stop() and main()."""

    _svc_name_ = "SampleService"
    _svc_display_name_ = "Sample Service"
    _svc_description_ = "Sample Service Description"

    @classmethod
    def parse_command_line(cls):
        # Handles install/remove/start/stop arguments from the command line.
        win32serviceutil.HandleCommandLine(cls)

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        socket.setdefaulttimeout(60)

    def SvcStop(self):
        # Called by the service manager when the service is asked to stop.
        self.stop()
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        # Called by the service manager when the service starts.
        self.start()
        servicemanager.LogMsg(
            servicemanager.EVENTLOG_INFORMATION_TYPE,
            servicemanager.PYS_SERVICE_STARTED,
            (self._svc_name_, ""),
        )
        self.main()

    def start(self):
        pass

    def stop(self):
        pass

    def main(self):
        pass


if __name__ == "__main__":
    SMWinServiceBase.parse_command_line()
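# A minimal sketch of how the base class above is typically used: subclass it,
# override start/stop/main, then install and start the service from the command
# line (`python this_file.py install`, then `python this_file.py start`). The
# service name and the heartbeat file path below are hypothetical.
import time


class HelloWorldService(SMWinServiceBase):
    _svc_name_ = "HelloWorldService"
    _svc_display_name_ = "Hello World Service"
    _svc_description_ = "Appends a heartbeat line every few seconds"

    def start(self):
        self.running = True

    def stop(self):
        self.running = False

    def main(self):
        while self.running:
            with open(r"C:\Temp\heartbeat.txt", "a") as f:  # hypothetical path
                f.write("alive\n")
            time.sleep(5)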
import os import re import subprocess import shlex from ConfigParser import SafeConfigParser CONFIG_FILE = os.path.join(os.getcwd(), '.forrest') def get_config(): config = SafeConfigParser() config.read(CONFIG_FILE) return config def save_config(config): config.write(open(CONFIG_FILE, 'w')) def get_input(text, default=''): response = raw_input(text) if len(response) == 0: response = default return response def create_bundle(source_dir): local_command('tar czf /tmp/bundle.tgz -C %s .' % source_dir) def local_command(command, decoder=None, tty=None): if tty: return os.system(command) else: dev_null = open(os.devnull, 'w') output = subprocess.check_output(shlex.split(command)) dev_null.close() if decoder: return decoder(output) else: return output
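# A small sketch of how the helpers above fit together; the 'aws' section name
# and its keys are assumptions for illustration, not part of the original module:
def configure():
    config = get_config()
    if not config.has_section('aws'):
        config.add_section('aws')
    region = get_input('AWS region [us-east-1]: ', default='us-east-1')
    config.set('aws', 'region', region)
    save_config(config)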
from http import HTTPStatus from django.urls import reverse from mock import patch from barriers.models import Company from core.tests import MarketAccessTestCase class EditCompaniesTestCase(MarketAccessTestCase): company_id = "0692683e-5197-4853-a0fe-e43e35b8e7c5" company_name = "Test Company" company_data = { "id": company_id, "name": company_name, "created_on": "2020-01-01", "address": { "line_1": "123 Test Street", "town": "London", }, } def test_edit_companies_landing_page(self): """ Landing page should have the barrier's companies in the form """ response = self.client.get( reverse( "barriers:edit_companies", kwargs={"barrier_id": self.barrier["id"]} ) ) assert response.status_code == HTTPStatus.OK assert "form" in response.context company_ids = [company["id"] for company in self.barrier["companies"]] assert response.context["form"].initial["companies"] == company_ids assert self.client.session["companies"] == self.barrier["companies"] def test_company_search_page_loads(self): """ The search page should load with a form in the context """ response = self.client.get( reverse( "barriers:search_company", kwargs={"barrier_id": self.barrier["id"]} ) ) assert response.status_code == HTTPStatus.OK assert "form" in response.context @patch("utils.datahub.DatahubClient.post") def test_company_search_submit(self, mock_post): """ Searching should call the Datahub API """ mock_post.return_value = { "count": 1, "results": [self.company_data], } response = self.client.post( reverse( "barriers:search_company", kwargs={"barrier_id": self.barrier["id"]} ), data={"query": "test search"}, ) assert response.status_code == HTTPStatus.OK assert "form" in response.context assert "results" in response.context results = response.context["results"] assert results["count"] == 1 assert results["results"][0].id == self.company_id assert results["results"][0].name == self.company_name @patch("barriers.views.companies.DatahubClient.get_company") def test_company_detail(self, mock_get_company): """ Company Detail should call the Datahub API """ mock_get_company.return_value = Company(self.company_data) response = self.client.get( reverse( "barriers:company_detail", kwargs={ "barrier_id": self.barrier["id"], "company_id": self.company_id, }, ), ) assert response.status_code == HTTPStatus.OK mock_get_company.assert_called_with(self.company_id) assert response.context["company"].id == self.company_id assert response.context["company"].name == self.company_name @patch("utils.api.resources.APIResource.patch") @patch("barriers.views.companies.DatahubClient.get_company") def test_add_company(self, mock_get_company, mock_patch): """ Add company should change the session, not call the API """ mock_get_company.return_value = Company(self.company_data) response = self.client.post( reverse( "barriers:company_detail", kwargs={ "barrier_id": self.barrier["id"], "company_id": self.company_id, }, ), data={"company_id": self.company_id}, ) assert response.status_code == HTTPStatus.FOUND new_company = { "id": self.company_id, "name": self.company_name, } assert new_company in self.client.session["companies"] assert mock_patch.called is False @patch("utils.api.resources.APIResource.patch") def test_remove_company(self, mock_patch): """ Removing a company should remove it from the session, not call the API """ companies = [ { "id": self.company_id, "name": self.company_name, }, { "id": self.barrier["companies"][0]["id"], "name": self.barrier["companies"][0]["name"], }, ] self.update_session({"companies": companies}) response = 
self.client.post( reverse( "barriers:remove_company", kwargs={"barrier_id": self.barrier["id"]} ), data={"company_id": self.company_id}, ) assert response.status_code == HTTPStatus.FOUND companies = self.client.session["companies"] assert { "id": self.company_id, "name": self.company_name, } not in self.client.session["companies"] assert self.barrier["companies"][0] in self.client.session["companies"] assert mock_patch.called is False @patch("utils.api.resources.APIResource.patch") def test_confirm_companies(self, mock_patch): """ Saving should call the API """ self.update_session( { "companies": [ { "id": self.company_id, "name": self.company_name, } ] } ) response = self.client.post( reverse( "barriers:edit_companies_session", kwargs={ "barrier_id": self.barrier["id"], }, ), data={"companies": [self.company_id]}, ) assert response.status_code == HTTPStatus.FOUND mock_patch.assert_called_with( id=self.barrier["id"], companies=[ { "id": self.company_id, "name": self.company_name, } ], ) assert "companies" not in self.client.session
#!/usr/bin/env python3

# Reading and Writing files

# Create a new file object and assign it to a variable called file
file = open("spider.txt")

# The readline method reads a single line of the file
print(file.readline())

# Each time readline is called, the file object updates its current position
# in the file, so this reads the second line
print(file.readline())

# read() returns the remainder of the file from the current position
print(file.read())

# We have to close the file once we are done with it
file.close()

# ############ WITH OPEN ############

# The with keyword creates a block of code; the work to be done with the file
# goes inside that block, and Python closes the file automatically when the
# block ends.
with open("spider.txt") as file:
    print(file.readline())

with open("spider.txt") as file:
    print(file.readline())

with open("spider.txt") as file:
    for line in file:
        print(line.upper())

# The empty lines (caused by the newline at the end of each line) can be
# avoided by using strip()
with open("spider.txt") as file:
    for line in file:
        print(line.strip().upper())
import math def length_norm(score): length_tgt = len(score) return sum(score) / length_tgt def word_reward(score, reward): length_tgt = len(score) return sum(score) - reward * length_tgt def bounded_word_reward(score, reward, bound): """ bound = L_predict L_predict could be: 1) length_src * alpha 2) average length_tgt * beta 3) model predicted length * gamma """ length_tgt = len(score) bounded_length = min(length_tgt, bound) return sum(score) - reward * bounded_length def bounded_adaptive_reward(score, rewards, bound): if len(rewards) > bound: rewards = rewards[:bound] return sum(score) - sum(rewards) def neg_sigmoid(x): return 1.0 / (1 + math.exp(x))
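# A quick illustration of how the length penalties above differ on the same
# hypothetical per-token log-probability scores (the values are made up):
if __name__ == "__main__":
    score = [-0.5, -0.1, -0.7, -0.2]
    print(length_norm(score))                        # mean log-prob per token
    print(word_reward(score, reward=0.2))            # flat per-word reward
    print(bounded_word_reward(score, 0.2, bound=3))  # reward capped at 3 tokens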
import pandas as pd while(1): menu = {1:"Driver Login", 2:"Customer Login", 3:"ZULA Administarator", 4:"Exit"} intial_cab_drivers = {"id":[1,2,3,4], "Name":["aaa","bbb","ccc","ddd"], "Pass":[111,222,333,444], "Age":[25,36,31,28] } intial_customers = {"id":[1,2,3,4], "Name":["ww","xx","yy","zz"], "Pass":[55,66,77,88], "Age":[25,36,31,28] } intial_locations = {"id":[1,3,4,6,2,7,8,5], "Name":["A","C","D","F","B","G","H","E"], "Dist_from_origin":[0,4,7,9,15,18,20,23] } intial_cab_positions = { "Location":["D","G","H","A"], "cabid":[1,2,3,4] } cabdrivers_summary = { "cabid":{1: {"Source":["D","E","C"], "Destination":["H","G","B"], "CustomerDetail":[4,2,2], "Fare":[130,50,110], "ZulaCommision":[39,15,33] },2:{"Source":["C","E","D"], "Destination":["B","G","H"], "CustomerDetail":[4,3,2], "Fare":[145,50,187], "ZulaCommision":[87,25,55] }, 3:{"Source":["F","E","D","H"], "Destination":["A","B","G","E"], "CustomerDetail":[2,3,4,7], "Fare":[187,150,145,96], "ZulaCommision":[55,58,36,47] }, 4:{"Source":["A","C","B"], "Destination":["E","H","E"], "CustomerDetail":[5,4,1], "Fare":[125,30,158], "ZulaCommision":[65,5,35] } } } customer_ride_summary = {"custid":{1: { "Source":["A","E","C"], "Destination":["E","G","B"], "Cab Detail":[3,1,1], "Fare":[230,50,110] }, 2: { "Source":["H","E","G"], "Destination":["A","G","H"], "Cab Detail":[4,2,2], "Fare":[220,40,100] }, 3: { "Source":["A","E","C"], "Destination":["E","G","B"], "Cab Detail":[5,3,2], "Fare":[225,45,115] }, 4: { "Source":["H","E","F"], "Destination":["F","H","G"], "Cab Detail":[5,2,3], "Fare":[150,45,86] }, } } cab_summary = {"cabid":{1:{"Total Number of Trips":3, "Total Fare Collected":290, "Total Zula Commision":87 }, 2:{"Total Number of Trips":10, "Total Fare Collected":2900, "Total Zula Commision":1000 }, 3:{"Total Number of Trips":7, "Total Fare Collected":1500, "Total Zula Commision":500 }, 4:{"Total Number of Trips":5, "Total Fare Collected":700, "Total Zula Commision":150 } } } Welcome = ["Welcome to !!*** ZULA***!!","1.Cab driver login","2.Customer login","3.Administration","4.Quit","Please choose a service"] for i in Welcome: print(i) option = int(input()) if option==1: id = int(input("Enter your ID: ")) password = int(input("Enter your password: ")) if id in intial_cab_drivers["id"] and password in intial_cab_drivers["Pass"]: print("Congratulations You are logged in!") inp_ = input("Press 1 to know your summary!\nPress 2 to continue\n") if inp_=="1": cabid = id print("Cabid: ",cabid) print("Cab Driver Name: ",intial_cab_drivers["Name"][cabid-1]) print("Trip Details") print(pd.DataFrame(cabdrivers_summary["cabid"][cabid])) continue else: if id not in intial_cab_drivers["id"]: print("Please Enter Your Id correctly") else: print("Check Your Password and Try Again") continue elif option==2: print("1.Login") print("2.Create Account") print("Choose one option from above") cust = int(input()) # while(1): if cust==1: id = int(input("Enter your ID: ")) password = int(input("Enter your password: ")) if id in intial_customers["id"] and password in intial_customers["Pass"]: # print("Congratulations You are logged in!") while(1): inp_ = input("Press 1 to know your summary!\nPress 2 to continue\n") if inp_=="1": custid = id print("Customerid: ",custid) print("Customer Name: ",intial_customers["Name"][custid-1]) print("Trip Details") print(pd.DataFrame(customer_ride_summary["custid"][custid])) print("Availble Locations are------------------->") print(intial_locations["Name"]) source = input("Choose source location: ").upper() destination = input("Choose 
destination location: ").upper() # if source== destination: # print("Invalid Ride") # continue locs = intial_locations["Name"] dist = intial_locations["Dist_from_origin"] fare = abs(dist[locs.index(source)] - dist[locs.index(destination)])*10 print() print(f"Your Estimasted Fare is {fare}Rs!") print() print("CAB LOCATIONS!!!") print(pd.DataFrame(intial_cab_positions)) print() cabride = input("Press Y if you want to start your ride or Press N to Quit ") if cabride.lower()=="n": break distances = intial_cab_positions["Location"] source_ = dist[locs.index(source)] mini = 10000 cab_location,cabid = "",1000 for i in distances: index = intial_locations["Name"].index(i) temp = intial_locations["Dist_from_origin"][index] dis = temp - source_ if dis < mini: mini = dis cab_location = i cabidindex = intial_cab_positions["Location"].index(i) cabid = intial_cab_positions["cabid"][cabidindex] print(f"Near Available cab is CABID:{cabid},CABLOCATION:{cab_location} ") if cabride.lower()=="y": print("Your Ride Started!") else: if id not in intial_customers["id"]: print("Please Enter Your Id correctly") else: print("Check Your Password and Try Again") n = input("N to quit") if n.lower()=='n': break elif cust==2: id_ = int(input("Enter id ")) name_ = input("Enter Your Name: ") pass_ = input("Set Your Password: ") age_ = input("Enter Your Age") intial_customers["id"].append(id_) intial_customers["Name"].append(name_) intial_customers["Pass"].append(pass_) intial_customers["Age"].append(age_) print("Thank you account has been sucessfully created!") break elif option==3: inp = input("Press 1 to see Cabs Summary") if inp=="1": cabid = int(input("Enter cabid: ")) print("Cabid: ",cabid) print("Cab Driver Name: ",intial_cab_drivers["Name"][cabid-1]) print("Total Number of Trips: ",cab_summary["cabid"][cabid]["Total Number of Trips"]) print("Total Fare Collected: ",cab_summary["cabid"][cabid]["Total Fare Collected"]) print("Total Zula Commision: ",cab_summary["cabid"][cabid]["Total Zula Commision"]) print("Trip Details--->") print(pd.DataFrame(cabdrivers_summary["cabid"][cabid])) print() continue elif option==4: print("Thank you!") break
# ============================================================================= # Copyright (c) 2016, Cisco Systems, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================= class WorkUnit(object): """ A WorkUnit instance defines the Work which will be processed by a worker as defined in the process_pool class. The Job Manager handles the dispatching of the WorkUnit. It allows only one unique instance of the WorkUnit as defined by get_unique_key() to be executed. """ def __init__(self): self.in_progress_jobs = None self.lock = None def process(self, db_session, logger, process_name): try: self.start(db_session, logger, process_name) except Exception: logger.exception("WorkUnit.process() hit exception") finally: if self.in_progress_jobs is not None and self.lock is not None: with self.lock: if self.get_unique_key() in self.in_progress_jobs: self.in_progress_jobs.remove(self.get_unique_key()) def start(self, db_session, logger, process_name): raise NotImplementedError("Children must override start()") def get_unique_key(self): """ Returns an unique value which represents this instance. An example is an unique prefix with the job id from a specific DB table (e.g. email_job_1). """ raise NotImplementedError("Children must override get_unique_key()")
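# A minimal sketch of a concrete WorkUnit, following the pattern described in
# the docstrings above; EmailWorkUnit and the 'email_job_' prefix are
# hypothetical names:
class EmailWorkUnit(WorkUnit):
    def __init__(self, job_id):
        WorkUnit.__init__(self)
        self.job_id = job_id

    def start(self, db_session, logger, process_name):
        # Fetch the job row via db_session and do the actual work here.
        logger.info('%s: sending email for job %d', process_name, self.job_id)

    def get_unique_key(self):
        # Only one EmailWorkUnit per job id may be in flight at a time.
        return 'email_job_%d' % self.job_id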
""" link: https://leetcode.com/problems/word-ladder problem: 给起始单词,结尾单词,与单词列表,问能否每次转换一个字母,使用列表中的单词由起始变换到结尾 solution: 无权最短路图,即BFS。难点在于如何构造图,一个很巧妙的思路,增加虚拟节点。将 hit 的相邻节点记为 hi*, h*t, *it, 将 hot 的相邻节点记为 ho*, h*t, *ot,这样两个节点就存在了相连路径。构造图后做BFS即可。 """ class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: d = collections.defaultdict(list) wordList.append(beginWord) for x in wordList: for k in range(len(x)): aim = x[:k] + "*" + x[k + 1:] d[aim].append(x) d[x].append(aim) s = {beginWord} q = [beginWord] res = 0 while len(q) != 0: res += 1 q2 = [] for word in q: for next_word in d[word]: if next_word == endWord: return (res + 2) // 2 if next_word not in s: s.add(next_word) q2.append(next_word) q = q2 return 0
import numpy as np import pandas as pd import matplotlib.mlab as mlab import matplotlib.pyplot as plt df_train = pd.read_csv('train.csv') train=pd.DataFrame(df_train) train = pd.crosstab(index=train["Type"],columns="count") type = [[1,"Dog"], [2,"Cat"]] pet = pd.DataFrame(type, columns = ['Type','Animal']) results = train.merge(pet,on='Type') r1 = results[['Animal','count']] print("") print("Data from train.csv") print(train) print("-------------------") print("Self-created type key") print(pet) print("-------------------") print('combined data:') print(r1) #print("")
#!/usr/bin/env python import rospy import numpy as np from sensor_msgs.msg import CompressedImage,Image # @UnresolvedImport from duckietown_msgs.msg import AntiInstagramHealth, BoolStamped, AntiInstagramTransform # @UnresolvedImport from anti_instagram.AntiInstagram import * from duckietown_utils.jpg import image_cv_from_jpg from cv_bridge import CvBridge # @UnresolvedImport from line_detector.timekeeper import TimeKeeper class AntiInstagramNode(): def __init__(self): self.node_name = rospy.get_name() self.active = True self.locked = False self.image_pub_switch = rospy.get_param("~publish_corrected_image",False) # Initialize publishers and subscribers self.pub_image = rospy.Publisher("~corrected_image", Image, queue_size=1) self.pub_health = rospy.Publisher("~health", AntiInstagramHealth, queue_size=1,latch=True) self.pub_transform = rospy.Publisher("~transform", AntiInstagramTransform, queue_size=1, latch=True) #self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1) #self.sub_image = rospy.Subscriber("~uncorrected_image",Image,self.cbNewImage,queue_size=1) self.sub_image = rospy.Subscriber("~uncorrected_image", CompressedImage, self.cbNewImage,queue_size=1) self.sub_click = rospy.Subscriber("~click", BoolStamped, self.cbClick, queue_size=1) self.trans_timer = rospy.Timer(rospy.Duration.from_sec(20), self.cbPubTrans, True) # Verbose option self.verbose = rospy.get_param('line_detector_node/verbose',True) # Initialize health message self.health = AntiInstagramHealth() # Initialize transform message self.transform = AntiInstagramTransform() # FIXME: read default from configuration and publish it self.ai_scale = np.array([2.2728408473337893, 2.2728273205024614, 2.272844346401005]) self.ai_shift = np.array([21.47181119272393, 37.14653160247276, 4.089311860796786]) self.ai = AntiInstagram() self.corrected_image = Image() self.bridge = CvBridge() self.image_msg = None self.click_on = False def cbPubTrans(self, _): self.transform.s[0], self.transform.s[1], self.transform.s[2] = self.ai_shift self.transform.s[3], self.transform.s[4], self.transform.s[5] = self.ai_scale self.pub_transform.publish(self.transform) rospy.loginfo('ai: Color transform published.') def cbNewImage(self,image_msg): # memorize image self.image_msg = image_msg if self.image_pub_switch: tk = TimeKeeper(image_msg) cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8") corrected_image_cv2 = self.ai.applyTransform(cv_image) tk.completed('applyTransform') corrected_image_cv2 = np.clip(corrected_image_cv2, 0, 255).astype(np.uint8) self.corrected_image = self.bridge.cv2_to_imgmsg(corrected_image_cv2, "bgr8") tk.completed('encode') self.pub_image.publish(self.corrected_image) tk.completed('published') if self.verbose: rospy.loginfo('ai:\n' + tk.getall()) def cbClick(self, _): # if we have seen an image: if self.image_msg is not None: self.click_on = not self.click_on if self.click_on: self.processImage(self.image_msg) else: self.transform.s = [0,0,0,1,1,1] self.pub_transform.publish(self.transform) rospy.loginfo('ai: Color transform is turned OFF!') def processImage(self,msg): ''' Inputs: msg - CompressedImage - uncorrected image from raspberry pi camera Uses anti_instagram library to adjust msg so that it looks like the same color temperature as a duckietown reference image. Calculates health of the node and publishes the corrected image and the health state. Health somehow corresponds to how good of a transformation it is. 
''' rospy.loginfo('ai: Computing color transform...') tk = TimeKeeper(msg) #cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8") try: cv_image = image_cv_from_jpg(msg.data) except ValueError as e: rospy.loginfo('Anti_instagram cannot decode image: %s' % e) return tk.completed('converted') self.ai.calculateTransform(cv_image) tk.completed('calculateTransform') # if health is much below the threshold value, do not update the color correction and log it. if self.ai.health <= 0.001: # health is not good rospy.loginfo("Health is not good") else: self.health.J1 = self.ai.health self.transform.s[0], self.transform.s[1], self.transform.s[2] = self.ai.shift self.transform.s[3], self.transform.s[4], self.transform.s[5] = self.ai.scale rospy.set_param('antiins_shift', self.ai.shift.tolist()) rospy.set_param('antiins_scale', self.ai.scale.tolist()) self.pub_health.publish(self.health) self.pub_transform.publish(self.transform) rospy.loginfo('ai: Color transform published.') if __name__ == '__main__': # Initialize the node with rospy rospy.init_node('anti_instagram_node', anonymous=False) # Create the NodeName object node = AntiInstagramNode() # Setup proper shutdown behavior #rospy.on_shutdown(node.on_shutdown) # Keep it spinning to keep the node alive rospy.spin()
# coding: utf-8 import types import pymssql from itertools import chain from .abstract import DatabaseAdapter class MSSQLAdapter(DatabaseAdapter): last_table = None def get_connection(self): if hasattr(self, 'connection') and self.connection: return self.connection params = { 'server': self.params.get('host', 'localhost'), 'user': self.params.get('user'), 'password': self.params.get('password'), 'database': self.params.get('database'), 'autocommit': True, } if self.params.get('unix_socket'): params.update({'unix_socket': self.params.get('unix_socket')}) else: params.update({'port': self.params.get('port', 1433)}) conn = pymssql.connect(**params) return conn def foreign_keys_freeze(self): self.query(""" DECLARE @sql AS NVARCHAR(max)=''; select @sql = @sql + 'ALTER INDEX ALL ON [' + t.[name] + '] DISABLE;' + CHAR(13) from sys.tables t where type = 'u'; select @sql = @sql + 'ALTER INDEX ' + i.[name] + ' ON [' + t.[name] + '] REBUILD;' + CHAR(13) from sys.key_constraints i join sys.tables t on i.parent_object_id = t.object_id where i.type = 'PK'; exec dbo.sp_executesql @sql """) def foreign_keys_unfreeze(self): self.query(''' DECLARE @sql AS NVARCHAR(max)='' select @sql = @sql + 'ALTER INDEX ALL ON [' + t.[name] + '] REBUILD;'+CHAR(13) from sys.tables t where type='u' exec dbo.sp_executesql @sql ''') def drop_all(self): self.query('drop database {0} go'.format(self.params.get('database'))) self.query('create database {0} go'.format(self.params.get('database'))) def reset(self): pass def insert(self, table_name, dict_data): # if identity_insert is on, it wont add null values for primary key. if 'id' in dict_data.keys() and dict_data.get('id') is None: del dict_data['id'] placeholders = ', '.join(['%s'] * len(dict_data)) columns = ', '.join(dict_data.keys()) sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (table_name, columns, placeholders) on_sql = f"SET IDENTITY_INSERT {table_name} ON" off_sql = f"SET IDENTITY_INSERT {table_name} OFF" if_exists_sql = f"IF EXISTS (SELECT * FROM [sys].[identity_columns] WHERE [object_id] = OBJECT_ID(N'{table_name}'))" if 'id' in dict_data.keys(): sql = "%s %s; %s; %s;" % (if_exists_sql, on_sql, sql, off_sql) return self.query(sql, tuple(dict_data.values())) def query(self, q: str, params=()): super().query(q, params) return self.cursor.execute(q, params) def column_exists(self, table_name, column_name): self.query(""" SELECT count(*) as count FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME=%s AND COLUMN_NAME=%s """, (table_name, column_name)) return bool(self.fetchone()[0]) def table_exists(self, table_name): self.query(""" SELECT count(*) as table_count FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME=%s """, table_name) return bool(self.fetchone()[0]) def get_table_names(self): self.query(""" SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' ORDER BY 1 """) return list(sum(self.fetchall(), ())) def get_table_schema(self, table_name): self.query(""" SELECT column_name, data_type, is_nullable FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = %s ORDER BY LEN(column_name), column_name ASC """, table_name) schema = [dict(zip([column[0] for column in self.cursor.description], row)) for row in self.cursor.fetchall()] return schema def get_records_count(self, table_name): self.query(""" SELECT count(*) AS count FROM {} """.format(table_name)) fetch = self.fetchone() return int(fetch[0]) if fetch is not None else 0 def get_table_as_json(self, table_name, transformer=None): schema = 
self.get_table_schema(table_name) column_names = [col['column_name'] for col in schema] columns = ', '.join(chain(*zip(map(lambda x: '"%s"' % x, column_names), column_names))) self.query(""" SELECT * FROM {table_name} FOR JSON PATH, INCLUDE_NULL_VALUES """.format(columns=columns, table_name=table_name)) results = '' for row in self.fetchall(): results += row[0] if isinstance(transformer, types.FunctionType): results = transformer(results) return results def fetchone(self): return self.cursor.fetchone() def fetchall(self): return self.cursor.fetchall()
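# A hypothetical usage sketch; the abstract DatabaseAdapter base class (defined
# elsewhere) is assumed to accept the connection params dict and to expose
# self.cursor, neither of which is shown in this file:
if __name__ == "__main__":
    adapter = MSSQLAdapter({
        'host': 'localhost',
        'user': 'sa',
        'password': 'secret',
        'database': 'mydb',
    })
    print(adapter.get_table_names())
    print(adapter.get_records_count('users'))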
class Camera: def __init__(self, game): self.game = game self.dx = 0 self.dy = 0 self.ny = 240 self.is_start = True def start_camera(self): self.dx = -2100 self.dy = -2100 def apply(self, obj): obj.rect.x += self.dx obj.rect.y += self.dy def update(self, x, y): if self.is_start: self.start_camera() self.is_start = False else: self.dx = 0 self.dy = 0 if 260 < self.game.corridor.hero.general_x < 2360: self.dx = -(x - self.game.width // 2 + self.game.corridor.hero.rect.w // 2) elif 20 > self.game.corridor.hero.general_x and \ self.game.corridor.hero.general_y <= 2270: if 360 <= self.game.corridor.hero.general_y <= 380 or \ 1060 <= self.game.corridor.hero.general_y <= 1080 or \ 1760 <= self.game.corridor.hero.general_y <= 1780: self.dy = -700 self.game.corridor.hero.general_y += 600 self.game.corridor.hero.rect.y += 600 if 700 <= self.game.corridor.hero.general_y <= 750 or \ 1400 <= self.game.corridor.hero.general_y <= 1450 or \ 2100 <= self.game.corridor.hero.general_y <= 2150: self.dy = 700 self.game.corridor.hero.general_y -= 440 self.game.corridor.hero.rect.y -= 440
import libres
import threading

from cached_property import cached_property
from contextlib import contextmanager
from libres.modules import errors


missing = object()
required = object()


class StoppableService(object):
    """ Services inheriting from this class have their stop_service method
    called when the service is discarded.

    Note that this only happens when a service is replaced with a new one
    and not when libres is stopped (i.e. this is *not* a deconstructor).

    """

    def stop_service(self):
        pass


class ContextServicesMixin(object):
    """ Provides access methods to the context's services. Expects
    the class that uses the mixin to provide self.context.

    The results are cached for performance.

    """

    @cached_property
    def is_allocation_exposed(self):
        return self.context.get_service('exposure').is_allocation_exposed

    @cached_property
    def generate_uuid(self):
        return self.context.get_service('uuid_generator')

    @cached_property
    def validate_email(self):
        return self.context.get_service('email_validator')

    def clear_cache(self):
        """ Clears the cache of the mixin. """

        try:
            del self.is_allocation_exposed
        except AttributeError:
            pass

        try:
            del self.generate_uuid
        except AttributeError:
            pass

        try:
            del self.validate_email
        except AttributeError:
            pass

    @property
    def session_provider(self):
        return self.context.get_service('session_provider')

    @property
    def session(self):
        """ Returns the current session. """
        return self.session_provider.session()

    def close(self):
        """ Closes the current session. """
        self.session.close()

    @property
    def begin_nested(self):
        return self.session.begin_nested

    def commit(self):
        return self.session.commit()

    def rollback(self):
        return self.session.rollback()


class Context(object):
    """ Used throughout Libres, the context holds settings like the database
    connection string and services like the json dumps/loads functions that
    should be used.

    Contexts allow consumers of the Libres library to override these settings
    and services as they wish. They also make sure that multiple consumers of
    Libres can co-exist in a single process, as each consumer must operate on
    its own context.

    Libres holds all contexts in libres.registry and provides a
    master_context. When a consumer registers its own context, all lookups
    happen on the custom context. If that context can provide a service or a
    setting, it is used. If the custom context can't provide a service or a
    setting, the master_context is used instead. In other words, the custom
    context inherits from the master context.

    Note that contexts are not meant to be changed often. Classes talking to
    the database usually cache data from the context freely. In practice this
    means that after changing the context you should get a fresh
    :class:`~libres.db.scheduler.Scheduler` instance or call
    :meth:`~.ContextServicesMixin.clear_cache`.
    A context may be registered as follows::

        from libres import registry
        my_context = registry.register_context('my_app')

    See also :class:`~libres.context.registry.Registry`

    """

    def __init__(self, name, registry=None, parent=None, locked=False):
        self.name = name
        self.registry = registry or libres.registry
        self.values = {}
        self.parent = parent
        self.locked = locked
        self.thread_lock = threading.RLock()

    def __repr__(self):
        return "<Libres Context(name='{}')>".format(self.name)

    @contextmanager
    def as_current_context(self):
        with self.registry.context(self.name):
            yield

    def switch_to(self):
        self.registry.switch_context(self.name)

    def lock(self):
        with self.thread_lock:
            self.locked = True

    def unlock(self):
        with self.thread_lock:
            self.locked = False

    def get(self, key):
        if key in self.values:
            return self.values[key]
        elif self.parent:
            return self.parent.get(key)
        else:
            return missing

    def set(self, key, value):
        if self.locked:
            raise errors.ContextIsLocked

        with self.thread_lock:
            # If a value already exists it could be a stoppable service.
            # Stoppable services are stopped before they are replaced, so they
            # can clean up after themselves without having to wait for the GC.
            if isinstance(self.values.get(key), StoppableService):
                self.values[key].stop_service()

            self.values[key] = value

    def get_setting(self, name):
        return self.get('settings.{}'.format(name))

    def set_setting(self, name, value):
        with self.thread_lock:
            self.set('settings.{}'.format(name), value)

    def get_service(self, name):
        service_id = '/'.join(('service', name))
        service = self.get(service_id)

        if service is missing:
            raise errors.UnknownService(service_id)

        cache_id = '/'.join(('service', name, 'cache'))
        cache = self.get(cache_id)

        # no cache
        if cache is missing:
            return service(self)
        else:
            # first call, cache it!
            if cache is required:
                self.set(cache_id, service(self))

            # nth call, use cached value
            return self.get(cache_id)

    def set_service(self, name, factory, cache=False):
        with self.thread_lock:
            service_id = '/'.join(('service', name))
            self.set(service_id, factory)

            if cache:
                cache_id = '/'.join(('service', name, 'cache'))
                self.set(cache_id, required)
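# A short sketch of working with a registered context, following the
# register_context example in the docstring above (the context name and the
# 'greeter' service are illustrative):
if __name__ == '__main__':
    my_context = libres.registry.register_context('my_app')

    my_context.set_setting('currency', 'CHF')
    assert my_context.get_setting('currency') == 'CHF'

    # Services are registered as factories taking the context; cache=True
    # runs the factory once and reuses the result on later lookups.
    my_context.set_service('greeter', lambda ctx: 'hello', cache=True)
    assert my_context.get_service('greeter') == 'hello'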
""" COMMAND: SELECT Select objects by id or name for further command processes. """ import command import cache from util import logger from api import APIRequests class Select(command.Command): @staticmethod def get_invoke(): return 'SELECT' @staticmethod def get_args(): return { 'GUILD(S)|CHANNEL(S)|ROLE(S)|USER(S)': True, 'BY NAME [ID|NAME]': False } @staticmethod def get_help_description(): return 'Select an object by ID or NAME for further command operations.' def execute(self, passed_args: list): if len(passed_args) < 1: logger.fatal('MISSING 1. ARGUMENT: GUILD(S)|CHANNEL(S)|ROLE(S)|USER(S)') raise Exception('manual interruption') # if len(passed_args) < 2: # logger.fatal('MISSING 2. ARGUMENT: ID') # raise Exception('manual interruption') api = self.cmd_parser.api_instance by_name = False if len(passed_args) > 2 and passed_args[1].upper() == 'BY' and passed_args[2].upper() == 'NAME': if len(passed_args) < 4: logger.fatal('MISSING ARGUMENT: [NAME]') raise Exception('manual interruption') by_name = True objecttype = passed_args[0].upper() identifier = passed_args[1] if len(passed_args) > 1 else None if by_name: identifier = passed_args[3] def __check_args_length(must: int, argname: str, soft: bool = False) -> bool: if len(passed_args) < must: if not soft: logger.fatal('MISSING ARGUMENT: [%s]' % argname) raise Exception('manual interruption') logger.error('MISSING ARGUMENT: [%s]' % argname) if objecttype == 'GUILD': __check_args_length(2, 'ID') response = api.get_guild(identifier, by_name) APIRequests.check_status_code(response) cache.selected = cache.Selection('GUILD', response.json()) elif objecttype == 'GUILDS': response = api.get_users_guilds() api.check_status_code(response) cache.selected = cache.Selection('GUILDS', response.json()) elif objecttype == 'CHANNEL': __check_args_length(2, 'ID') if by_name and (cache.selected == None or not cache.selected.type == 'GUILD'): logger.fatal('GUILD needs to be selected to select a channel by name') raise Exception('manual interruption') guild_id = cache.selected.data['id'] if cache.selected != None else '' response = api.get_channel(guild_id, identifier, by_name) APIRequests.check_status_code(response) cache.selected = cache.Selection('CHANNEL', response.json()) pass elif objecttype == 'USER': pass elif objecttype == 'ROLE': pass else: logger.error('UNSUPPORTED TYPE: ', objecttype) raise Exception('manual interruption') logger.debug('SELECTED:\n - TYPE: ', cache.selected.type, '\n - DATA: ', cache.selected.data)
from testcases import TestCaseWithFixture as TestCase

from django.http import HttpRequest
from django.contrib.auth.models import User, Permission

from core.models import Note
from tastypie.authorization import Authorization, ReadOnlyAuthorization, DjangoAuthorization
from tastypie import fields
from tastypie.resources import Resource, ModelResource


class NoRulesNoteResource(ModelResource):
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = Authorization()


class ReadOnlyNoteResource(ModelResource):
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = ReadOnlyAuthorization()


class DjangoNoteResource(ModelResource):
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = DjangoAuthorization()


class NotAModel(object):
    name = 'Foo'


class NotAModelResource(Resource):
    name = fields.CharField(attribute='name')

    class Meta:
        resource_name = 'notamodel'
        object_class = NotAModel
        authorization = DjangoAuthorization()


class AuthorizationTestCase(TestCase):
    fixtures = ['note_testdata.json']

    def test_no_rules(self):
        request = HttpRequest()
        for method in ('GET', 'POST', 'PUT', 'DELETE'):
            request.method = method
            self.assertTrue(NoRulesNoteResource()._meta.authorization.is_authorized(request))

    def test_read_only(self):
        request = HttpRequest()
        request.method = 'GET'
        self.assertTrue(ReadOnlyNoteResource()._meta.authorization.is_authorized(request))

        for method in ('POST', 'PUT', 'DELETE'):
            request = HttpRequest()
            request.method = method
            self.assertFalse(ReadOnlyNoteResource()._meta.authorization.is_authorized(request))


class DjangoAuthorizationTestCase(TestCase):
    fixtures = ['note_testdata.json']

    def setUp(self):
        self.add = Permission.objects.get_by_natural_key('add_note', 'core', 'note')
        self.change = Permission.objects.get_by_natural_key('change_note', 'core', 'note')
        self.delete = Permission.objects.get_by_natural_key('delete_note', 'core', 'note')
        self.user = User.objects.all()[0]
        self.user.user_permissions.clear()

    def test_no_perms(self):
        # sanity check: user has no permissions
        self.assertFalse(self.user.get_all_permissions())

        request = HttpRequest()
        request.method = 'GET'
        request.user = self.user

        # with no permissions, api is read-only
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

        for method in ('POST', 'PUT', 'DELETE'):
            request.method = method
            self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_add_perm(self):
        request = HttpRequest()
        request.user = self.user

        # give add permission
        request.user.user_permissions.add(self.add)
        request.method = 'POST'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_change_perm(self):
        request = HttpRequest()
        request.user = self.user

        # give change permission
        request.user.user_permissions.add(self.change)
        request.method = 'PUT'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_delete_perm(self):
        request = HttpRequest()
        request.user = self.user

        # give delete permission
        request.user.user_permissions.add(self.delete)
        request.method = 'DELETE'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_all(self):
        request = HttpRequest()
        request.user = self.user

        request.user.user_permissions.add(self.add)
        request.user.user_permissions.add(self.change)
        request.user.user_permissions.add(self.delete)

        for method in ('GET', 'OPTIONS', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH'):
            request.method = method
            self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_not_a_model(self):
        request = HttpRequest()
        request.user = self.user

        # give add permission
        request.user.user_permissions.add(self.add)
        request.method = 'POST'
        self.assertTrue(NotAModelResource()._meta.authorization.is_authorized(request))

    def test_patch_perms(self):
        request = HttpRequest()
        request.user = self.user
        request.method = 'PATCH'

        # Not enough.
        request.user.user_permissions.add(self.add)
        request.user.refresh_from_db()
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

        # Still not enough.
        request.user.user_permissions.add(self.change)
        request.user.refresh_from_db()
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

        # Much better.
        request.user.user_permissions.add(self.delete)
        request.user = User.objects.get(pk=self.user.pk)
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_unrecognized_method(self):
        request = HttpRequest()
        request.user = self.user

        # Check a non-existent HTTP method.
        request.method = 'EXPLODE'
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))
from django.core.exceptions import ValidationError

from pulpo_forms.fieldtypes.Field import Field
from pulpo_forms.statistics.ListStatistics import ListStatistics


class ListField(Field):
    """
    List field validator, render and analyze methods
    """

    def get_methods(self, **kwargs):
        base = super(ListField, self).get_methods(**kwargs)
        base.append(self.belong_check)
        return base

    def belong_check(self, value, **kwargs):
        v = int(value)
        opt = kwargs['options']
        option_ids = [o['id'] for o in opt]
        if v not in option_ids:
            raise ValidationError("Invalid value, not among options.")

    def check_consistency(self, field):
        options = field.options
        if options == []:
            raise ValidationError("List fields need at least one option.")

    def get_option_labels(self, field):
        return field["options"]

    def get_statistics(self, data_list, field):
        options = self.get_option_labels(field)
        list_statistics = ListStatistics(data_list, options)
        statistics = super(ListField, self).get_statistics(data_list, field)
        statistics.update(list_statistics.getSerializedData())
        return statistics

    def get_options(self, json, f_id):
        for page in json['pages']:
            for field in page['fields']:
                if field['field_id'] == f_id:
                    return field['options']

    class Meta:
        abstract = True
from sqlalchemy import Column, Integer, Float, String, Date, Time

from shared.core.db import Base


class HourlyMainData(Base):
    __tablename__ = 'HourlyMain'

    Id = Column(Integer, primary_key=True, nullable=False)
    StationId = Column(Integer, nullable=False)
    Date = Column(Date, nullable=False)
    Hour = Column(Time, nullable=False)
    HlyAirTmp = Column(Float, nullable=True)
    HlyAirTmpQc = Column(String(50), nullable=True, default='')
    HlyAirTmpUnits = Column(String(50), nullable=True, default='')
    HlyDewPnt = Column(Float, nullable=True)
    HlyDewPntQc = Column(String(50), nullable=True, default='')
    HlyDewPntUnits = Column(String(50), nullable=True, default='')
    HlyEto = Column(Float, nullable=True)
    HlyEtoQc = Column(String(50), nullable=True, default='')
    HlyEtoUnits = Column(String(50), nullable=True, default='')
    HlyNetRad = Column(Float, nullable=True)
    HlyNetRadQc = Column(String(50), nullable=True, default='')
    HlyNetRadUnits = Column(String(50), nullable=True, default='')
    HlyAsceEto = Column(Float, nullable=True)
    HlyAsceEtoQc = Column(String(50), nullable=True, default='')
    HlyAsceEtoUnits = Column(String(50), nullable=True, default='')
    HlyAsceEtr = Column(Float, nullable=True)
    HlyAsceEtrQc = Column(String(50), nullable=True, default='')
    HlyAsceEtrUnits = Column(String(50), nullable=True, default='')
    HlyPrecip = Column(Float, nullable=True)
    HlyPrecipQc = Column(String(50), nullable=True, default='')
    HlyPrecipUnits = Column(String(50), nullable=True, default='')
    HlyRelHum = Column(Float, nullable=True)
    HlyRelHumQc = Column(String(50), nullable=True, default='')
    HlyRelHumUnits = Column(String(50), nullable=True, default='')
    HlyResWind = Column(Float, nullable=True)
    HlyResWindQc = Column(String(50), nullable=True, default='')
    HlyResWindUnits = Column(String(50), nullable=True, default='')
    HlySoilTmp = Column(Float, nullable=True)
    HlySoilTmpQc = Column(String(50), nullable=True, default='')
    HlySoilTmpUnits = Column(String(50), nullable=True, default='')
    HlySolRad = Column(Float, nullable=True)
    HlySolRadQc = Column(String(50), nullable=True, default='')
    HlySolRadUnits = Column(String(50), nullable=True, default='')
    HlyVapPres = Column(Float, nullable=True)
    HlyVapPresQc = Column(String(50), nullable=True, default='')
    HlyVapPresUnits = Column(String(50), nullable=True, default='')
    HlyWindDir = Column(Float, nullable=True)
    HlyWindDirQc = Column(String(50), nullable=True, default='')
    HlyWindDirUnits = Column(String(50), nullable=True, default='')
    HlyWindSpd = Column(Float, nullable=True)
    HlyWindSpdQc = Column(String(50), nullable=True, default='')
    HlyWindSpdUnits = Column(String(50), nullable=True, default='')


class DailyMainData(Base):
    __tablename__ = 'DailyMain'

    Id = Column(Integer, primary_key=True, nullable=False)
    StationId = Column(Integer, nullable=False)
    Date = Column(Date, nullable=False)
    DayAirTmpAvg = Column(Float, nullable=True)
    DayAirTmpAvgQc = Column(String(50), nullable=True, default='')
    DayAirTmpAvgUnits = Column(String(50), nullable=True, default='')
    DayAirTmpMax = Column(Float, nullable=True)
    DayAirTmpMaxQc = Column(String(50), nullable=True, default='')
    DayAirTmpMaxUnits = Column(String(50), nullable=True, default='')
    DayAirTmpMin = Column(Float, nullable=True)
    DayAirTmpMinQc = Column(String(50), nullable=True, default='')
    DayAirTmpMinUnits = Column(String(50), nullable=True, default='')
    DayDewPnt = Column(Float, nullable=True)
    DayDewPntQc = Column(String(50), nullable=True, default='')
    DayDewPntUnits = Column(String(50), nullable=True, default='')
    DayEto = Column(Float, nullable=True)
    DayEtoQc = Column(String(50), nullable=True, default='')
    DayEtoUnits = Column(String(50), nullable=True, default='')
    DayAsceEto = Column(Float, nullable=True)
    DayAsceEtoQc = Column(String(50), nullable=True, default='')
    DayAsceEtoUnits = Column(String(50), nullable=True, default='')
    DayAsceEtr = Column(Float, nullable=True)
    DayAsceEtrQc = Column(String(50), nullable=True, default='')
    DayAsceEtrUnits = Column(String(50), nullable=True, default='')
    DayPrecip = Column(Float, nullable=True)
    DayPrecipQc = Column(String(50), nullable=True, default='')
    DayPrecipUnits = Column(String(50), nullable=True, default='')
    DayRelHumAvg = Column(Float, nullable=True)
    DayRelHumAvgQc = Column(String(50), nullable=True, default='')
    DayRelHumAvgUnits = Column(String(50), nullable=True, default='')
    DayRelHumMax = Column(Float, nullable=True)
    DayRelHumMaxQc = Column(String(50), nullable=True, default='')
    DayRelHumMaxUnits = Column(String(50), nullable=True, default='')
    DayRelHumMin = Column(Float, nullable=True)
    DayRelHumMinQc = Column(String(50), nullable=True, default='')
    DayRelHumMinUnits = Column(String(50), nullable=True, default='')
    DaySoilTmpAvg = Column(Float, nullable=True)
    DaySoilTmpAvgQc = Column(String(50), nullable=True, default='')
    DaySoilTmpAvgUnits = Column(String(50), nullable=True, default='')
    DaySoilTmpMax = Column(Float, nullable=True)
    DaySoilTmpMaxQc = Column(String(50), nullable=True, default='')
    DaySoilTmpMaxUnits = Column(String(50), nullable=True, default='')
    DaySoilTmpMin = Column(Float, nullable=True)
    DaySoilTmpMinQc = Column(String(50), nullable=True, default='')
    DaySoilTmpMinUnits = Column(String(50), nullable=True, default='')
    DaySolRadAvg = Column(Float, nullable=True)
    DaySolRadAvgQc = Column(String(50), nullable=True, default='')
    DaySolRadAvgUnits = Column(String(50), nullable=True, default='')
    DaySolRadNet = Column(Float, nullable=True)
    DaySolRadNetQc = Column(String(50), nullable=True, default='')
    DaySolRadNetUnits = Column(String(50), nullable=True, default='')
    DayVapPresAvg = Column(Float, nullable=True)
    DayVapPresAvgQc = Column(String(50), nullable=True, default='')
    DayVapPresAvgUnits = Column(String(50), nullable=True, default='')
    DayVapPresMax = Column(Float, nullable=True)
    DayVapPresMaxQc = Column(String(50), nullable=True, default='')
    DayVapPresMaxUnits = Column(String(50), nullable=True, default='')
    DayWindEne = Column(Float, nullable=True)
    DayWindEneQc = Column(String(50), nullable=True, default='')
    DayWindEneUnits = Column(String(50), nullable=True, default='')
    DayWindEse = Column(Float, nullable=True)
    DayWindEseQc = Column(String(50), nullable=True, default='')
    DayWindEseUnits = Column(String(50), nullable=True, default='')
    DayWindNne = Column(Float, nullable=True)
    DayWindNneQc = Column(String(50), nullable=True, default='')
    DayWindNneUnits = Column(String(50), nullable=True, default='')
    DayWindNnw = Column(Float, nullable=True)
    DayWindNnwQc = Column(String(50), nullable=True, default='')
    DayWindNnwUnits = Column(String(50), nullable=True, default='')
    DayWindRun = Column(Float, nullable=True)
    DayWindRunQc = Column(String(50), nullable=True, default='')
    DayWindRunUnits = Column(String(50), nullable=True, default='')
    DayWindSpdAvg = Column(Float, nullable=True)
    DayWindSpdAvgQc = Column(String(50), nullable=True, default='')
    DayWindSpdAvgUnits = Column(String(50), nullable=True, default='')
    DayWindSsw = Column(Float, nullable=True)
    DayWindSswQc = Column(String(50), nullable=True, default='')
    DayWindSswUnits = Column(String(50), nullable=True, default='')
    DayWindSse = Column(Float, nullable=True)
    DayWindSseQc = Column(String(50), nullable=True, default='')
    DayWindSseUnits = Column(String(50), nullable=True, default='')
    DayWindWnw = Column(Float, nullable=True)
    DayWindWnwQc = Column(String(50), nullable=True, default='')
    DayWindWnwUnits = Column(String(50), nullable=True, default='')
    DayWindWsw = Column(Float, nullable=True)
    DayWindWswQc = Column(String(50), nullable=True, default='')
    DayWindWswUnits = Column(String(50), nullable=True, default='')
#!/usr/bin/env python

from __future__ import division
from __future__ import print_function

from builtins import range
from past.utils import old_div
import sys

from forcebalance.molecule import *

# Script to generate virtual sites and rename atoms in .gro file.

M = Molecule(sys.argv[1])

if 'M' in M.elem:
    print("Virtual sites already exist")
    sys.exit()

num_mol = int(M.na / 3)

for i in range(num_mol)[::-1]:
    v = i * 3 + 3
    M.add_virtual_site(v, resid=i + 1, elem='M', atomname='MW', resname='SOL', pos=i * 3)

M.replace_peratom('resname', 'HOH', 'SOL')
M.replace_peratom_conditional('resname', 'SOL', 'atomname', 'H1', 'HW1')
M.replace_peratom_conditional('resname', 'SOL', 'atomname', 'H2', 'HW2')
M.replace_peratom_conditional('resname', 'SOL', 'atomname', 'O', 'OW')
M.write('new.gro')
import datetime

from django.utils import timezone
from django.conf import settings
from rest_framework_jwt.settings import api_settings

expires_delta = api_settings.JWT_REFRESH_EXPIRATION_DELTA - datetime.timedelta(seconds=200)


def jwt_response_handler(token, user=None, request=None):
    return {
        'token': token,
        'user': user.username,
        'expires': timezone.now() + expires_delta
    }
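# A hedged sketch of the payload this handler returns; the token and username
# are made-up values:
#
#     jwt_response_handler('eyJhbGciOi...', user=request.user)
#     # -> {'token': 'eyJhbGciOi...',
#     #     'user': 'alice',
#     #     'expires': <timezone-aware datetime, refresh delta minus 200s from now>}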
import zeit.cms.testing
import zeit.content.article.testing


def test_suite():
    return zeit.cms.testing.FunctionalDocFileSuite(
        'edit.landing.txt',
        'edit.txt',
        'edit.form.txt',
        package='zeit.content.article.edit.browser',
        layer=zeit.content.article.testing.WSGI_LAYER)
"""Auxiliar functions that may be used in most modules""" from typing import List import numpy as np def compute_permutation_distance( distance_matrix: np.ndarray, permutation: List[int] ) -> float: """Compute the total route distance of a given permutation Parameters ---------- distance_matrix Distance matrix of shape (n x n) with the (i, j) entry indicating the distance from node i to j. It does not need to be symmetric permutation A list with nodes from 0 to n - 1 in any order Returns ------- Total distance of the path given in ``permutation`` for the provided ``distance_matrix`` Notes ----- Suppose the permutation [0, 1, 2, 3], with four nodes. The total distance of this path will be from 0 to 1, 1 to 2, 2 to 3, and 3 back to 0. This can be fetched from a distance matrix using: distance_matrix[ind1, ind2], where ind1 = [0, 1, 2, 3] # the FROM nodes ind2 = [1, 2, 3, 0] # the TO nodes This can easily be generalized to any permutation by using ind1 as the given permutation, and moving the first node to the end to generate ind2. """ ind1 = permutation ind2 = permutation[1:] + permutation[:1] return distance_matrix[ind1, ind2].sum()
'''
test_var = False

if test_var == True:
    print("okay")
else:
    print("this is not true")

number_a = 500.6
number_b = 100.4

if number_a > number_b:
    print(number_a, "is bigger than", number_b)
else:
    print(number_b, "is bigger than", number_a)

# name = input("what's your name? ")
# print("your name is", name)

def Multiply(num1, num2):
    result = num1 * num2
    return result

temp = Multiply(2, 5)
print(">>", temp)
'''

text = float("1665.5")
print(text * 52.5)

action = input("?")
num1 = float(input('number 1'))
num2 = float(input('number 2'))
result = 0

if action == '+':
    result = num1 + num2
elif action == '*':
    result = num1 * num2
else:
    print('this is not a valid operation')

print(result)

number = 515.5
text = "1235.2"
number += float(text)
text += str(number)
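# A sketch (not in the original) extending the same if/elif pattern with the
# remaining arithmetic operators, guarding against division by zero:
#
#     if action == '-':
#         result = num1 - num2
#     elif action == '/':
#         if num2 != 0:
#             result = num1 / num2
#         else:
#             print('cannot divide by zero')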
#!/usr/bin/env python
"""Example script"""
from __future__ import division, print_function

import random
import time

from simanneal import Annealer
import click
import numpy as np
import rasterio
from rasterio.plot import reshape_as_image
from rio_color.operations import parse_operations
from rio_color.utils import to_math_type


def time_string(seconds):
    """Returns time in seconds as a string formatted HHHH:MM:SS."""
    s = int(round(seconds))  # round to nearest second
    h, s = divmod(s, 3600)  # get hours and remainder
    m, s = divmod(s, 60)  # split remainder into minutes and seconds
    return "%2i:%02i:%02i" % (h, m, s)


def progress_report(
    curr, best, curr_score, best_score, step, totalsteps, accept, improv, elaps, remain
):
    """Report progress"""
    text = """
Current Formula {curr}
    (hist distance {curr_score})
Best Formula {best}
    (hist distance {best_score})
Step {step} of {totalsteps}
Acceptance Rate : {accept} %
Improvement Rate: {improv} %
Time {elaps} ( {remain} Remaining)""".format(
        **locals()
    )
    return text


# Plot globals
fig = None
txt = None
imgs = []


class ColorEstimator(Annealer):
    """Optimizes color using simulated annealing"""

    keys = "gamma_red,gamma_green,gamma_blue,contrast".split(",")

    def __init__(self, source, reference, state=None):
        """Create a new instance"""
        self.src = source.copy()
        self.ref = reference.copy()
        if not state:
            params = dict(gamma_red=1.0, gamma_green=1.0, gamma_blue=1.0, contrast=10)
        else:
            if self._validate(state):
                params = state
            else:
                raise ValueError("invalid state")
        super(ColorEstimator, self).__init__(params)

    def validate(self):
        """Validate keys."""
        # todo validate values bt 0..1
        for k in self.keys:
            if k not in self.state:
                return False

    def move(self):
        """Create a state change."""
        k = random.choice(self.keys)
        multiplier = random.choice((0.95, 1.05))
        invalid_key = True
        while invalid_key:
            # make sure bias doesn't exceed 1.0
            if k == "bias":
                if self.state[k] > 0.909:
                    k = random.choice(self.keys)
                    continue
            invalid_key = False
        newval = self.state[k] * multiplier
        self.state[k] = newval

    def cmd(self, state):
        """Get color formula representation of the state."""
        ops = (
            "gamma r {gamma_red:.2f}, gamma g {gamma_green:.2f}, gamma b {gamma_blue:.2f}, "
            "sigmoidal rgb {contrast:.2f} 0.5".format(**state)
        )
        return ops

    def apply_color(self, arr, state):
        """Apply color formula to an array."""
        ops = self.cmd(state)
        for func in parse_operations(ops):
            arr = func(arr)
        return arr

    def energy(self):
        """Calculate state's energy."""
        arr = self.src.copy()
        arr = self.apply_color(arr, self.state)

        scores = [histogram_distance(self.ref[i], arr[i]) for i in range(3)]

        # Important: scale by 100 for readability
        return sum(scores) * 100

    def to_dict(self):
        """Serialize as a dict."""
        return dict(best=self.best_state, current=self.state)

    def update(self, step, T, E, acceptance, improvement):
        """Print progress."""
        if acceptance is None:
            acceptance = 0
        if improvement is None:
            improvement = 0
        if step > 0:
            elapsed = time.time() - self.start
            remain = (self.steps - step) * (elapsed / step)
            # print('Time {} ({} Remaining)'.format(time_string(elapsed), time_string(remain)))
        else:
            elapsed = 0
            remain = 0

        curr = self.cmd(self.state)
        curr_score = float(E)
        best = self.cmd(self.best_state)
        best_score = self.best_energy

        report = progress_report(
            curr,
            best,
            curr_score,
            best_score,
            step,
            self.steps,
            acceptance * 100,
            improvement * 100,
            time_string(elapsed),
            time_string(remain),
        )
        print(report)

        if fig:
            imgs[1].set_data(
                reshape_as_image(self.apply_color(self.src.copy(), self.state))
            )
            imgs[2].set_data(
                reshape_as_image(self.apply_color(self.src.copy(), self.best_state))
            )
            if txt:
                txt.set_text(report)
            fig.canvas.draw()


def histogram_distance(arr1, arr2, bins=None):
    """
    This function returns the sum of the squared error
    Parameters:
        two arrays constrained to 0..1
    Returns:
        sum of the squared error between the histograms
    """
    eps = 1e-6
    assert arr1.min() > 0 - eps
    assert arr1.max() < 1 + eps
    assert arr2.min() > 0 - eps
    assert arr2.max() < 1 + eps

    if not bins:
        bins = [x / 10 for x in range(11)]

    hist1 = np.histogram(arr1, bins=bins)[0] / arr1.size
    hist2 = np.histogram(arr2, bins=bins)[0] / arr2.size

    assert abs(hist1.sum() - 1.0) < eps
    assert abs(hist2.sum() - 1.0) < eps

    sqerr = (hist1 - hist2) ** 2
    return sqerr.sum()


def calc_downsample(w, h, target=400):
    """Calculate downsampling value."""
    if w > h:
        return h / target
    elif h >= w:
        return w / target


@click.command()
@click.argument("source")
@click.argument("reference")
@click.option("--downsample", "-d", type=int, default=None)
@click.option("--steps", "-s", type=int, default=5000)
@click.option("--plot/--no-plot", default=True)
def main(source, reference, downsample, steps, plot):
    """Given a source image and a reference image,
    Find the rio color formula which results in an output
    with similar histogram to the reference image.

    Uses simulated annealing to determine optimal settings.

    Increase the --downsample option to speed things up.
    Increase the --steps to get better results (longer runtime).
    """
    global fig, txt, imgs

    click.echo("Reading source data...", err=True)
    with rasterio.open(source) as src:
        if downsample is None:
            ratio = calc_downsample(src.width, src.height)
        else:
            ratio = downsample
        w = int(src.width // ratio)
        h = int(src.height // ratio)
        rgb = src.read((1, 2, 3), out_shape=(3, h, w))
        orig_rgb = to_math_type(rgb)

    click.echo("Reading reference data...", err=True)
    with rasterio.open(reference) as ref:
        if downsample is None:
            ratio = calc_downsample(ref.width, ref.height)
        else:
            ratio = downsample
        w = int(ref.width / ratio)
        h = int(ref.height / ratio)
        rgb = ref.read((1, 2, 3), out_shape=(3, h, w))
        ref_rgb = to_math_type(rgb)

    click.echo("Annealing...", err=True)
    est = ColorEstimator(orig_rgb, ref_rgb)

    if plot:
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(20, 10))
        fig.suptitle("Color Formula Optimization", fontsize=18, fontweight="bold")
        txt = fig.text(0.02, 0.05, "foo", family="monospace", fontsize=16)
        type(txt)
        axs = (
            fig.add_subplot(1, 4, 1),
            fig.add_subplot(1, 4, 2),
            fig.add_subplot(1, 4, 3),
            fig.add_subplot(1, 4, 4),
        )
        fig.tight_layout()
        axs[0].set_title("Source")
        axs[1].set_title("Current Formula")
        axs[2].set_title("Best Formula")
        axs[3].set_title("Reference")
        imgs.append(axs[0].imshow(reshape_as_image(est.src)))
        imgs.append(axs[1].imshow(reshape_as_image(est.src)))
        imgs.append(axs[2].imshow(reshape_as_image(est.src)))
        imgs.append(axs[3].imshow(reshape_as_image(est.ref)))
        fig.show()

    schedule = dict(
        tmax=25.0,  # Max (starting) temperature
        tmin=1e-4,  # Min (ending) temperature
        steps=steps,  # Number of iterations
        updates=steps / 20,  # Number of updates
    )

    est.set_schedule(schedule)
    est.save_state_on_exit = False
    optimal, score = est.anneal()
    optimal["energy"] = score
    ops = est.cmd(optimal)
    click.echo("rio color -j4 {} {} {}".format(source, "/tmp/output.tif", ops))


if __name__ == "__main__":
    main()
class Card:
    def __init__(self, card_type):
        """
        card_type 0 is a skipbo card
        card_type 1-12 are the normal value cards
        actual value indicates the value a skipbo card takes on after it is played
        """
        self.card_type = card_type
        self.actual_value = card_type if card_type > 0 else 0
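if __name__ == "__main__":
    # Usage sketch (not part of the original file): a Skip-Bo wild card starts
    # with no value and takes one on when played; a numbered card keeps its value.
    wild = Card(0)
    assert wild.actual_value == 0
    wild.actual_value = 7  # played as a 7

    five = Card(5)
    assert five.actual_value == 5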
import numpy as np

from lagom.envs.spaces import Box
from lagom.envs.wrappers import ObservationWrapper


class PartialFlattenDict(ObservationWrapper):
    """
    Returns flattened observation from a dictionary space with partial keys into a Box space.
    """
    def __init__(self, env, keys):
        super().__init__(env)
        self.keys = keys

        spaces = self.env.observation_space.spaces
        assert all([isinstance(space, Box) for space in spaces.values()])  # enforce all Box spaces

        # Calculate dimensionality
        shape = (int(np.sum([spaces[key].flat_dim for key in self.keys])), )
        self._observation_space = Box(low=-np.inf, high=np.inf, shape=shape, dtype=np.float32)

    def process_observation(self, observation):
        return np.concatenate([observation[key].ravel() for key in self.keys])

    @property
    def observation_space(self):
        return self._observation_space
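# Usage sketch (not in the original module), assuming a lagom environment whose
# dictionary observation space maps keys to Box spaces; the key names and
# make_env helper are hypothetical:
#
#     env = PartialFlattenDict(make_env(), keys=['position', 'velocity'])
#     obs = env.reset()  # 1-D float32 vector: position and velocity concatenated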
#
# Copyright 2022 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
import environ

ROOT_DIR = environ.Path(__file__) - 3

ENVIRONMENT = environ.Env()

if ENVIRONMENT.bool("DJANGO_READ_DOT_ENV_FILE", default=False):
    # Operating System Environment variables have precedence over variables
    # defined in the .env file, that is to say variables from the .env files
    # will only be used if not defined as environment variables.
    ENV_FILE = str(ROOT_DIR.path(".env"))
    print(f"Loading : {ENV_FILE}")
    ENVIRONMENT.read_env(ENV_FILE)
    print("The .env file has been loaded.")
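# A hedged illustration of the expected setup; the variable values are examples:
#
#     $ export DJANGO_READ_DOT_ENV_FILE=True
#
# with a .env file three directories above this module, e.g.:
#
#     DATABASE_URL=postgres://user:pass@localhost/db
#     DEBUG=False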
# coding: utf-8

# # Performance of various Machine Learning Algorithms on Electrical Impedance Tomography Images
#
# ## Copyright (c) 2018, Faststream Technologies
#
# ## Author: Sudhanva Narayana

# In[1]:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os

from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report

CURR_DIR = os.path.dirname(os.path.abspath('__file__'))
PARENT_DIR = os.path.abspath(os.path.join(CURR_DIR, os.pardir))

df = pd.read_csv(PARENT_DIR + '\\assets\\datasets\\eit_data.csv', index_col=[0],
                 header=[0], skiprows=[1], skipinitialspace=True)

X = df.loc[:, ['gray', 'violet', 'blue', 'green', 'yellow', 'orange', 'red', 'brown']].values.astype(float)
y = df.loc[:, ['target']].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

y_train = y_train.ravel()

sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)

# ### Classifiers

# In[2]:

classifiers = {}

# ### KNN

# In[3]:

classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['knn'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Decision Tree

# In[4]:

classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['decision_tree'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Kernel SVM

# In[5]:

classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['kernel_svm'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Logistic Regression

# In[6]:

classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['logistic_regression'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Naive Bayes

# In[7]:

classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['naive_bayes'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Random Forest

# In[8]:

classifier = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['random_forest'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# ### Support Vector Machines

# In[9]:

classifier = SVC(kernel='linear', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifiers['svm'] = classifier.score(X_test, y_test)
print(classifier.score(X_test, y_test))

# In[10]:

print(classifiers)

# In[11]:

values = list(classifiers.values())
labels = list(classifiers.keys())
values = [round(i * 100, 2) for i in values]

# print(values)
# print(labels)

index = np.arange(len(labels))

# In[12]:

plt.figure(figsize=(15, 10))
plt.bar(index, values)
plt.xlabel('Machine Learning Algorithms', fontsize=20)
plt.ylabel('Performance (%)', fontsize=20)
plt.xticks(index, labels, rotation=30, fontsize=15)
plt.yticks(fontsize=20)
plt.title('Performance of Machine Learning algorithms on EIT Images', fontsize=20)
plt.show()

# In[13]:

plt.figure(figsize=(15, 10))
plt.plot(index, values)
plt.xlabel('Machine Learning Algorithms', fontsize=20)
plt.ylabel('Performance (%)', fontsize=20)
plt.xticks(index, labels, rotation=30, fontsize=15)
plt.yticks(fontsize=20)
plt.title('Performance of Machine Learning algorithms on EIT Images', fontsize=20)
plt.show()
import os
import random
from glob import glob

import numpy as np
import torch
import nibabel as nib
import scipy.io
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms.functional as F

import elastic_transform as elt


def load_nifty(full_file_name):
    img = nib.load(full_file_name)
    # dtype = img.get_data_dtype()  # F8 is 64-bit floating-point Number
    data = img.get_fdata()
    return data


def tensor_2_numpy_image(tensor):
    img_out = np.moveaxis(tensor.numpy()[:, :, :].squeeze(), 0, -1)
    return img_out


def to_img(batch_of_images):
    img = batch_of_images[0]
    img = tensor_2_numpy_image(img)
    img -= np.min(img[:])
    img *= 255.0 / img.max()
    img = img.astype(np.uint8)
    return img


def cvt1to3channels(one_channel):
    return np.stack((one_channel,) * 3, axis=-1)


def load_dataset(src_path, mask_path, validation_portion=0.05):
    # 1- set the paths
    src_format = 'mat'
    mask_format = 'nii'
    src_file_format = '*.{}'.format(src_format)
    mask_file_format = '*.{}'.format(mask_format)

    all_src_img = glob(os.path.join(src_path, src_file_format))
    all_mask_img = glob(os.path.join(mask_path, mask_file_format))
    all_src_img.sort()
    all_mask_img.sort()

    # 2- Find the matching pairs
    src_msk_file_pair_list = []
    for i, src_f in enumerate(all_src_img):
        base_src_name = os.path.basename(src_f)
        base_src_name = base_src_name.split('.')[0]
        src_id1, src_id2 = base_src_name.split('_')[1:3]
        for j, msk_f in enumerate(all_mask_img):
            base_msk_name = os.path.basename(msk_f)
            base_msk_name = base_msk_name.split('.')[0]
            msk_id1, msk_id2 = base_msk_name.split('_')[1:3]
            if src_id1 == msk_id1 and src_id2 == msk_id2:
                src_msk_file_pair_list.append([src_f, msk_f])

    # 3- load every single frame and store it into a list
    src = []
    msk = []
    for i in range(len(src_msk_file_pair_list)):
        src_f, msk_f = src_msk_file_pair_list[i]
        mat = scipy.io.loadmat(src_f)
        if 'N' in mat:
            src_mat = mat["N"]
            msk_mat = load_nifty(msk_f)
            for j in range(min(src_mat.shape[2], msk_mat.shape[2])):
                src.append(np.uint8(src_mat[:, :, j]))
                msk.append(np.uint8(msk_mat[:, :, j]))

    src = np.array(src)
    msk = np.array(msk)

    validation_size = int(len(src) * validation_portion)
    train_size = len(src) - validation_size
    src_train, src_val = np.split(src, [train_size])
    msk_train, msk_val = np.split(msk, [train_size])

    return src_train, msk_train, src_val, msk_val


# 5- Define Dataset model
class MouseMRIDS(Dataset):
    def __init__(self, src, msk, transform=None, augmentation=True):
        self.src = src
        self.msk = msk
        indices_with_problem = np.where(np.logical_and(
            np.min(self.msk, axis=(1, 2)) == 0,
            np.max(self.msk, axis=(1, 2)) == 1) == False)
        if len(indices_with_problem) > 0:
            for i in indices_with_problem:
                self.src = np.delete(self.src, i, axis=0)
                self.msk = np.delete(self.msk, i, axis=0)
        self.transform = transform
        self.augmentation = augmentation

    def __len__(self):
        return len(self.src)

    def __getitem__(self, idx):
        if random.random() > 0.5 or not self.augmentation:
            src_img = self.src[idx]
            msk_img = self.msk[idx]
        else:
            src_img, msk_img = elt.get_elastic_transforms(self.src[idx], self.msk[idx])

        src_im = Image.fromarray(np.uint8(cvt1to3channels(src_img)))
        msk_im = Image.fromarray(np.uint8(cvt1to3channels(msk_img)))

        # Apply the same transformation to the two images
        if self.transform:
            if random.random() > 0.5 and self.augmentation:
                src_im = F.vflip(src_im)
                msk_im = F.vflip(msk_im)
            if random.random() > 0.5 and self.augmentation:
                src_im = F.hflip(src_im)
                msk_im = F.hflip(msk_im)
            if random.random() > 0.5 and self.augmentation:
                angle = np.random.choice([90, 180, 270])
                src_im = F.rotate(src_im, angle)
                msk_im = F.rotate(msk_im, angle)

            src_im = self.transform(src_im)
            msk_im = self.transform(msk_im)
            msk_im = (msk_im - torch.min(msk_im)) / torch.max(msk_im)

        return src_im, \
            msk_im[1, :, :].expand(1, -1, -1).type(torch.float)
import functools
from typing import Type, Generic, TypeVar, Dict, Any, Optional

from drf_yasg.utils import swagger_auto_schema
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework_dataclasses.serializers import DataclassSerializer

from thairod.utils.decorators import swagger_example

T = TypeVar('T')


class TGSerializer(DataclassSerializer[T], Generic[T]):

    @classmethod
    def parse_request(cls, request: Request) -> T:
        raise NotImplementedError()


class AutoSerialize:

    def to_response(self) -> Response:
        return Response(self.to_data())

    def to_data(self) -> Dict[str, Any]:
        return self.__class__.serializer()(self).data

    @classmethod
    def from_data(cls: Type[T], data: Dict[str, Any]) -> T:
        ser = cls.serializer()(data=data)
        ser.is_valid(raise_exception=True)
        return ser.save()

    @classmethod
    def from_get_request(cls: Type[T], request: Request) -> T:
        return cls.from_data(request.query_params.dict())

    @classmethod
    def from_post_request(cls: Type[T], request: Request) -> T:
        return cls.from_data(request.data)

    @classmethod
    @functools.lru_cache
    def serializer(cls: Type[T]) -> Type[TGSerializer[T]]:

        class Serializer(TGSerializer[cls]):
            class Meta:
                dataclass = cls
                ref_name = cls.__name__

            @classmethod
            def parse_request(cls, request: Request) -> T:
                ser = cls(data=request.data)
                ser.is_valid(raise_exception=True)
                return ser.save()

        if hasattr(cls, 'example') and callable(cls.example):
            return swagger_example(cls.example())(Serializer)
        else:
            return Serializer


def swagger_auto_serialize_schema(body_type: Optional[Type[AutoSerialize]],
                                  response_type: Type[AutoSerialize], **kwds):
    return swagger_auto_schema(
        request_body=body_type.serializer() if body_type is not None else None,
        responses={200: response_type.serializer()},
        **kwds
    )
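# Usage sketch (not part of the original module): a dataclass mixing in
# AutoSerialize gains the (de)serialization helpers above. The class and field
# names are hypothetical; a DRF request/response cycle is assumed.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class Pong(AutoSerialize):
#         message: str
#
#         @classmethod
#         def example(cls):
#             return cls(message='pong')
#
#     Pong(message='pong').to_response()  # -> Response({'message': 'pong'})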
"""A config store holds the configuration data for running system-of-systems models with smif: - model runs - system-of-systems models - model definitions - strategies - scenarios and scenario variants - narratives """ from abc import ABCMeta, abstractmethod class ConfigStore(metaclass=ABCMeta): """A ConfigStore must implement each of the abstract methods defined in this interface """ # region Model runs @abstractmethod def read_model_runs(self): """Read all system-of-system model runs Returns ------- list[~smif.controller.modelrun.ModelRun] """ @abstractmethod def read_model_run(self, model_run_name): """Read a system-of-system model run Parameters ---------- model_run_name : str Returns ------- ~smif.controller.modelrun.ModelRun """ @abstractmethod def write_model_run(self, model_run): """Write system-of-system model run Parameters ---------- model_run : ~smif.controller.modelrun.ModelRun """ @abstractmethod def update_model_run(self, model_run_name, model_run): """Update system-of-system model run Parameters ---------- model_run_name : str model_run : ~smif.controller.modelrun.ModelRun """ @abstractmethod def delete_model_run(self, model_run_name): """Delete a system-of-system model run Parameters ---------- model_run_name : str """ # endregion # region System-of-systems models @abstractmethod def read_sos_models(self): """Read all system-of-system models Returns ------- list[~smif.model.sos_model.SosModel] """ @abstractmethod def read_sos_model(self, sos_model_name): """Read a specific system-of-system model Parameters ---------- sos_model_name : str Returns ------- ~smif.model.sos_model.SosModel """ @abstractmethod def write_sos_model(self, sos_model): """Write system-of-system model Parameters ---------- sos_model : ~smif.model.sos_model.SosModel """ @abstractmethod def update_sos_model(self, sos_model_name, sos_model): """Update system-of-system model Parameters ---------- sos_model_name : str sos_model : ~smif.model.sos_model.SosModel """ @abstractmethod def delete_sos_model(self, sos_model_name): """Delete a system-of-system model Parameters ---------- sos_model_name : str """ # endregion # region Models @abstractmethod def read_models(self): """Read all models Returns ------- list[~smif.model.Model] """ @abstractmethod def read_model(self, model_name): """Read a model Parameters ---------- model_name : str Returns ------- ~smif.model.Model """ @abstractmethod def write_model(self, model): """Write a model Parameters ---------- model : ~smif.model.Model """ @abstractmethod def update_model(self, model_name, model): """Update a model Parameters ---------- model_name : str model : ~smif.model.Model """ @abstractmethod def delete_model(self, model_name): """Delete a model Parameters ---------- model_name : str """ # endregion # region Scenarios @abstractmethod def read_scenarios(self): """Read scenarios Returns ------- list[~smif.model.ScenarioModel] """ @abstractmethod def read_scenario(self, scenario_name): """Read a scenario Parameters ---------- scenario_name : str Returns ------- ~smif.model.ScenarioModel """ @abstractmethod def write_scenario(self, scenario): """Write scenario Parameters ---------- scenario : ~smif.model.ScenarioModel """ @abstractmethod def update_scenario(self, scenario_name, scenario): """Update scenario Parameters ---------- scenario_name : str scenario : ~smif.model.ScenarioModel """ @abstractmethod def delete_scenario(self, scenario_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str """ # endregion # region 
Scenario Variants @abstractmethod def read_scenario_variants(self, scenario_name): """Read variants of a given scenario Parameters ---------- scenario_name : str Returns ------- list[dict] """ @abstractmethod def read_scenario_variant(self, scenario_name, variant_name): """Read a scenario variant Parameters ---------- scenario_name : str variant_name : str Returns ------- dict """ @abstractmethod def write_scenario_variant(self, scenario_name, variant): """Write scenario to project configuration Parameters ---------- scenario_name : str variant : dict """ @abstractmethod def update_scenario_variant(self, scenario_name, variant_name, variant): """Update scenario to project configuration Parameters ---------- scenario_name : str variant_name : str variant : dict """ @abstractmethod def delete_scenario_variant(self, scenario_name, variant_name): """Delete scenario from project configuration Parameters ---------- scenario_name : str variant_name : str """ # endregion # region Narratives @abstractmethod def read_narrative(self, sos_model_name, narrative_name): """Read narrative from sos_model Parameters ---------- sos_model_name : str narrative_name : str """ # endregion # region Strategies @abstractmethod def read_strategies(self, modelrun_name): """Read strategies for a given model run Parameters ---------- model_run_name : str Returns ------- list[dict] """ @abstractmethod def write_strategies(self, modelrun_name, strategies): """Write strategies for a given model_run Parameters ---------- model_run_name : str strategies : list[dict] """ # endregion
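# --- Illustration (not part of smif) ---
# A minimal dict-backed sketch of the model-run region of the contract above,
# assuming model runs are plain dicts with a 'name' key. A real ConfigStore
# subclass must implement every abstract method; only five are shown here.
class InMemoryModelRunStore:
    def __init__(self):
        self._model_runs = {}

    def read_model_runs(self):
        return list(self._model_runs.values())

    def read_model_run(self, model_run_name):
        return self._model_runs[model_run_name]

    def write_model_run(self, model_run):
        self._model_runs[model_run['name']] = model_run

    def update_model_run(self, model_run_name, model_run):
        self._model_runs[model_run_name] = model_run

    def delete_model_run(self, model_run_name):
        del self._model_runs[model_run_name]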
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT

import os

path_prefix = os.path.join('cvat', 'apps', 'annotation')

BUILTIN_FORMATS = (
    os.path.join(path_prefix, 'cvat.py'),
    os.path.join(path_prefix, 'pascal_voc.py'),
    os.path.join(path_prefix, 'yolo.py'),
    os.path.join(path_prefix, 'coco.py'),
    os.path.join(path_prefix, 'mask.py'),
    os.path.join(path_prefix, 'tfrecord.py'),
)
dataFile = open("Day10_Data.txt") asteroidCoordinates =[] for corY,line in enumerate(dataFile): for corX,c in enumerate(line): if(c=="#"): asteroidCoordinates.append([corX,corY]) curMax=0 astBaseX = astBaseY =0 for astBase in range( len(asteroidCoordinates)): seenDivisionsR = [] seenDivisionsL =[] seenAsteroids =[] foundRight = foundLeft = foundUp = foundDown = False foundCount =0 for observeAst in range(len(asteroidCoordinates)): if(asteroidCoordinates[astBase][0] >= asteroidCoordinates[observeAst][0]): if(astBase == observeAst): continue asteroidX = asteroidCoordinates[observeAst][0] asteroidY = asteroidCoordinates[observeAst][1] xDifference = asteroidCoordinates[astBase][0] - asteroidX yDifference = asteroidCoordinates[astBase][1] - asteroidY if(yDifference==0): if(xDifference>0): if(not foundLeft): foundLeft = True seenAsteroids.append([asteroidX, asteroidY]) else: if(not foundRight): foundRight=True seenAsteroids.append([asteroidX, asteroidY]) elif (xDifference == 0): if (yDifference > 0): if(not foundUp): foundUp = True seenAsteroids.append([asteroidX, asteroidY]) else: if(not foundDown): foundDown = True seenAsteroids.append([asteroidX, asteroidY]) else: div = xDifference/yDifference if(div not in seenDivisionsR): seenDivisionsR.append(div) seenAsteroids.append([asteroidX,asteroidY]) else: if (astBase == observeAst): continue asteroidX = asteroidCoordinates[observeAst][0] asteroidY = asteroidCoordinates[observeAst][1] xDifference = asteroidCoordinates[astBase][0] - asteroidX yDifference = asteroidCoordinates[astBase][1] - asteroidY if (yDifference == 0): if (xDifference > 0): if(not foundLeft): foundLeft = True seenAsteroids.append([asteroidX, asteroidY]) else: if(not foundRight): foundRight = True seenAsteroids.append([asteroidX, asteroidY]) elif (xDifference == 0): if (yDifference > 0): if(not foundUp): foundUp = True seenAsteroids.append([asteroidX, asteroidY]) else: if(not foundDown): foundDown = True seenAsteroids.append([asteroidX, asteroidY]) else: div = xDifference / yDifference if (div not in seenDivisionsL): seenDivisionsL.append(div) seenAsteroids.append([asteroidX, asteroidY]) foundCount =len(seenAsteroids) print(foundCount,seenAsteroids) if(foundCount>curMax): astBaseX=asteroidCoordinates[astBase][0] astBaseY = asteroidCoordinates[astBase][1] curMax = foundCount print(curMax,astBaseX,astBaseY)
# -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2006-2011 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

# Just a quick'n'dirty ascii art (I'll do a nice PyX plot later on):
#
#
# node1 *
#       | \
#       |  \ neighbor2
#       |   \
#       |    \
# neighbor3 |element * node3
#       |    /
#       |   /
#       |  / neighbor1
#       | /
# node2 *

import struct, binascii, zlib, os, tempfile
import bbox, canvasitem, color, pdfwriter, unit


class node_pt:

    def __init__(self, coords_pt, value):
        self.coords_pt = coords_pt
        self.value = value


class node(node_pt):

    def __init__(self, coords, value):
        node_pt.__init__(self, [unit.topt(coord) for coord in coords], value)


class element:

    def __init__(self, nodes, neighbors=None):
        self.nodes = nodes
        self.neighbors = neighbors


def coords24bit_pt(coords_pt, min_pt, max_pt):
    return struct.pack(">I", int((coords_pt-min_pt)*16777215.0/(max_pt-min_pt)))[1:]


class PDFGenericResource(pdfwriter.PDFobject):

    def __init__(self, type, name, content):
        pdfwriter.PDFobject.__init__(self, type, name)
        self.content = content

    def write(self, file, writer, registry):
        file.write(self.content)


class mesh(canvasitem.canvasitem):

    def __init__(self, elements, check=1):
        self.elements = elements
        if check:
            colorspacestring = ""
            for element in elements:
                if len(element.nodes) != 3:
                    raise ValueError("triangular mesh expected")
                try:
                    for node in element.nodes:
                        if not colorspacestring:
                            colorspacestring = node.value.colorspacestring()
                        elif node.value.colorspacestring() != colorspacestring:
                            raise ValueError("color space mismatch")
                except AttributeError:
                    raise ValueError("gray, rgb or cmyk color values expected")
                for node in element.nodes:
                    if len(node.coords_pt) != 2:
                        raise ValueError("two dimensional coordinates expected")

    def bbox(self):
        return bbox.bbox_pt(min([node.coords_pt[0] for element in self.elements for node in element.nodes]),
                            min([node.coords_pt[1] for element in self.elements for node in element.nodes]),
                            max([node.coords_pt[0] for element in self.elements for node in element.nodes]),
                            max([node.coords_pt[1] for element in self.elements for node in element.nodes]))

    def data(self, bbox):
        return "".join(["\000%s%s%s" % (coords24bit_pt(node.coords_pt[0], bbox.llx_pt, bbox.urx_pt),
                                        coords24bit_pt(node.coords_pt[1], bbox.lly_pt, bbox.ury_pt),
                                        node.value.to8bitstring())
                        for element in self.elements for node in element.nodes])

    def processPS(self, file, writer, context, registry, bbox):
        if writer.mesh_as_bitmap:
            from pyx import bitmap, canvas
            import Image
            c = canvas.canvas()
            c.insert(self)
            i = Image.open(c.pipeGS("pngalpha", resolution=writer.mesh_as_bitmap_resolution, seekable=True))
            i.load()
            b = bitmap.bitmap_pt(self.bbox().llx_pt, self.bbox().lly_pt, i)
            # we slightly shift the bitmap to re-center it, as the bitmap might contain some additional border
            # unfortunately we need to construct another bitmap instance for that ...
            b = bitmap.bitmap_pt(self.bbox().llx_pt + 0.5*(self.bbox().width_pt()-b.bbox().width_pt()),
                                 self.bbox().lly_pt + 0.5*(self.bbox().height_pt()-b.bbox().height_pt()), i)
            b.processPS(file, writer, context, registry, bbox)
        else:
            thisbbox = self.bbox()
            bbox += thisbbox
            file.write("""<< /ShadingType 4
/ColorSpace %s
/BitsPerCoordinate 24
/BitsPerComponent 8
/BitsPerFlag 8
/Decode [%f %f %f %f %s]
/DataSource currentfile
/ASCIIHexDecode filter
/FlateDecode filter
>>
shfill\n""" % (self.elements[0].nodes[0].value.colorspacestring(),
               thisbbox.llx_pt, thisbbox.urx_pt, thisbbox.lly_pt, thisbbox.ury_pt,
               " ".join(["0 1" for value in self.elements[0].nodes[0].value.to8bitstring()])))
            file.write(binascii.b2a_hex(zlib.compress(self.data(thisbbox))))
            file.write(">\n")

    def processPDF(self, file, writer, context, registry, bbox):
        if writer.mesh_as_bitmap:
            from pyx import bitmap, canvas
            import Image
            c = canvas.canvas()
            c.insert(self)
            i = Image.open(c.pipeGS("pngalpha", resolution=writer.mesh_as_bitmap_resolution, seekable=True))
            i.load()
            b = bitmap.bitmap_pt(self.bbox().llx_pt, self.bbox().lly_pt, i)
            # we slightly shift the bitmap to re-center it, as the bitmap might contain some additional border
            # unfortunately we need to construct another bitmap instance for that ...
            b = bitmap.bitmap_pt(self.bbox().llx_pt + 0.5*(self.bbox().width_pt()-b.bbox().width_pt()),
                                 self.bbox().lly_pt + 0.5*(self.bbox().height_pt()-b.bbox().height_pt()), i)
            b.processPDF(file, writer, context, registry, bbox)
        else:
            thisbbox = self.bbox()
            bbox += thisbbox
            d = self.data(thisbbox)
            if writer.compress:
                filter = "/Filter /FlateDecode\n"
                d = zlib.compress(d)
            else:
                filter = ""
            name = "shading-%s" % id(self)
            shading = PDFGenericResource("shading", name, """<< /ShadingType 4
/ColorSpace %s
/BitsPerCoordinate 24
/BitsPerComponent 8
/BitsPerFlag 8
/Decode [%f %f %f %f %s]
/Length %i
%s>>
stream
%s
endstream\n""" % (self.elements[0].nodes[0].value.colorspacestring(),
                  thisbbox.llx_pt, thisbbox.urx_pt, thisbbox.lly_pt, thisbbox.ury_pt,
                  " ".join(["0 1" for value in self.elements[0].nodes[0].value.to8bitstring()]),
                  len(d), filter, d))
            registry.add(shading)
            registry.addresource("Shading", name, shading)
            file.write("/%s sh\n" % name)
'''OpenGL extension ARB.transform_feedback3

This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback3 to provide a more
Python-friendly API

Overview (from the spec)

	This extension further extends the transform feedback capabilities
	provided by the EXT_transform_feedback, NV_transform_feedback, and
	NV_transform_feedback2 extensions.  Those extensions provided a new
	transform feedback mode, where selected vertex attributes can be recorded
	to a buffer object for each primitive processed by the GL.

	This extension provides increased flexibility in how vertex attributes
	can be written to buffer objects.  Previous extensions allowed
	applications to record a set of attributes interleaved into a single
	buffer object (interleaved mode) or to record into multiple objects, but
	with only a single attribute per buffer (separate mode).  This extension
	extends interleaved mode to write into multiple buffers, with multiple
	attributes per buffer.  This capability is supported for all three styles
	of transform feedback:

	- "EXT"-style GLSL transform feedback (EXT_transform_feedback), where a
	  list of varyings is provided prior to linking a program object and is
	  used whenever that program object is used.

	- "NV"-style GLSL transform feedback (NV_transform_feedback), where
	  "locations" of active varyings are queried after linking and are then
	  passed to a function that sets the active transform feedback varyings
	  for the program object.  Unlike the "EXT"-style mode, the set of
	  varyings to capture can be changed without relinking.

	- Transform feedback for fixed-function or assembly vertex/geometry
	  shaders (NV_transform_feedback), where applications specify a set of
	  canonical attribute enums/numbers to capture.

	Additionally, this extension adds new support for multiple separate
	vertex streams.  New geometry shader functionality provided by the
	ARB_gpu_shader5 and NV_gpu_program5 extensions allows geometry shaders
	to direct each vertex arbitrarily at a specified vertex stream.  For
	example, a geometry program might write each "regular" vertex it emits
	to one vertex stream while writing some per-primitive data it computes
	to a second vertex stream.  This extension allows applications to
	choose a vertex stream for each buffer object it writes to, and allows
	the vertices written to each vertex stream to be recorded in separate
	buffer objects.  Only one stream may be selected for rasterization,
	and in the initial implementation, the geometry shader output topology
	must be POINTS if multiple streams are used.  When geometry shaders
	are not used, or when an old geometry shader not writing multiple
	streams is used, all vertices produced by the GL are directed at the
	stream numbered zero.

	The set of transform feedback-related query targets is extended to
	accommodate multiple vertex streams, so it is possible to count the
	number of processed and recorded primitives for each stream separately.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback3.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback3 import *
from OpenGL.raw.GL.ARB.transform_feedback3 import _EXTENSION_NAME

def glInitTransformFeedback3ARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

glGetQueryIndexediv=wrapper.wrapper(glGetQueryIndexediv).setOutput(
    'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
""" Meshing: Make and plot a 3D prism mesh """ from fatiando import mesher from fatiando.vis import myv mesh = mesher.PrismMesh(bounds=(-2, 2, -3, 3, 0, 1), shape=(4,4,4)) myv.figure() plot = myv.prisms(mesh) axes = myv.axes(plot) myv.show()
import click


@click.command()
def main():
    print("This is the CLI!")


if __name__ == '__main__':
    main()
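# Expected behaviour when run from a shell (the file name is hypothetical):
#
#     $ python cli.py
#     This is the CLI!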
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_integer('h_dim', default=32, help='Hidden dim in various models.')
flags.DEFINE_integer('rnn_dim', default=256, help='RNN hidden dim.')
flags.DEFINE_integer('rnn_n_layers', default=2, help='Number of layers for RNNs.')
flags.DEFINE_float('rnn_drop', default=0.1, help='Dropout rate in RNNs.')
flags.DEFINE_integer('n_latent', default=24, help='Latent dimension for vaes.')
flags.DEFINE_integer('n_batch', default=128, help='Minibatch size to train.')
flags.DEFINE_integer('visualize_every', default=10, help='Frequency of visualization.')
flags.DEFINE_integer('n_iter', default=200000,
                     help='Number of iterations to train. Might not be used if n_epoch is used.')
flags.DEFINE_integer('n_epoch', default=50,
                     help='Number of epochs to train. Might not be used if n_iter is used.')
flags.DEFINE_integer('n_workers', default=4, help='Sets num workers for data loaders.')
flags.DEFINE_integer('seed', default=0, help='Sets global seed.')
flags.DEFINE_string('vis_root', default='vis', help='Root folder for visualization and logs.')
flags.DEFINE_float('decay', default=0.99, help='Set decay rate for optimizers.')
flags.DEFINE_float('lr', default=1e-3, help='Set learning rate for optimizers.')
flags.DEFINE_bool('debug', default=False, help='Enables debug mode.')
flags.DEFINE_bool('highdrop', default=False, help='Enables high dropout to encourage copy.')
flags.DEFINE_bool('highdroptest', default=False, help='Applies high dropout in test as well.')
flags.DEFINE_float('highdropvalue', default=0., help='High dropout value to encourage copying.')
flags.DEFINE_bool('copy', default=False, help='Enable copy in seq2seq models.')
flags.DEFINE_string('model_path', default='', help='Model path to load a pretrained model.')
flags.DEFINE_bool('extract_codes', default=False,
                  help='Extract VQVAE codes for training and test set given a pretrained vae.')
flags.DEFINE_bool('filter_model', default=False, help='To run filter model experiments.')
flags.DEFINE_bool('test', default=False, help='Only runs evaluations.')
flags.DEFINE_string('tensorboard', default=None, help='Use tensorboard for logging losses.')
flags.DEFINE_bool('kl_anneal', default=False, help='Enables kl annealing.')
flags.DEFINE_integer('decoder_reset', default=-1,
                     help='Enables decoder reset for vae to prevent posterior collapse.')
flags.DEFINE_string('resume', default='', help='Path to the main model to resume training.')
flags.DEFINE_float('gclip', default=-1, help='Gradient clip.')
flags.DEFINE_integer('gaccum', default=1, help='Gradient accumulation.')
flags.DEFINE_integer('warmup_steps', default=-1, help='Noam warmup_steps.')
""" Compared with model_baseline, do not use correlation output for skip link Compared to model_baseline_fixed, added return values to test whether nsample is set reasonably. """ import tensorflow as tf import numpy as np import math import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(BASE_DIR, '../../utils')) sys.path.append(os.path.join(BASE_DIR, '../..')) sys.path.append(os.path.join(BASE_DIR, '../../tf_ops/sampling')) import tf_util from net_utils import * def placeholder_inputs(batch_size, num_point, num_frames): pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point * num_frames, 3 + 3)) labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point * num_frames)) labelweights_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point * num_frames)) masks_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point * num_frames)) return pointclouds_pl, labels_pl, labelweights_pl, masks_pl def get_model(point_cloud, num_frames, is_training, bn_decay=None): """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """ end_points = {} batch_size = point_cloud.get_shape()[0].value num_point = point_cloud.get_shape()[1].value // num_frames l0_xyz = point_cloud[:, :, 0:3] l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)], \ axis=-2) l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1) RADIUS1 = np.array([0.98, 0.99, 1.0], dtype='float32') RADIUS2 = RADIUS1 * 2 RADIUS3 = RADIUS1 * 4 RADIUS4 = RADIUS1 * 8 l1_xyz, l1_time, l1_points, l1_indices = meteor_direct_module(l0_xyz, l0_time, l0_points, npoint=2048, radius=RADIUS1, nsample=32, mlp=[32,32,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1') l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2') l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,512], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3') l4_xyz, l4_time, l4_points, l4_indices = meteor_direct_module(l3_xyz, l3_time, l3_points, npoint=64, radius=RADIUS4, nsample=32, mlp=[256,256,1024], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer4') # Feature Propagation layers l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1') l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2') l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3') l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128], is_training, bn_decay, scope='fa_layer4') ##### debug net = tf_util.conv1d(l0_points, 12, 1, padding='VALID', activation_fn=None, scope='fc2') return net, end_points def get_loss(pred, label, mask, end_points, label_weights): """ pred: BxNx3, label: BxN, mask: BxN """ classify_loss = tf.losses.sparse_softmax_cross_entropy( labels=label, \ logits=pred, \ weights=label_weights, \ reduction=tf.losses.Reduction.NONE) classify_loss = tf.reduce_sum(classify_loss * mask) / (tf.reduce_sum(mask) + 1) tf.summary.scalar('classify 
loss', classify_loss) tf.add_to_collection('losses', classify_loss) return classify_loss if __name__=='__main__': with tf.Graph().as_default(): inputs = tf.zeros((32,1024*2,6)) outputs = get_model(inputs, tf.constant(True)) print(outputs)
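# Hedged demo (not in the original file): build the graph with the helpers
# above and run one forward/loss evaluation. Assumes a TF 1.x runtime and that
# the custom meteor/pointnet ops from net_utils are compiled and importable.
def _demo_forward_pass():
    batch_size, num_point, num_frames = 4, 1024, 2
    with tf.Graph().as_default():
        pc_pl, labels_pl, weights_pl, masks_pl = placeholder_inputs(
            batch_size, num_point, num_frames)
        pred, end_points = get_model(pc_pl, num_frames, tf.constant(False))
        loss = get_loss(pred, labels_pl, masks_pl, end_points, weights_pl)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            feed = {
                pc_pl: np.random.rand(batch_size, num_point * num_frames, 6),
                labels_pl: np.zeros((batch_size, num_point * num_frames), np.int32),
                weights_pl: np.ones((batch_size, num_point * num_frames), np.float32),
                masks_pl: np.ones((batch_size, num_point * num_frames), np.float32),
            }
            # Prints the scalar loss for the random batch.
            print(sess.run([pred, loss], feed_dict=feed)[1])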
""" TODO module docstring """ from re import fullmatch from typing import Optional from datetime import timedelta, datetime from jose import JWTError, jwt from passlib.hash import bcrypt from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm from fastapi import APIRouter, Depends, HTTPException, status from app.utils.user_db import UserDB from app.utils.message import Message, log router = APIRouter(prefix="/api/authorization", tags=["Authorizations"]) user_db = UserDB() SECRET_KEY = "7505d3e581d01c02fd31667cdc67cdb64173a9d4f715e73bf0a8e196fa02a15c" ALGORITHM = "HS256" ACCESS_TOKEN_EXPIRE_MINUTES = 30 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="authorization/login") def verify_password(plain_password, hashed_password): """ TODO function docstring """ return bcrypt.verify(plain_password, hashed_password) def hash_password(plain_password): """ TODO function docstring """ return bcrypt.hash(plain_password) def create_access_token(data: dict, expires_delta: Optional[timedelta] = None): """ TODO function docstring """ to_encode = data.copy() if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta(minutes=15) to_encode.update({"exp": expire}) encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt def authenticate_user(username: str, password: str): """ Look for user in user_db.json (TinyDB). Parameters: username (str): username from form password (str): password from form Returns: bool: False if a user doesn't exist dict: Dict with user info if it does exist """ user = user_db.get_user_data(username) if isinstance(user, Message): log(user) return False if not verify_password(password, user["password_hash"]): return False return user async def get_current_user(token: str = Depends(oauth2_scheme)): """ TODO function docstring """ credentials_exception = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials", headers={"WWW-Authenticate": "Bearer"}, ) try: payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) username: str = payload.get("username") if username is None: raise credentials_exception except JWTError as jwt_error: raise credentials_exception from jwt_error user = user_db.get_user_data(username) if isinstance(user, Message): log(user) raise credentials_exception return user @router.post("/login") async def generate_token(form: OAuth2PasswordRequestForm = Depends()): """ TODO function docstring """ user = authenticate_user(form.username, form.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect username or password", headers={"WWW-Authenticate": "Bearer"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={"username": user["username"]}, expires_delta=access_token_expires ) return {"access_token": access_token, "token_type": "bearer"} @router.post("/register") async def create_user(form: OAuth2PasswordRequestForm = Depends()): """ TODO function docstring """ username = form.username password = form.password email = form.scopes[0] if not fullmatch("[A-Za-z0-9-_]+", username): raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Username must contain only upper and lower case characters, " + "numbers, and symbols - or _ ", headers={"WWW-Authenticate": "Bearer"}, ) if len(password) < 8: raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Password must be at least 8 
characters long.", headers={"WWW-Authenticate": "Bearer"}, ) if not fullmatch("^[a-zA-Z0-9]+@[a-zA-Z0-9]+.[A-Za-z]+$", email): raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Incorrect e-mail address.", headers={"WWW-Authenticate": "Bearer"}, ) if user_db.does_user_exist(username): raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Given user already exists", headers={"WWW-Authenticate": "Bearer"}, ) return user_db.add_user(form.username, hash_password(form.password), email)
from webapp import db, login_manager
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin  # provides is_authenticated, is_logged_in, etc.


@login_manager.user_loader  # if user is authenticated, then....
def load_user(user_id):
    return Users.query.get(user_id)


class Users(db.Model, UserMixin):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    password_hash = db.Column(db.String(500))

    def __init__(self, username, password):
        self.username = username
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return f"Username: {self.username}"


class Abonnenten(db.Model):  # followers
    __tablename__ = 'abonnenten'
    abonnenten_url = db.Column(db.String(100), primary_key=True)
    datum = db.Column(db.DateTime, default=datetime.utcnow)

    def __init__(self, abonnenten_url):
        self.abonnenten_url = abonnenten_url

    def __repr__(self):
        return f"Abonnent: {self.abonnenten_url}"


class Abonniert(db.Model):  # accounts this profile follows
    __tablename__ = 'abonniert'
    abonniet_url = db.Column(db.String(100), primary_key=True)
    datum = db.Column(db.DateTime, default=datetime.utcnow)

    def __init__(self, abonniet_url):
        self.abonniet_url = abonniet_url

    def __repr__(self):
        return f"Abonniert: {self.abonniet_url}"


class Source(db.Model):
    __tablename__ = 'source'
    id = db.Column(db.Integer, primary_key=True)
    source_url = db.Column(db.String(100), index=True)
    targets_total = db.Column(db.Integer)
    datum = db.Column(db.DateTime, default=datetime.utcnow)
    targets_raw = db.relationship('Targets_raw', backref='targets_raw_quelle')
    # The original pointed this second relationship at Targets_raw as well,
    # which is almost certainly a copy-paste slip; it should target Targets_done.
    targets_done = db.relationship('Targets_done', backref='targets_done_quelle')

    def __init__(self, source_url):
        self.source_url = source_url

    def __repr__(self):
        return f"Target-Source: {self.source_url} from: {self.datum}"


class Targets_raw(db.Model):
    __tablename__ = 'targets_raw'
    id = db.Column(db.Integer, primary_key=True)
    target_url = db.Column(db.String(100), index=True)
    source_id = db.Column(db.Integer, db.ForeignKey('source.id'))

    def __init__(self, target_url, source_id):
        self.target_url = target_url
        self.source_id = source_id

    def __repr__(self):
        return f"Target-Account: {self.target_url} and Source-ID: {self.source_id}"


class Targets_done(db.Model):
    __tablename__ = 'targets_done'
    id = db.Column(db.Integer, primary_key=True)
    source_id = db.Column(db.Integer, db.ForeignKey('source.id'))
    target_url = db.Column(db.String(100), index=True)
    target_abonnenten = db.Column(db.Integer)
    target_abonniert = db.Column(db.Integer)
    match = db.Column(db.String(10))
    datum_bearbeitet = db.Column(db.DateTime, default=datetime.utcnow)
    pics_liked = db.Column(db.Integer)
    followed = db.Column(db.DateTime)
    unfollowed = db.Column(db.DateTime)
    followed_back = db.Column(db.DateTime)
    t5_indicator = db.Column(db.String(3))
    t1_indicator = db.Column(db.String(3))
    t5_timestamp = db.Column(db.DateTime)
    t1_timestamp = db.Column(db.DateTime)

    def __init__(self, target_url, target_abonnenten, target_abonniert, source_id):
        self.target_url = target_url
        self.target_abonnenten = target_abonnenten
        self.target_abonniert = target_abonniert
        self.source_id = source_id

    def __repr__(self):
        return (f"Target-URL: {self.target_url} processed on {self.datum_bearbeitet}, "
                f"followers: {self.target_abonnenten}, following: {self.target_abonniert}")


class Statistiken(db.Model):
    __tablename__ = "statistik"
    id = db.Column(db.Integer, primary_key=True)
    source_id = db.Column(db.Integer)
    targets_total = db.Column(db.Integer)
    pics_liked = db.Column(db.Integer)
    followed = db.Column(db.Integer)
    unfollowed = db.Column(db.Integer)
    followed_back = db.Column(db.Integer)

    def __init__(self, source_id, targets_total):
        self.source_id = source_id
        self.targets_total = targets_total


class Counter(db.Model):
    __tablename__ = "counter"
    # Use a callable so the default is evaluated per insert, not once at import
    # time (the original passed datetime.now().date() directly).
    datum = db.Column(db.DateTime, default=lambda: datetime.now().date(), primary_key=True)
    like_counter = db.Column(db.Integer)
    follow_counter = db.Column(db.Integer)


class Blacklist(db.Model):
    __tablename__ = "blacklist"
    id = db.Column(db.Integer, primary_key=True)
    url = db.Column(db.String(100))
    datum = db.Column(db.DateTime, default=lambda: datetime.now().date())

    def __init__(self, url):
        self.url = url


class Historical_follower(db.Model):
    __tablename__ = "historical_follower"
    id = db.Column(db.Integer, primary_key=True)
    target_url = db.Column(db.String(100))
    datum = db.Column(db.DateTime, default=lambda: datetime.now().date())

    def __init__(self, target_url):
        self.target_url = target_url


class Tasks(db.Model):
    __tablename__ = "tasks"
    task_id = db.Column(db.String(72), primary_key=True)
    task_type = db.Column(db.String(21))
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    taskid = db.relationship('Taskstatus', backref="status")

    def __init__(self, task_id, task_type):
        self.task_id = task_id
        self.task_type = task_type


class Taskstatus(db.Model):
    __tablename__ = "taskstatus"
    id = db.Column(db.Integer, primary_key=True)
    taskid = db.Column(db.String(72), db.ForeignKey('tasks.task_id'))
    target_url = db.Column(db.String(100))
    check0 = db.Column(db.String(100))
    check1 = db.Column(db.String(100))
    check2 = db.Column(db.String(100))
    check3 = db.Column(db.String(100))
    check4 = db.Column(db.String(100))
    check5 = db.Column(db.String(100))
    check6 = db.Column(db.String(100))
    match = db.Column(db.String(4))
    followed = db.Column(db.DateTime)
    unfollowed = db.Column(db.DateTime)
    pics_liked = db.Column(db.Integer)
    t5_timestamp = db.Column(db.DateTime)
    t1_timestamp = db.Column(db.DateTime)

    def __init__(self, target_url):
        self.target_url = target_url
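# Hedged usage sketch (not in the original): create the schema and verify the
# password-hash round trip. The Flask `app` object is passed in rather than
# imported, since its location inside `webapp` is not shown here.
def _demo_create_user(app):
    with app.app_context():
        db.create_all()
        db.session.add(Users("alice", "correcthorse"))
        db.session.commit()
        assert Users.query.filter_by(username="alice").first().check_password("correcthorse")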
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
Pan and zoom interaction to plug on a matplotlib Figure.

Interaction:
- Zoom in/out with the mouse wheel
- Pan figures by dragging the mouse with left button pressed
- Select a zoom-in area by dragging the mouse with right button pressed

It provides a figure_pz function to create a Figure with interaction.

Example:

.. code-block:: python

    import matplotlib.pyplot as plt
    from mpl_interaction import figure_pz

    fig = figure_pz()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot((1, 2, 1))
    plt.show()

The :class:`PanAndZoom` class can be used to add interaction to an existing
Figure.

Example:

.. code-block:: python

    import matplotlib.pyplot as plt
    from mpl_interaction import PanAndZoom

    fig = plt.figure()
    pan_zoom = PanAndZoom(fig)  # Add support for pan and zoom

    ax = fig.add_subplot(1, 1, 1)
    ax.plot((1, 2, 1))
    plt.show()

Known limitations:
- Only support linear and log scale axes.
- Zoom area not working well with keep aspect ratio.
- Interfere with matplotlib toolbar.
"""
import logging
import math
import warnings
import weakref

import matplotlib.pyplot as _plt
import numpy
from PySide2.QtCore import Qt
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QAction, QLabel

from ... import Dict

__all__ = ['figure_pz', 'MplInteraction', 'PanAndZoom']


class MplInteraction(object):
    """Base class for class providing interaction to a matplotlib Figure."""

    def __init__(self, figure):
        """
        Args:
            figure (figure): The matplotlib figure to attach the behavior to.
        """
        self._fig_ref = weakref.ref(figure)
        self._cids = []

    def __del__(self):
        """Disconnect."""
        self.disconnect()

    def _add_connection(self, event_name, callback):
        """Called to add a connection to an event of the figure.

        Args:
            event_name (str): The matplotlib event name to connect to.
            callback (callback): The callback to register to this event.
        """
        cid = self.figure.canvas.mpl_connect(event_name, callback)
        self._cids.append(cid)

    def disconnect(self):
        """Disconnect interaction from Figure."""
        if self._fig_ref is not None:
            figure = self._fig_ref()
            if figure is not None:
                for cid in self._cids:
                    figure.canvas.mpl_disconnect(cid)
            self._fig_ref = None

    @property
    def figure(self):
        """The Figure this interaction is connected to or None if not connected."""
        return self._fig_ref() if self._fig_ref is not None else None

    def _axes_to_update(self, event):
        """Returns two sets of Axes to update according to event.

        Takes care of multiple axes and shared axes.

        Args:
            event (MouseEvent): Matplotlib event to consider

        Returns:
            tuple: Axes for which to update xlimits and ylimits.
                2-tuple of set (xaxes, yaxes)
        """
        x_axes, y_axes = set(), set()

        # Go through all axes to enable zoom for multiple axes subplots
        for ax in self.figure.axes:
            if ax.contains(event)[0]:
                # For twin x axes, makes sure the zoom is applied once
                shared_x_axes = set(ax.get_shared_x_axes().get_siblings(ax))
                if x_axes.isdisjoint(shared_x_axes):
                    x_axes.add(ax)

                # For twin y axes, makes sure the zoom is applied once
                shared_y_axes = set(ax.get_shared_y_axes().get_siblings(ax))
                if y_axes.isdisjoint(shared_y_axes):
                    y_axes.add(ax)

        return x_axes, y_axes

    def _draw(self):
        """Convenient method to redraw the figure."""
        self.figure.canvas.draw()


class ZoomOnWheel(MplInteraction):
    """Class providing zoom on wheel interaction to a matplotlib Figure.

    This class extends the `MplInteraction` class.
    Supports subplots, twin Axes and log scales.
    """

    def __init__(self, figure=None, scale_factor=1.1):
        """
        Args:
            figure (figure): The matplotlib figure to attach the behavior to.
            scale_factor (float): The scale factor to apply on wheel event.
        """
        super(ZoomOnWheel, self).__init__(figure)
        self._add_connection('scroll_event', self._on_mouse_wheel)
        self.scale_factor = scale_factor

    @staticmethod
    def _zoom_range(begin, end, center, scale_factor, scale):
        """Compute a 1D range zoomed around center.

        Args:
            begin (float): The begin bound of the range
            end (float): The end bound of the range
            center (float): The center of the zoom (i.e., invariant point)
            scale_factor (float): The scale factor to apply
            scale (str): The scale of the axis

        Returns:
            tuple: The zoomed range (min, max)
        """
        if begin < end:
            min_, max_ = begin, end
        else:
            min_, max_ = end, begin

        if scale == 'linear':
            old_min, old_max = min_, max_
        elif scale == 'log':
            old_min = numpy.log10(min_ if min_ > 0. else numpy.nextafter(0, 1))
            center = numpy.log10(center if center > 0. else numpy.nextafter(0, 1))
            old_max = numpy.log10(max_) if max_ > 0. else 0.
        else:
            logging.warning('Zoom on wheel not implemented for scale "%s"' % scale)
            return begin, end

        offset = (center - old_min) / (old_max - old_min)
        range_ = (old_max - old_min) / scale_factor
        new_min = center - offset * range_
        new_max = center + (1. - offset) * range_

        if scale == 'log':
            try:
                new_min, new_max = 10.**float(new_min), 10.**float(new_max)
            except OverflowError:  # Limit case
                new_min, new_max = min_, max_
            if new_min <= 0. or new_max <= 0.:  # Limit case
                new_min, new_max = min_, max_

        if begin < end:
            return new_min, new_max
        else:
            return new_max, new_min

    def _on_mouse_wheel(self, event):
        """Mouse wheel event."""
        if event.step > 0:
            scale_factor = self.scale_factor
        else:
            scale_factor = 1. / self.scale_factor

        # Go through all axes to enable zoom for multiple axes subplots
        x_axes, y_axes = self._axes_to_update(event)

        for ax in x_axes:
            transform = ax.transData.inverted()
            xdata, ydata = transform.transform_point((event.x, event.y))
            xlim = ax.get_xlim()
            xlim = self._zoom_range(xlim[0], xlim[1], xdata, scale_factor,
                                    ax.get_xscale())
            ax.set_xlim(xlim)

        for ax in y_axes:
            # Compute the cursor position per axes; the original reused ydata
            # from the x-axes loop, which raises a NameError when only y axes
            # need updating.
            transform = ax.transData.inverted()
            _, ydata = transform.transform_point((event.x, event.y))
            ylim = ax.get_ylim()
            ylim = self._zoom_range(ylim[0], ylim[1], ydata, scale_factor,
                                    ax.get_yscale())
            ax.set_ylim(ylim)

        if x_axes or y_axes:
            self._draw()


class PanAndZoom(ZoomOnWheel):
    """Class providing pan & zoom interaction to a matplotlib Figure.

    Left button for pan, right button for zoom area and zoom on wheel.
    Support subplots, twin Axes and log scales.
    This class extends the `ZoomOnWheel` class.
    """

    def __init__(self, figure=None, scale_factor=1.1):
        """
        Args:
            figure (figure): The matplotlib figure to attach the behavior to.
            scale_factor (float): The scale factor to apply on wheel event.
        """
        super(PanAndZoom, self).__init__(figure, scale_factor)
        self._add_connection('button_press_event', self._on_mouse_press)
        self._add_connection('button_release_event', self._on_mouse_release)
        self._add_connection('motion_notify_event', self._on_mouse_motion)

        self._pressed_button = None  # To store active button
        self._axes = None  # To store x and y axes concerned by interaction
        self._event = None  # To store reference event during interaction

        self.options = Dict(dict(report_point_position=True,))
        self.logger = None
        self._statusbar_label = None

        #self._get_images_path()
        #self._add_toolbar_tools()
        self._style_figure()

        self._ix_iy_old = (0, 0)

    def _get_images_path(self):
        """Get the path to images.

        Returns:
            str: path

        Raises:
            Exception: path error
        """
        # to be removed
        try:  # Get tool image path
            from pathlib import Path
            from ... import _gui
            imgs_path = Path(_gui.__file__).parent / '_imgs'
            if not imgs_path.is_dir():
                print(f'Bad File path for images! {imgs_path}')
                imgs_path = None
        except Exception as e:
            print('ERROR: ', e)
            imgs_path = None
        self.imgs_path = imgs_path
        return imgs_path

    def _add_toolbar_tools(self):
        """Add tools."""
        # TODO: Outdated - to be removed
        from matplotlib.backend_tools import ToolToggleBase  # ToolBase

        class ToolPointPosition(ToolToggleBase):
            '''Tools.'''
            default_keymap = 'Ctrl+p'
            description = 'Click to get point coordinate printed'
            default_toggled = False
            image = None  # str(imgs_path)

            def __init__(self, *args, parent=None, **kwargs):
                super().__init__(*args, **kwargs)
                if parent is None:
                    # The original raised a bare string, which is not valid.
                    raise ValueError('Pass a parent')
                self.parent = parent

            def enable(self, *args):
                self.parent.options.report_point_position = True

            def disable(self, *args):
                self.parent.options.report_point_position = False

        fig = self.figure
        imgs_path = self.imgs_path
        toolbar = self.toolbar = fig.canvas.manager.toolbar

        # Get tool manager
        # TODO: Remove use of tool manager just use PySide2 bare as below
        # ToolbarQt --- https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/backends/backend_qt5.py
        tm = fig.canvas.manager.toolmanager
        self.tm = tm

        # Tool: Print point location
        ToolPointPosition.image = str(imgs_path / 'click.png')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            tm.add_tool("Point_position", ToolPointPosition, parent=self)
        fig.canvas.manager.toolbar.add_tool(tm.get_tool("Point_position"), "toolgroup")

        # Tool: Copy to Clipboard
        from matplotlib.backend_tools import ToolCopyToClipboard
        ToolCopyToClipboard.image = str(imgs_path / 'copy.png')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # Overwrites Ctrl+C and issues warning
            tm.add_tool("Copy_to_clipboard", ToolCopyToClipboard)
        fig.canvas.manager.toolbar.add_tool(tm.get_tool("Copy_to_clipboard"), "toolgroup")

        if 1:  # add QT Pieces
            toolbar.action_ascale = QAction(
                QIcon(str(imgs_path / 'auto_zoom.png')), 'Auto scale', toolbar)
            toolbar.action_ascale.setShortcut('A')
            toolbar.action_ascale.setShortcutContext(Qt.WindowShortcut)
            toolbar.action_ascale.setStatusTip('Autoscale')
            toolbar.action_ascale.triggered.connect(self.auto_scale)
            toolbar.addAction(toolbar.action_ascale)

            # Status Bar: Second label to report
            figManager = fig.canvas.manager  # plt.get_current_fig_manager()
            status_bar = figManager.window.statusBar()
            self._status_label_2 = QLabel(status_bar)
            self._status_label_2.setText('')
            status_bar.addWidget(self._status_label_2)
            #from matplotlib.backends.backend_qt5 import StatusbarQt
            #st = StatusbarQt(figManager.window, figManager.toolmanager)
            # figManager.statusbar.set_message('')

    def auto_scale(self):
        """Auto scaler."""
        for ax in self.figure.axes:
            ax.autoscale()
        # self.figure.canvas.flush_events()
        self.figure.canvas.draw()

    def _style_figure(self):
        """Style figure."""
        #self.figure.dpi = 150
        pass

    @staticmethod
    def _pan_update_limits(ax, axis_id, event, last_event):
        """Compute limits with applied pan.

        Args:
            ax (Axes): The axes being panned
            axis_id (int): ID of the axis
            event (event): The event
            last_event (event): The previous event

        Returns:
            double: New limit

        Raises:
            ValueError: Value error
            OverflowError: Overflow error
        """
        assert axis_id in (0, 1)
        if axis_id == 0:
            lim = ax.get_xlim()
            scale = ax.get_xscale()
        else:
            lim = ax.get_ylim()
            scale = ax.get_yscale()

        pixel_to_data = ax.transData.inverted()
        data = pixel_to_data.transform_point((event.x, event.y))
        last_data = pixel_to_data.transform_point((last_event.x, last_event.y))

        if scale == 'linear':
            delta = data[axis_id] - last_data[axis_id]
            new_lim = lim[0] - delta, lim[1] - delta
        elif scale == 'log':
            try:
                delta = math.log10(data[axis_id]) - \
                    math.log10(last_data[axis_id])
                new_lim = [
                    pow(10., (math.log10(lim[0]) - delta)),
                    pow(10., (math.log10(lim[1]) - delta))
                ]
            except (ValueError, OverflowError):
                new_lim = lim  # Keep previous limits
        else:
            logging.warning('Pan not implemented for scale "%s"' % scale)
            new_lim = lim
        return new_lim

    def _pan(self, event):
        """Pan.

        Args:
            event (event): The event
        """
        if event.name == 'button_press_event':  # begin pan
            self._event = event

        elif event.name == 'button_release_event':  # end pan
            self._event = None

        elif event.name == 'motion_notify_event':  # pan
            if self._event is None:
                return

            if event.x != self._event.x:
                for ax in self._axes[0]:
                    xlim = self._pan_update_limits(ax, 0, event, self._event)
                    ax.set_xlim(xlim)

            if event.y != self._event.y:
                for ax in self._axes[1]:
                    ylim = self._pan_update_limits(ax, 1, event, self._event)
                    ax.set_ylim(ylim)

            if event.x != self._event.x or event.y != self._event.y:
                self._draw()

            self._event = event

    def _zoom_area(self, event):
        """Zoom

        Args:
            event (event): The event
        """
        if event.name == 'button_press_event':  # begin drag
            self._event = event
            self._patch = _plt.Rectangle(xy=(event.xdata, event.ydata),
                                         width=0,
                                         height=0,
                                         fill=False,
                                         linewidth=1.,
                                         linestyle='solid',
                                         color='black')
            self._event.inaxes.add_patch(self._patch)

        elif event.name == 'button_release_event':  # end drag
            self._patch.remove()
            del self._patch

            if (abs(event.x - self._event.x) < 3 or
                    abs(event.y - self._event.y) < 3):
                return  # No zoom when points are too close

            x_axes, y_axes = self._axes

            for ax in x_axes:
                pixel_to_data = ax.transData.inverted()
                begin_pt = pixel_to_data.transform_point((event.x, event.y))
                end_pt = pixel_to_data.transform_point(
                    (self._event.x, self._event.y))

                min_ = min(begin_pt[0], end_pt[0])
                max_ = max(begin_pt[0], end_pt[0])
                if not ax.xaxis_inverted():
                    ax.set_xlim(min_, max_)
                else:
                    ax.set_xlim(max_, min_)

            for ax in y_axes:
                pixel_to_data = ax.transData.inverted()
                begin_pt = pixel_to_data.transform_point((event.x, event.y))
                end_pt = pixel_to_data.transform_point(
                    (self._event.x, self._event.y))

                min_ = min(begin_pt[1], end_pt[1])
                max_ = max(begin_pt[1], end_pt[1])
                if not ax.yaxis_inverted():
                    ax.set_ylim(min_, max_)
                else:
                    ax.set_ylim(max_, min_)

            self._event = None

        elif event.name == 'motion_notify_event':  # drag
            if self._event is None:
                return

            if event.inaxes != self._event.inaxes:
                return  # Ignore event outside plot

            self._patch.set_width(event.xdata - self._event.xdata)
            self._patch.set_height(event.ydata - self._event.ydata)

        self._draw()

    def _on_mouse_press(self, event):
        """Mouse press event

        Args:
            event (event): The event
        """
        if self._pressed_button is not None:
            return  # Discard event if a button is already pressed

        if event.button in (1, 3):  # Start
            x_axes, y_axes = self._axes_to_update(event)
            if x_axes or y_axes:
                self._axes = x_axes, y_axes
                self._pressed_button = event.button
                if self._pressed_button == 1:  # pan
                    self._pan(event)
                    if self.options.report_point_position:  # check if we want to report point
                        self._report_point_position(event)
                elif self._pressed_button == 3:  # zoom area
                    self._zoom_area(event)

    def _on_mouse_release(self, event):
        """Mouse release event

        Args:
            event (event): The event
        """
        if self._pressed_button == event.button:
            if self._pressed_button == 1:  # pan
                self._pan(event)
            elif self._pressed_button == 3:  # zoom area
                self._zoom_area(event)
            self._pressed_button = None

    def _on_mouse_motion(self, event):
        """Mouse motion event

        Args:
            event (event): The event
        """
        if self._pressed_button == 1:  # pan
            self._pan(event)
        elif self._pressed_button == 3:  # zoom area
            self._zoom_area(event)

    def _report_point_position(self, event):
        """Report point position

        Args:
            event (event): the event
        """
        ix, iy = event.xdata, event.ydata
        if hasattr(self, '_ix_iy_old'):
            ix_old, iy_old = self._ix_iy_old
        else:
            ix_old, iy_old = (ix, iy)
        self._ix_iy_old = ix, iy
        _text = f'(x,y) = ({ix:.4f}, {iy:.4f}) Δ last point ({ix-ix_old:.4f}, {iy-iy_old:.4f})'
        if self.logger:
            self.logger.info(_text)
        if self._statusbar_label:
            self._statusbar_label.setText(_text)
        #print(_text)


def figure_pz(*args, **kwargs):
    """matplotlib.pyplot.figure with pan and zoom interaction."""
    #import warnings
    # warnings.filterwarnings(action='ignore')
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fig = _plt.figure(*args, **kwargs)
        fig.pan_zoom = PanAndZoom(fig)
    # warnings.resetwarnings()
    return fig


"""
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig = figure_pz()
    # Alternative:
    # fig = plt.figure()
    # pan_zoom = PanAndZoom(fig)

    nrow, ncol = 2, 3

    ax1 = fig.add_subplot(nrow, ncol, 1)
    ax1.set_title('basic')
    ax1.plot((1, 2, 3))

    ax2 = fig.add_subplot(nrow, ncol, 2)
    ax2.set_title('log + twinx')
    ax2.set_yscale('log')
    ax2.plot((1, 2, 1))
    ax2bis = ax2.twinx()
    ax2bis.plot((3, 2, 1), color='red')

    ax3 = fig.add_subplot(nrow, ncol, 3)
    ax3.set_title('inverted y axis')
    ax3.plot((1, 2, 3))
    lim = ax3.get_ylim()
    ax3.set_ylim(lim[1], lim[0])

    ax4 = fig.add_subplot(nrow, ncol, 4)
    ax4.set_title('keep ratio')
    ax4.axis('equal')
    ax4.imshow(numpy.arange(100).reshape(10, 10))

    ax5 = fig.add_subplot(nrow, ncol, 5)
    ax5.set_xlabel('symlog scale + twiny')
    ax5.set_xscale('symlog')
    ax5.plot((1, 2, 3))
    ax5bis = ax5.twiny()
    ax5bis.plot((3, 2, 1), color='red')

    # The following is taken from:
    # http://matplotlib.org/examples/axes_grid/demo_curvelinear_grid.html
    from mpl_toolkits.axisartist import Subplot
    from mpl_toolkits.axisartist.grid_helper_curvelinear import \
        GridHelperCurveLinear

    def tr(x, y):  # source (data) to target (rectilinear plot) coordinates
        x, y = numpy.asarray(x), numpy.asarray(y)
        return x + 0.2 * y, y - x

    def inv_tr(x, y):
        x, y = numpy.asarray(x), numpy.asarray(y)
        return x - 0.2 * y, y + x

    grid_helper = GridHelperCurveLinear((tr, inv_tr))
    ax6 = Subplot(fig, nrow, ncol, 6, grid_helper=grid_helper)
    fig.add_subplot(ax6)
    ax6.set_title('non-ortho axes')
    xx, yy = tr([3, 6], [5.0, 10.])
    ax6.plot(xx, yy)
    ax6.set_aspect(1.)
    ax6.set_xlim(0, 10.)
    ax6.set_ylim(0, 10.)
    ax6.axis["t"] = ax6.new_floating_axis(0, 3.)
    ax6.axis["t2"] = ax6.new_floating_axis(1, 7.)
    ax6.grid(True)

    plt.show()
"""
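# A minimal, hedged usage sketch for this module: create an interactive figure
# via figure_pz and toggle the point-position option defined in PanAndZoom above.
import matplotlib.pyplot as plt

def _demo_pan_zoom():
    fig = figure_pz()
    fig.pan_zoom.options.report_point_position = False  # silence click reports
    ax = fig.add_subplot(1, 1, 1)
    ax.plot((1, 2, 1))
    plt.show()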
var1 = 'Geeks'
print("Original String :-", var1)
print("Updated String :- ", var1[:5] + 'for' + 'Geeks')  # statement 1
import scapy.all as scapy
import time

from iemlav import logger


class SynFlood(object):
    """SynFlood Class."""

    def __init__(self, debug=False):
        """
        Initialize SynFlood.

        Args:
            debug (bool): Log on terminal or not

        Raises:
            None

        Returns:
            None
        """
        # Initialize logger
        self.logger = logger.IemlAVLogger(
            __name__,
            debug=debug
        )

        # Initialize SYN dictionary
        self.syn_dict = dict()

        # Set threshold to 1000 SYN packets / per second
        self._THRESHOLD = 1000  # inter = 0.001

    def detect_syn_flood(self, pkt):
        """
        Detect SYN flood attack.

        Args:
            pkt (scapy_object): Packet to dissect and observe

        Raises:
            None

        Returns:
            None
        """
        if (pkt.haslayer(scapy.IP) and pkt.haslayer(scapy.TCP)):
            flag = pkt[scapy.TCP].flags
            source_ip = pkt[scapy.IP].src

            if flag == "S":  # SYN flag
                if self.syn_dict.get(source_ip) is None:
                    # If new IP address
                    self.syn_dict[source_ip] = {
                        "start_time": time.time(),
                        "count": 1
                    }
                else:
                    count = self.syn_dict[source_ip]["count"]
                    self.syn_dict[source_ip]["count"] = count + 1

            if flag == "A":  # ACK flag
                if self.syn_dict.get(source_ip) is not None:
                    # Handshake completed, delete the IP entry (not suspicious)
                    del self.syn_dict[source_ip]

        # Detect intrusion
        self.calc_intrusion()

    def calc_intrusion(self):
        """
        Detect intrusion by comparing threshold ratios.

        Args:
            None

        Raises:
            None

        Returns:
            None
        """
        if len(self.syn_dict) != 0:  # If syn dict is not empty
            start_ip = [ip for ip in self.syn_dict.keys()][0]
            start_time = self.syn_dict[start_ip]["start_time"]
            current_time = time.time()
            delta_time = int(current_time - start_time)
            size_of_syn_dict = len(self.syn_dict)

            try:
                calc_threshold = int(size_of_syn_dict / delta_time)
            except ZeroDivisionError:
                calc_threshold = int(size_of_syn_dict)

            if (calc_threshold >= self._THRESHOLD):
                self.logger.log(
                    "Possible SYN flood attack detected.",
                    logtype="warning"
                )
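# Hedged wiring sketch (not part of the original module): stream live TCP
# packets into the detector via scapy's sniff loop. Requires root privileges.
if __name__ == "__main__":
    detector = SynFlood(debug=True)
    scapy.sniff(filter="tcp", prn=detector.detect_syn_flood, store=False)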
# -*- coding: utf8 -*-
# test encoding: à-é-è-ô-ï-€

# Copyright 2021 Adrien Crovato
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Physical flux
# Adrien Crovato

# Base class
class PFlux:
    def __init__(self):
        pass

    def __str__(self):
        raise RuntimeError('Physical flux not implemented!')


# Advection
class Advection(PFlux):
    '''Advection flux
    '''
    def __init__(self, a):
        PFlux.__init__(self)
        self.a = a  # advection (transport) velocity

    def __str__(self):
        return 'Advection flux (a = ' + str(self.a) + ')'

    def eval(self, u):
        '''Compute the physical flux vector
        f = a*u
        '''
        return [self.a * u[0]]

    def evald(self, u):
        '''Compute the physical flux derivative matrix
        df = a
        '''
        return [[self.a]]


class Advection2(PFlux):
    '''Advection flux
    '''
    def __init__(self, a, b):
        PFlux.__init__(self)
        self.a = a  # first advection (transport) velocity
        self.b = b  # second advection (transport) velocity

    def __str__(self):
        return 'Advection flux (a = ' + str(self.a) + ', b = ' + str(self.b) + ')'

    def eval(self, u):
        '''Compute the physical flux vector
        f = [a*u, b*v]
        '''
        f = [0.] * len(u)
        f[0] = self.a * u[0]
        f[1] = self.b * u[1]
        return f

    def evald(self, u):
        '''Compute the physical flux derivative matrix
        df = [a, 0; 0, b]
        '''
        df = [[0.] * len(u) for _ in range(len(u))]
        df[0][0] = self.a
        df[1][1] = self.b
        return df


# Burger's
class Burger(PFlux):
    '''Burger's flux
    '''
    def __init__(self):
        PFlux.__init__(self)

    def __str__(self):
        return 'Burger\'s flux'

    def eval(self, u):
        '''Compute the physical flux vector
        f = u*u/2
        '''
        return [0.5 * u[0] * u[0]]

    def evald(self, u):
        '''Compute the physical flux derivative matrix
        df = u
        '''
        return [[u[0]]]


# Euler
class Euler(PFlux):
    '''Euler flux
    '''
    def __init__(self, gamma):
        PFlux.__init__(self)
        self.gamma = gamma  # heat capacity ratio

    def __str__(self):
        return 'Euler flux (gamma = ' + str(self.gamma) + ')'

    def eval(self, u):
        '''Compute the physical flux vector
        f = [rho*u, rho*u^2+p, (E+p)*u]
        '''
        # Pre-pro
        v = u[1] / u[0]  # u = rho * u / rho
        p = (self.gamma - 1) * (u[2] - 0.5 * u[1] * v)  # (gamma - 1) * (E - 0.5 * rho*u*u)
        # Flux
        f = [0.] * len(u)
        f[0] = u[1]
        f[1] = u[1] * v + p
        f[2] = (u[2] + p) * v
        return f

    def evald(self, u):
        '''Compute the physical flux derivative matrix
        df = [0, 1, 0;
              (gamma-3)/2*u^2, (3-gamma)*u, gamma-1;
              -gamma*E*u/rho + (gamma-1)*u^3, gamma*E/rho + 3*(1-gamma)/2*u^2, gamma*u]
        '''
        # Pre-pro
        v = u[1] / u[0]  # = rho * u / rho
        e = u[2] / u[0]  # = E / rho
        # Flux
        df = [[0.] * len(u) for _ in range(len(u))]
        df[0][1] = 1.
        df[1][0] = 0.5 * (self.gamma - 3) * v * v
        df[1][1] = (3 - self.gamma) * v
        df[1][2] = self.gamma - 1
        df[2][0] = -self.gamma * e * v + (self.gamma - 1) * v * v * v
        df[2][1] = self.gamma * e + 1.5 * (1 - self.gamma) * v * v
        df[2][2] = self.gamma * v
        return df


class ShallowWater(PFlux):
    '''Shallow water flux
    '''
    def __init__(self, g):
        PFlux.__init__(self)
        self.g = g  # acceleration due to gravity

    def __str__(self):
        return 'Shallow water flux (g = ' + str(self.g) + ')'

    def eval(self, u):
        '''Compute the physical flux vector
        f = [h*u, g*h + u^2/2]
        '''
        f = [0.] * len(u)
        f[0] = u[0] * u[1]
        f[1] = self.g * u[0] + 0.5 * u[1] * u[1]
        return f

    def evald(self, u):
        '''Compute the physical flux derivative matrix
        df = [u, h; g, u]
        '''
        df = [[0.] * len(u) for _ in range(len(u))]
        df[0][0] = u[1]
        df[0][1] = u[0]
        df[1][0] = self.g
        df[1][1] = u[1]
        return df
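# A small worked check (not from the original file): evaluate the Euler flux on
# a state vector u = [rho, rho*u, E] and verify evald against a forward
# finite-difference Jacobian of eval. The state values are arbitrary.
def _demo_euler_jacobian(eps=1e-7):
    flux = Euler(gamma=1.4)
    u = [1.0, 0.5, 2.5]
    df_analytic = flux.evald(u)
    for j in range(3):
        up = list(u)
        up[j] += eps
        fd = [(fp - f) / eps for fp, f in zip(flux.eval(up), flux.eval(u))]
        for i in range(3):
            # Finite differences agree with the analytic Jacobian column.
            assert abs(fd[i] - df_analytic[i][j]) < 1e-4
    return df_analytic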
# name: person_builder.py
# version: 0.0.1
# date: 20211225
# author: Leam Hall
# desc: Build a person.

from person import Person


class PersonBuilder:

    def set_data(self, person, data={}):
        person.idx = data.get('idx', -1)
        person.gender = data.get('gender', '')
        person.first_name = data.get('first_name', '')
        person.last_name = data.get('last_name', '')
        person.birthdate = data.get('birthdate', 0)
        person.notes = data.get('notes', '')
        self.person = person  # remember the last built person for return_person()
        return person

    def gen_data(self, person, data={}):
        person.gender = data.get('gender', self.gen_gender())
        person.first_name = data.get('first_name', self.gen_firstname(person.gender))
        person.last_name = data.get('last_name', self.gen_lastname())
        person.birthdate = data.get('birthdate', self.gen_birthdate())
        person.notes = data.get('notes', '')
        self.person = person  # remember the last built person for return_person()
        return person

    def gen_firstname(self, gender):
        return 'John'

    def gen_lastname(self):
        return 'Dough'

    def gen_birthdate(self):
        return 1234056

    def gen_gender(self):
        return 'm'

    def return_person(self):
        # Relies on set_data/gen_data having stored the person first; the
        # original referenced self.person without ever assigning it.
        return self.person
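# Hedged usage sketch; assumes Person() from person.py can be constructed with
# no arguments (its signature is not shown here).
def _demo_build_person():
    builder = PersonBuilder()
    builder.gen_data(Person(), {'last_name': 'Hall'})
    return builder.return_person()  # the same object gen_data populated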
import os

import rasterio
from sklearn.cluster import AgglomerativeClustering
from shapely.geometry import Polygon
from shapely.ops import nearest_points
from math import sqrt
from gisele import initialization
from shapely import geometry
from gisele.functions import *
from math import *
#from gisele import LV_routing_new_strategy as new_strategy

# Explicit imports for names otherwise pulled in via the star imports above.
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, MultiPoint


def points_region_to_casestudy(points_region, polygon_casestudy):
    points_CaseStudy = points_region.clip(polygon_casestudy)
    return points_CaseStudy


def create_polygon_from_clusters(final_clus, clusters_file, substations_file, resolution, crs):
    Clusters = gpd.read_file(clusters_file)
    Substations = gpd.read_file(substations_file)
    Clusters = Clusters[Clusters['final_clus'] == final_clus]
    k = 0
    for index, row in Clusters.iterrows():
        area = row['geometry']
        minx = area.bounds[0]
        miny = area.bounds[1]
        maxx = area.bounds[2]
        maxy = area.bounds[3]
        if k == 0:
            min_x = minx
            min_y = miny
            max_x = maxx
            max_y = maxy
            k = 1
        else:
            if minx < min_x:
                min_x = minx
            if miny < min_y:
                min_y = miny
            if maxx > max_x:
                max_x = maxx
            if maxy > max_y:
                max_y = maxy
    for index, row in Substations.iterrows():
        substation = row['geometry']
        if substation.x < min_x:
            min_x = substation.x
        if substation.y < min_y:
            min_y = substation.y
        if substation.x > max_x:
            max_x = substation.x
        if substation.y > max_y:
            max_y = substation.y
    study_area = Polygon([Point(min_x, min_y), Point(min_x, max_y),
                          Point(max_x, max_y), Point(max_x, min_y)])
    study_area_buffered = study_area.buffer(4 * resolution)
    polygon = gpd.GeoDataFrame({'ID': [0], 'geometry': study_area_buffered})
    polygon.crs = crs
    polygon.to_file('area_polygon', index=False)
    return polygon  # geodataframe with the polygon


''' The goal of this function is to create a polygon starting from a cluster of clusters'''


def create_grid(crs, resolution, study_area):
    # crs and resolution should be numbers, while the study area is a polygon
    df = pd.DataFrame(columns=['X', 'Y'])
    min_x, min_y, max_x, max_y = study_area.bounds
    # create one-dimensional arrays for x and y
    lon = np.arange(min_x, max_x, resolution)
    lat = np.arange(min_y, max_y, resolution)
    lon, lat = np.meshgrid(lon, lat)
    df['X'] = lon.reshape((np.prod(lon.shape),))
    df['Y'] = lat.reshape((np.prod(lat.shape),))
    geo_df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.X, df.Y), crs=crs)
    geo_df_clipped = gpd.clip(geo_df, study_area)
    # geo_df_clipped.to_file(r'Test\grid_of_points.shp')
    return geo_df_clipped


def street_to_points(streets):
    streets_points = []
    for line in streets['geometry']:
        # print(line.geometryType)
        if line.geometryType() == 'MultiLineString':
            for line1 in line:
                for x in list(zip(line1.xy[0], line1.xy[1])):
                    # print(line1)
                    streets_points.append(x)
        else:
            for x in list(zip(line.xy[0], line.xy[1])):
                # print(line)
                streets_points.append(x)
    streets_multipoint = MultiPoint(streets_points)
    return streets_multipoint


def coincidence_factor(population, pop_per_household):
    return 0.35 + 0.65 / sqrt(population / pop_per_household)


def categorize_substation(clusters_list, substations):
    values = []
    costs = []
    substations1 = substations['Rated_power [kVA]'].to_list()
    for index, row in clusters_list.iterrows():
        load_kVA = row.loc['Load [kW]'] / 0.9  # considering a power factor of 0.9
        # Smallest transformer with spare capacity above the cluster load.
        substations2 = [i - load_kVA if i - load_kVA > 0 else 10000 for i in substations1]
        power = int(min(substations2) + load_kVA)
        values.append(power)
        locate_cost = substations[substations['Rated_power [kVA]'] == power]['Cost[euro]']
        costs.append(locate_cost.values[0])
    clusters_list['Transformer_rated_power [kVA]'] = values
    clusters_list['Cost[euro]'] = costs
    return clusters_list


def locate_secondary_ss(crs, resolution, load_capita, pop_per_household, road_coef,
                        Clusters, case_study, LV_distance, ss_data, landcover_option,
                        gisele_dir):
    dir_input = r'Case studies/' + case_study + '/Input'
    dir_output = '/Case studies/' + case_study
    grid_500m_weighted = pd.read_csv(dir_input + '/weighted_grid_of_points.csv')
    grid_500m_with_ss = grid_500m_weighted.copy()
    grid_500m_gdf = gpd.GeoDataFrame(
        grid_500m_weighted,
        geometry=gpd.points_from_xy(grid_500m_weighted.X, grid_500m_weighted.Y), crs=crs)
    grid_500m_with_ss = gpd.GeoDataFrame(
        grid_500m_with_ss,
        geometry=gpd.points_from_xy(grid_500m_with_ss.X, grid_500m_with_ss.Y), crs=crs)
    # Create a clusters.exe file
    LV_resume = pd.DataFrame()
    for index, row in Clusters.iterrows():
        os.chdir(gisele_dir)
        dir = gisele_dir + dir_output + '/Output/Clusters/' + str(row['cluster_ID'])
        clus = row['cluster_ID']
        if not os.path.exists(dir):
            os.makedirs(dir)
            os.makedirs(dir + '/grids')
        area = row['geometry']
        # THIS IS SPECIFIC FOR THE CASE OF THUSO - ISSUE NEEDS TO BE FIXED WITH
        # CLUSTERS THAT ARE TOO NARROW. If in one line of the grid points there
        # are no points -> the graph for the steiner tree will not be connected
        area_buffered = area
        # area_buffered = row['geometry'].buffer((resolution_MV * 0.1 / 11250) / 2)
        area_list = [area_buffered]
        # Create grid of points with a 30m resolution
        grid_of_points = create_grid(crs, resolution, area)
        grid_of_points.to_file(dir + '/points.shp')

        # Load and clip protected areas and streets
        # protected_areas = gpd.read_file(global_dir+db_dir+case_study+'/Protected_areas/Protected_area-Uganda.shp')
        # protected_areas = protected_areas.to_crs(crs)
        # protected_areas_clipped = gpd.clip(protected_areas, area)
        # protected_areas_clipped.to_file(dir + '/protected.shp')

        # To make sure the roads are not cut
        min_x, min_y, max_x, max_y = area.bounds
        area_for_roads = geometry.Polygon(
            [geometry.Point(min_x, min_y), geometry.Point(min_x, max_y),
             geometry.Point(max_x, max_y), geometry.Point(max_x, min_y)])
        streets = gpd.read_file(dir_input + '/Roads.shp')
        streets = streets.to_crs(crs)
        streets_clipped = gpd.clip(streets, area_for_roads)
        if not streets_clipped.empty:
            streets_clipped.to_file(dir + '/Roads.shp')
            Street_Multipoint = street_to_points(streets_clipped)

        # OPEN THE RASTERS FOR THE SPECIFIC REGION WHERE OUR CLUSTER IS
        Population = rasterio.open(dir_input + '/Population_' + str(crs) + '.tif')
        Elevation = rasterio.open(dir_input + '/Elevation_' + str(crs) + '.tif')
        Slope = rasterio.open(dir_input + '/Slope_' + str(crs) + '.tif')
        LandCover = rasterio.open(dir_input + '/LandCover_' + str(crs) + '.tif')

        # POPULATE THE GRID OF POINTS
        coords = [(x, y) for x, y in zip(grid_of_points.X, grid_of_points.Y)]
        grid_of_points = grid_of_points.reset_index(drop=True)
        grid_of_points['ID'] = grid_of_points.index
        grid_of_points['Population'] = [x[0] for x in Population.sample(coords)]
        grid_of_points['Elevation'] = [x[0] for x in Elevation.sample(coords)]
        grid_of_points['Slope'] = [x[0] for x in Slope.sample(coords)]
        grid_of_points['Land_cover'] = [x[0] for x in LandCover.sample(coords)]
        # THIS IS JUST A PROXY, NEEDS TO BE PROPERLY SET
        grid_of_points['Protected_area'] = ['FALSE' for x in LandCover.sample(coords)]
        print('Sampling rasters finished')
        grid_of_points.to_file(dir + '/points.shp')

        # AGGLOMERATIVE CLUSTERING
        scale_factor = 10
        populated_grid = grid_of_points[grid_of_points['Population'] > 0]
        populated_grid['Population'] = populated_grid['Population'].div(scale_factor).round(0) + 0.51
        populated_grid['Population'] = populated_grid['Population'].round(0)
        populated_grid = populated_grid.loc[
            populated_grid.index.repeat(populated_grid.Population.astype(int))]
        loc = {'x': populated_grid['X'], 'y': populated_grid['Y']}
        pop_points = pd.DataFrame(data=loc).values
        clustering = AgglomerativeClustering(distance_threshold=LV_distance * 1.8,
                                             linkage='complete', n_clusters=None).fit(pop_points)
        geo_df_clustered = gpd.GeoDataFrame(
            data=pop_points, columns=['X', 'Y'],
            geometry=gpd.points_from_xy(populated_grid['X'], populated_grid['Y']))
        geo_df_clustered['Cluster'] = clustering.labels_
        geo_df_clustered = geo_df_clustered.drop_duplicates()
        geo_df_clustered.set_crs(epsg=crs, inplace=True)
        geo_df_clustered.to_file(dir + '/pointsCluster.shp')
        # number_clusters = max(geo_df_clustered.Cluster)

        # calculate the total distances from one point to another inside the clusters
        for cluster in range(max(geo_df_clustered['Cluster']) + 1):
            total_distances = []
            geo_df_clustered_slice = geo_df_clustered[geo_df_clustered['Cluster'] == cluster]
            print(cluster)
            for index, row2 in geo_df_clustered_slice.iterrows():
                tot_dist = 0
                for index1, row1 in geo_df_clustered_slice.iterrows():
                    tot_dist += sqrt((row2.X - row1.X) ** 2 + (row2.Y - row1.Y) ** 2)
                total_distances.append(tot_dist)
            geo_df_clustered.loc[geo_df_clustered['Cluster'] == cluster,
                                 'tot_distance'] = total_distances

        joinDF = gpd.sjoin(grid_of_points, geo_df_clustered, how='left', op="contains")
        grid_of_points['Cluster'] = joinDF['Cluster']
        grid_of_points['Cluster'] = grid_of_points['Cluster'].fillna(-1)
        grid_of_points['tot_distance'] = joinDF['tot_distance']

        # ASSIGN ROAD DISTANCE
        if not streets_clipped.empty:
            road_distances = []
            for index, point in grid_of_points.iterrows():
                x = point['geometry'].xy[0][0]
                y = point['geometry'].xy[1][0]
                nearest_geoms = nearest_points(Point(x, y), Street_Multipoint)
                road_distance = nearest_geoms[0].distance(nearest_geoms[1])
                road_distances.append(road_distance)
            grid_of_points['Road_dist'] = road_distances

        # CHOOSE A SUBSTATION
        number_clusters = max(grid_of_points.Cluster)
        substations = []
        # Create a clusters.exe file
        clusters_list = pd.DataFrame(columns=['Cluster', 'Sub_cluster', 'Population', 'Load [kW]'])
        # ASSIGN MV POWER FOR THE SECONDARY SUBSTATIONS
        grid_of_points['MV_Power'] = 0
        for i in range(int(number_clusters) + 1):
            subset = grid_of_points[grid_of_points['Cluster'] == i]
            sum_pop = subset['Population'].sum()
            load = sum_pop * load_capita * coincidence_factor(sum_pop, pop_per_household)
            data = np.array([[int(row['cluster_ID']), int(i), sum_pop, load]])
            df2 = pd.DataFrame(data, columns=['Cluster', 'Sub_cluster', 'Population', 'Load [kW]'])
            clusters_list = clusters_list.append(df2)
            average_distance = subset['tot_distance'] / len(subset)
            min_weight = 100000
            ID = 0
            for index, ROW in subset.iterrows():
                if not streets_clipped.empty:
                    weight = ROW['Road_dist'] * road_coef + average_distance[index]
                else:
                    weight = average_distance[index]
                if weight < min_weight:
                    min_weight = weight
                    ID = index
            substations.append(ID)

        grid_of_points['Substation'] = 0
        for substation in substations:
            grid_of_points.loc[grid_of_points['ID'] == substation, 'Substation'] = 1
        for i in range(int(number_clusters) + 1):
            population = clusters_list.loc[clusters_list['Sub_cluster'] == i, 'Population'][0]
            load = clusters_list.loc[clusters_list['Sub_cluster'] == i, 'Load [kW]'][0]
            grid_of_points.loc[(grid_of_points['Substation'] == 1) &
                               (grid_of_points['Cluster'] == i), 'Population'] = population
            grid_of_points.loc[(grid_of_points['Substation'] == 1) &
                               (grid_of_points['Cluster'] == i), 'MV_Power'] = load

        substation_data = pd.read_csv(gisele_dir + '/general_input/' + ss_data)
        clusters_list = categorize_substation(clusters_list, substation_data)
        clusters_list['Population'] = [ceil(i) for i in clusters_list['Population']]
        clusters_list.to_csv(dir + '/clusters_list.csv', index=False)
        LV_resume = LV_resume.append(clusters_list)
        weights_grid = initialization.weighting(grid_of_points, resolution, landcover_option)
        grid_of_points['Weight'] = weights_grid['Weight']
        grid_of_points.crs = crs
        grid_of_points.to_csv(dir + '/Input.csv')
        grid_of_points.to_file(dir + '/points.shp')
        secondary_substations = grid_of_points[grid_of_points['Substation'] == 1]
        secondary_substations.to_file(dir + '/substations.shp')
        total_costs = sum(clusters_list['Cost[euro]'].to_list())
        print('The total costs for substations are ' + str(total_costs / 1000) + ' thousand euros')
        # cluster_polygons_gpd.to_file(dir + '/clusters_polygons.shp')
        print('The maximum loading of a cluster is ' + str(max(clusters_list['Load [kW]'])))
        print('The minimum loading of a cluster is ' + str(min(clusters_list['Load [kW]'])))

        # study_area = Clusters.loc[row['cluster_ID']]['geometry']
        study_area = area_buffered
        area = study_area
        area.crs = crs
        grid_500m_clip = gpd.clip(grid_500m_gdf, area)  # this is our 500m resolution grid of points
        substations = grid_of_points[grid_of_points['Substation'] == 1]
        number_substations = substations.shape[0]
        ss_starting_id = int(grid_500m_with_ss['ID'].max()) + 1
        substations['ID'] = [i for i in range(ss_starting_id, ss_starting_id + number_substations)]
        # ss_starting_id += number_substations
        substations.to_file(dir + '/substations.shp')
        substations['Cluster'] = clus
        substations_weighted = initialization.weighting(substations, resolution, landcover_option)
        substations['Weight'] = substations_weighted['Weight']
        grid_500m_clip = grid_500m_clip.append(substations)
        grid_500m_with_ss = grid_500m_with_ss.append(substations)
        #geo_df = initialization.weighting(grid_500m_clip, resolution_MV, landcover_option)
        grid_500m_clip.to_file(dir + '/points500m.shp')
        # geo_df['Substation'] = grid_500m_clip['Substation']
        # geo_df['geometry'] = grid_500m_clip['geometry']
        clus += 1

    grid_500m_with_ss.to_csv(gisele_dir + '/' + dir_input + '/weighted_grid_of_points_with_ss.csv')
    LV_resume.to_csv(gisele_dir + '/' + dir_input + '/LV_resume.csv')
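# Hedged worked example (not in the original file) for coincidence_factor above:
# the factor shrinks the per-capita design load as the population served grows.
def _demo_coincidence():
    for pop in (10, 100, 1000):
        cf = coincidence_factor(pop, pop_per_household=5)
        print(pop, round(cf, 3))  # 10 -> ~0.81, 100 -> ~0.50, 1000 -> ~0.40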
from typing import Any, Dict, Optional, Union

import httpx

from ...client import AuthenticatedClient
from ...models.osidb_api_v1_schema_retrieve_format import OsidbApiV1SchemaRetrieveFormat
from ...models.osidb_api_v1_schema_retrieve_lang import OsidbApiV1SchemaRetrieveLang
from ...models.osidb_api_v1_schema_retrieve_response_200 import OsidbApiV1SchemaRetrieveResponse200
from ...types import UNSET, Response, Unset


def _get_kwargs(
    *,
    client: AuthenticatedClient,
    format_: Union[Unset, None, OsidbApiV1SchemaRetrieveFormat] = UNSET,
    lang: Union[Unset, None, OsidbApiV1SchemaRetrieveLang] = UNSET,
) -> Dict[str, Any]:
    url = "{}/osidb/api/v1/schema/".format(
        client.base_url,
    )

    headers: Dict[str, Any] = client.get_headers()

    json_format_: Union[Unset, None, str] = UNSET
    if not isinstance(format_, Unset):
        json_format_ = OsidbApiV1SchemaRetrieveFormat(format_).value if format_ else None

    json_lang: Union[Unset, None, str] = UNSET
    if not isinstance(lang, Unset):
        json_lang = OsidbApiV1SchemaRetrieveLang(lang).value if lang else None

    params: Dict[str, Any] = {
        "format": json_format_,
        "lang": json_lang,
    }
    params = {k: v for k, v in params.items() if v is not UNSET and v is not None}

    return {
        "url": url,
        "headers": headers,
        "params": params,
    }


def _parse_response(*, response: httpx.Response) -> Optional[OsidbApiV1SchemaRetrieveResponse200]:
    if response.status_code == 200:
        _response_200 = response.json()
        response_200: OsidbApiV1SchemaRetrieveResponse200
        if isinstance(_response_200, Unset):
            response_200 = UNSET
        else:
            response_200 = OsidbApiV1SchemaRetrieveResponse200.from_dict(_response_200)

        return response_200
    return None


def _build_response(*, response: httpx.Response) -> Response[OsidbApiV1SchemaRetrieveResponse200]:
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=_parse_response(response=response),
    )


def sync_detailed(
    *,
    client: AuthenticatedClient,
    format_: Union[Unset, None, OsidbApiV1SchemaRetrieveFormat] = UNSET,
    lang: Union[Unset, None, OsidbApiV1SchemaRetrieveLang] = UNSET,
) -> Response[OsidbApiV1SchemaRetrieveResponse200]:
    kwargs = _get_kwargs(
        client=client,
        format_=format_,
        lang=lang,
    )

    response = httpx.get(
        verify=client.verify_ssl,
        auth=client.auth,
        timeout=client.timeout,
        **kwargs,
    )
    response.raise_for_status()

    return _build_response(response=response)


def sync(
    *,
    client: AuthenticatedClient,
    format_: Union[Unset, None, OsidbApiV1SchemaRetrieveFormat] = UNSET,
    lang: Union[Unset, None, OsidbApiV1SchemaRetrieveLang] = UNSET,
) -> Optional[OsidbApiV1SchemaRetrieveResponse200]:
    """OpenApi3 schema for this API. Format can be selected via content negotiation.

    - YAML: application/vnd.oai.openapi
    - JSON: application/vnd.oai.openapi+json
    """

    return sync_detailed(
        client=client,
        format_=format_,
        lang=lang,
    ).parsed


async def asyncio_detailed(
    *,
    client: AuthenticatedClient,
    format_: Union[Unset, None, OsidbApiV1SchemaRetrieveFormat] = UNSET,
    lang: Union[Unset, None, OsidbApiV1SchemaRetrieveLang] = UNSET,
) -> Response[OsidbApiV1SchemaRetrieveResponse200]:
    kwargs = _get_kwargs(
        client=client,
        format_=format_,
        lang=lang,
    )

    async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
        response = await _client.get(**kwargs)

    return _build_response(response=response)


async def asyncio(
    *,
    client: AuthenticatedClient,
    format_: Union[Unset, None, OsidbApiV1SchemaRetrieveFormat] = UNSET,
    lang: Union[Unset, None, OsidbApiV1SchemaRetrieveLang] = UNSET,
) -> Optional[OsidbApiV1SchemaRetrieveResponse200]:
    """OpenApi3 schema for this API. Format can be selected via content negotiation.

    - YAML: application/vnd.oai.openapi
    - JSON: application/vnd.oai.openapi+json
    """

    return (
        await asyncio_detailed(
            client=client,
            format_=format_,
            lang=lang,
        )
    ).parsed
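# Hedged usage sketch for this generated endpoint module. The base URL and
# token are placeholders, and the enum member name JSON is an assumption about
# the generated OsidbApiV1SchemaRetrieveFormat values.
def _demo_fetch_schema():
    client = AuthenticatedClient(base_url="https://osidb.example.com", token="TOKEN")
    return sync(client=client, format_=OsidbApiV1SchemaRetrieveFormat.JSON)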
def work_well():
    print("ok")
    return "ok"


def say_hello():
    print("hello")
    return "hello"
# uses minimax to look ahead
import sys
from functools import partial

from game import *
import interactive_game


def compute_score(board, res=TURN_OK):
    score = board[0]**5 \
        + board[1]**4 + board[4]**4 \
        + board[2]**2 + board[5]**2 + board[8]**2 \
        + board[3] + board[6] + board[9] + board[12]
    if res == TURN_GAME_OVER or res == TURN_ILLEGAL:
        return -score
    return score


def minimax(board, lm=None, depth=5):
    if not lm:
        lm = legal_moves(board)
    if lm == []:
        return (compute_score(board, TURN_ILLEGAL), -1)
    best_move = None
    for move in lm:
        new_board, res = perform_turn(board.copy(), move, ins_random=False, skip_check=True)
        score = compute_score(new_board)
        if depth != 1:
            new_board = insert_random(new_board)
            next_score, next_move = minimax(new_board.copy(), depth=depth - 1)
            score += next_score
        score_move = (score, move)
        if best_move is None:
            best_move = score_move
        elif best_move < score_move:
            best_move = score_move
    return best_move


def ai_2_compute_func(board, lm, depth):
    next_score, next_move = minimax(board, lm=lm, depth=depth)
    return next_move


if __name__ == "__main__":
    if len(sys.argv) >= 2:
        depth = int(sys.argv[1])
    else:
        depth = 3
    interactive_game.start(partial(ai_2_compute_func, depth=depth))
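# Hedged sketch (not in the original): query the search directly for one
# position. The board value must come in the `game` module's own list
# representation, as used by legal_moves/perform_turn above.
def _demo_best_move(board, depth=3):
    score, move = minimax(board, depth=depth)
    print('best move:', move, 'heuristic score:', score)
    return move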
"""Module with function to regularize a 2D curve (with uniform resolution).""" import math import numpy def _get_perimeter(x, y): """Return the perimeter of the geometry. Parameters ---------- x : numpy.ndarray x-coordinate of the points along the curve. y : numpy.ndarray y-coordinate of the points along the curve. Returns ------- perimeter : float The perimeter. """ # Duplicate point if necessary to get a closed surface. atol = 1e-6 if abs(x[0] - x[-1]) > atol or abs(y[0] - y[-1]) > atol: x, y = numpy.append(x, x[0]), numpy.append(y, y[0]) return numpy.sum(numpy.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)) def regularize2d(xo, yo, N=None, ds=None, atol=1.0E-06): """Regularize the geometry. Parameters ---------- xo: numpy.ndarray of floats The x-coordinates of the boundary to regularize. yo: numpy.ndarray of floats The y-coordinates of the boundary to regularize. N: integer, optional Number of divisions; default: None. ds: float, optional Desired segment-length; default: None. atol: float, optional Desired tolerance for discretization; default: 1.0E-06. Returns ------- x: numpy.ndarray of floats The x-coordinates of the regularized boundary. y: numpy.ndarray of floats The y-coordinates of the regularized boundary. """ if not (N or ds): return xo.copy(), yo.copy() if not N: N = int(math.ceil(_get_perimeter(xo, yo) / ds)) ds = _get_perimeter(xo, yo) / N # Duplicate point if necessary to get a closed surface. if abs(xo[0] - xo[-1]) > atol or abs(yo[0] - yo[-1]) > atol: xo, yo = numpy.append(xo, xo[0]), numpy.append(yo, yo[0]) # Regularize the geometry. next_idx = 1 last_idx = xo.size - 1 x, y = [xo[0]], [yo[0]] for _ in range(1, N): xs, ys = x[-1], y[-1] # Start point xe, ye = xo[next_idx], yo[next_idx] # End point length = numpy.sqrt((xe - xs)**2 + (ye - ys)**2) if abs(ds - length) <= atol: # Copy x.append(xe) y.append(ye) next_idx += 1 elif ds < length: # Interpolate between start and end points length2 = numpy.sqrt((xe - xs)**2 + (ye - ys)**2) x.append(xs + ds / length2 * (xe - xs)) y.append(ys + ds / length2 * (ye - ys)) else: # Project the new point # Get segment index. while length < ds and next_idx < last_idx: next_idx += 1 length = numpy.sqrt((xo[next_idx] - xs)**2 + (yo[next_idx] - ys)**2) xp, yp = xo[next_idx - 1], yo[next_idx - 1] xe, ye = xo[next_idx], yo[next_idx] # Interpolate on segment. precision = 1 coeff = 0.0 while abs(ds - length) > atol and precision < 6: xn, yn = xp + coeff * (xe - xp), yp + coeff * (ye - yp) length = numpy.sqrt((xn - xs)**2 + (yn - ys)**2) if length > ds: coeff -= 0.1**precision precision += 1 coeff += 0.1**precision # Check new point not too close from first point before adding. length = numpy.sqrt((xn - x[0])**2 + (yn - y[0])**2) if length > 0.5 * ds: x.append(xn) y.append(yn) x, y = numpy.array(x), numpy.array(y) return x, y
from bstools import bsSsh
from bstools import bsPrint
from bstools import bsTime

name = "bstools"
__version__ = "0.1.9"
""" libmonster.py - mixed support library # TODO: consider replacing pauthor in keyid with _bibtex.names # TODO: enusure \emph is dropped from titles in keyid calculation """ import re from heapq import nsmallest from collections import defaultdict from itertools import groupby from operator import itemgetter from csvw.dsv import UnicodeWriter from ..util import unique, Trigger from .bibfiles import Entry from .bibtex_undiacritic import undiacritic from .roman import roman, romanint INF = float('inf') lgcodestr = Entry.lgcodes def opv(d, func, *args): """ Apply func to all values of a dictionary. :param d: A dictionary. :param func: Callable accepting a value of `d` as first parameter. :param args: Additional positional arguments to be passed to `func`. :return: `dict` mapping the keys of `d` to the return value of the function call. """ return {i: func(v, *args) for i, v in d.items()} def grp2(l): """ Turn a list of pairs into a dictionary, mapping first elements to lists of co-occurring second elements in pairs. :param l: :return: """ return {a: [pair[1] for pair in pairs] for a, pairs in groupby(sorted(l, key=itemgetter(0)), itemgetter(0))} def grp2fd(l): """ Turn a list of pairs into a nested dictionary, thus grouping by the first element in the pair. :param l: :return: """ return {k: {vv: 1 for vv in v} for k, v in grp2(l).items()} reauthor = [re.compile(pattern) for pattern in [ "(?P<lastname>[^,]+),\s((?P<jr>[JS]r\.|[I]+),\s)?(?P<firstname>[^,]+)$", "(?P<firstname>[^{][\S]+(\s[A-Z][\S]+)*)\s" "(?P<lastname>([a-z]+\s)*[A-Z\\\\][\S]+)(?P<jr>,\s[JS]r\.|[I]+)?$", "(?P<firstname>\\{[\S]+\\}[\S]+(\s[A-Z][\S]+)*)\s" "(?P<lastname>([a-z]+\s)*[A-Z\\\\][\S]+)(?P<jr>,\s[JS]r\.|[I]+)?$", "(?P<firstname>[\s\S]+?)\s\{(?P<lastname>[\s\S]+)\}(?P<jr>,\s[JS]r\.|[I]+)?$", "\{(?P<firstname>[\s\S]+)\}\s(?P<lastname>[\s\S]+?)(?P<jr>,\s[JS]r\.|[I]+)?$", "(?P<lastname>[A-Z][\S]+)$", "\{(?P<lastname>[\s\S]+)\}$", "(?P<lastname>[aA]nonymous)$", "(?P<lastname>\?)$", "(?P<lastname>[\s\S]+)$", ]] def psingleauthor(n): if not n: return for pattern in reauthor: o = pattern.match(n) if o: return o.groupdict() print("Couldn't parse name:", n) # pragma: no cover def pauthor(s): pas = [psingleauthor(a) for a in s.split(' and ')] if [a for a in pas if not a]: if s: print(s) return [a for a in pas if a] relu = re.compile("\s+|(d\')(?=[A-Z])") recapstart = re.compile("\[?[A-Z]") def lowerupper(s): parts, lower, upper = [x for x in relu.split(s) if x], [], [] for i, x in enumerate(parts): if not recapstart.match(undiacritic(x)): lower.append(x) else: upper = parts[i:] break return lower, upper def lastnamekey(s): _, upper = lowerupper(s) return max(upper) if upper else '' def rangecomplete(incomplete, complete): """ >>> rangecomplete('2', '10') '12' """ if len(complete) > len(incomplete): # if the second number in a range of pages has less digits than the the first, # we assume it's meant as only the last digits of the bigger number, # i.e. 10-2 is interpreted as 10-12. 
return complete[:len(complete) - len(incomplete)] + incomplete return incomplete rebracketyear = re.compile("\[([\d\,\-\/]+)\]") reyl = re.compile("[\,\-\/\s\[\]]+") def pyear(s): if rebracketyear.search(s): s = rebracketyear.search(s).group(1) my = [x for x in reyl.split(s) if x.strip()] if len(my) == 0: return "[nd]" if len(my) != 1: return my[0] + "-" + rangecomplete(my[-1], my[0]) return my[-1] bibord = {k: i for i, k in enumerate([ 'author', 'editor', 'title', 'booktitle', 'journal', 'school', 'publisher', 'address', 'series', 'volume', 'number', 'pages', 'year', 'issn', 'url', ])} def bibord_iteritems(fields): for f in sorted(fields, key=lambda f: (bibord.get(f, INF), f)): yield f, fields[f] resplittit = re.compile("[\(\)\[\]\:\,\.\s\-\?\!\;\/\~\=]+") def wrds(txt): txt = undiacritic(txt.lower()) txt = txt.replace("'", "").replace('"', "") return [x for x in resplittit.split(txt) if x] def renfn(e, ups): for k, field, newvalue in ups: typ, fields = e[k] fields[field] = newvalue e[k] = (typ, fields) return e INLG = 'inlg' def add_inlg_e(e, trigs, verbose=True, return_newtrain=False): # FIXME: does not honor 'NOT' for now, only maps words to iso codes. dh = {word: t.type for t in trigs for _, word in t.clauses} # map record keys to lists of words in titles: ts = [(k, wrds(fields['title']) + wrds(fields.get('booktitle', ''))) for (k, (typ, fields)) in e.items() if 'title' in fields and INLG not in fields] if verbose: print(len(ts), "without", INLG) # map record keys to sets of assigned iso codes, based on words in the title ann = [(k, set(dh[w] for w in tit if w in dh)) for k, tit in ts] # list of record keys which have been assigned exactly one iso code unique_ = [(k, lgs.pop()) for (k, lgs) in ann if len(lgs) == 1] if verbose: print(len(unique_), "cases of unique hits") t2 = renfn(e, [(k, INLG, v) for (k, v) in unique_]) if return_newtrain: # pragma: no cover newtrain = grp2fd([ (lgcodestr(fields[INLG])[0], w) for (k, (typ, fields)) in t2.items() if 'title' in fields and INLG in fields if len(lgcodestr(fields[INLG])) == 1 for w in wrds(fields['title'])]) for (lg, wf) in sorted(newtrain.items(), key=lambda x: len(x[1])): cm = [(1 + f, float(1 - f + sum(owf.get(w, 0) for owf in newtrain.values())), w) for (w, f) in wf.items() if f > 9] cms = [(f / fn, f, fn, w) for (f, fn, w) in cm] cms.sort(reverse=True) return t2, newtrain, cms return t2 rerpgs = re.compile("([xivmcl]+)\-?([xivmcl]*)") repgs = re.compile("([\d]+)\-?([\d]*)") def pagecount(pgstr): rpgs = rerpgs.findall(pgstr) pgs = repgs.findall(pgstr) rsump = sum(romanint(b) - romanint(a) + 1 if b else romanint(a) for (a, b) in rpgs) sump = sum(int(rangecomplete(b, a)) - int(a) + 1 if b else int(a) for (a, b) in pgs) if rsump != 0 and sump != 0: return "%s+%s" % (rsump, sump) if rsump == 0 and sump == 0: return '' return '%s' % (rsump + sump) rewrdtok = re.compile("[a-zA-Z].+") reokkey = re.compile("[^a-z\d\-\_\[\]]") def keyid(fields, fd, ti=2, infinity=float('inf')): if 'author' not in fields: if 'editor' not in fields: values = ''.join( v for f, v in bibord_iteritems(fields) if f != 'glottolog_ref_id') return '__missingcontrib__' + reokkey.sub('_', values.lower()) else: astring = fields['editor'] else: astring = fields['author'] authors = pauthor(astring) if len(authors) != len(astring.split(' and ')): print("Unparsed author in", authors) print(" ", astring, astring.split(' and ')) print(fields.get('title')) ak = [undiacritic(x) for x in sorted(lastnamekey(a['lastname']) for a in authors)] yk = pyear(fields.get('year', '[nd]'))[:4] 
tks = wrds(fields.get("title", "no.title")) # takeuntil : # select the (leftmost) two least frequent words from the title types = list(unique(w for w in tks if rewrdtok.match(w))) tk = nsmallest(ti, types, key=lambda w: fd.get(w, infinity)) # put them back into the title order (i.e. 'spam eggs' != 'eggs spam') order = {w: i for i, w in enumerate(types)} tk.sort(key=lambda w: order[w]) if 'volume' in fields and all( f not in fields for f in ['journal', 'booktitle', 'series']): vk = roman(fields['volume']) else: vk = '' if 'extra_hash' in fields: yk = yk + fields['extra_hash'] key = '-'.join(ak) + "_" + '-'.join(tk) + vk + yk return reokkey.sub("", key.lower()) def lgcode(arg): fields = arg[1] return lgcodestr(fields['lgcode']) if 'lgcode' in fields else [] def sd(es, hht): # most signficant piece of descriptive material # hhtype, pages, year mi = [(k, (hht.parse(fields.get('hhtype', 'unknown')), fields.get('pages', ''), fields.get('year', ''))) for (k, (typ, fields)) in es.items()] d = accd(mi) return [sorted(((p, y, k, t.id) for (k, (p, y)) in d[t.id].items()), reverse=True) for t in hht if t.id in d] def pcy(pagecountstr): if not pagecountstr: return 0 return eval(pagecountstr) # int(takeafter(pagecountstr, "+")) def accd(mi): r = defaultdict(dict) for (k, (hhts, pgs, year)) in mi: pci = pcy(pagecount(pgs)) for t in hhts: r[t][k] = (pci / float(len(hhts)), year) return r def byid(es): return grp2([(cfn, k) for (k, tf) in es.items() for cfn in lgcode(tf)]) def sdlgs(e, hht): eindex = byid(e) fes = opv(eindex, lambda ks: {k: e[k] for k in ks}) fsd = opv(fes, sd, hht) return fsd, fes def lstat(e, hht): (lsd, lse) = sdlgs(e, hht) return opv(lsd, lambda xs: (xs + [[[None]]])[0][0][-1]) def lstat_witness(e, hht): def statwit(xs): assert xs [(typ, ks)] = grp2([(t, k) for [p, y, k, t] in xs[0]]).items() return typ, ks (lsd, lse) = sdlgs(e, hht) return opv(lsd, statwit) def markconservative(m, trigs, ref, hht, outfn, verbose=True, rank=None): blamefield = "hhtype" mafter = markall(m, trigs, verbose=verbose, rank=rank) ls = lstat(ref, hht) lsafter = lstat_witness(mafter, hht) log = [] no_status = defaultdict(set) for (lg, (stat, wits)) in lsafter.items(): if not ls.get(lg): srctrickles = [mafter[k][1].get('srctrickle') for k in wits] for t in srctrickles: if t and not t.startswith('iso6393'): no_status[lg].add(t) continue if hht[stat] > hht[ls[lg]]: log = log + [ (lg, [(mafter[k][1].get(blamefield, "No %s" % blamefield), k, mafter[k][1].get('title', 'no title'), mafter[k][1].get('srctrickle', 'no srctrickle')) for k in wits], ls[lg])] for k in wits: (t, f) = mafter[k] if blamefield in f: del f[blamefield] mafter[k] = (t, f) for lg in no_status: print('{0} lacks status'.format(lg)) with UnicodeWriter(outfn, dialect='excel-tab') as writer: writer.writerows(((lg, was) + mis for (lg, miss, was) in log for mis in miss)) return mafter def markall(e, trigs, verbose=True, rank=None): # the set of fields triggers relate to: clss = set(t.field for t in trigs) # all bibitems lacking any of the potential triggered fields: ei = {k: (typ, fields) for k, (typ, fields) in e.items() if any(c not in fields for c in clss)} eikeys = set(list(ei.keys())) # map words in titles to lists of bibitem keys having the word in the title: wk = defaultdict(set) for k, (typ, fields) in ei.items(): for w in wrds(fields.get('title', '')): wk[w].add(k) u = defaultdict(lambda: defaultdict(list)) for clauses, triggers in Trigger.group(trigs): for k in triggers[0](eikeys, wk): for t in triggers: u[k][t.cls].append(t) for k, t_by_c in 
sorted(u.items(), key=lambda i: i[0]): t, f = e[k] f2 = {a: b for a, b in f.items()} for (field, type_), triggers in sorted(t_by_c.items(), key=lambda i: len(i[1])): # Make sure we handle the trigger class with the biggest number of matching # triggers last. if rank and field in f2: # only update the assigned hhtype if something better comes along: if rank(f2[field].split(' (comp')[0]) >= rank(type_): continue f2[field] = Trigger.format(type_, triggers) e[k] = (t, f2) if verbose: print("trigs", len(trigs)) print("label classes", len(clss)) print("unlabeled refs", len(ei)) print("updates", len(u)) return e
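# Illustrative behaviour of the small parsing helpers above (hedged: running
# this module requires its package context for the relative imports, so these
# are shown as expected values worked out from the function definitions, not
# executed output):
#
#     >>> rangecomplete('7', '1850')
#     '1857'
#     >>> pyear('1850-7')
#     '1850-1857'
#     >>> pagecount('xiv+362')   # roman front matter + arabic pages
#     '14+362'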
nilq/baby-python
python
## Code taken from https://github.com/vqdang/hover_net/blob/master/metrics/stats_utils.py

import warnings

import numpy as np
import scipy.spatial  # needed for scipy.spatial.distance.cdist below
from scipy.optimize import linear_sum_assignment


# --------------------------Optimised for Speed
def get_fast_aji(true, pred):
    """AJI version distributed by MoNuSeg, has no permutation problem but
    suffered from over-penalisation similar to DICE2.

    Fast computation requires instance IDs are in contiguous ordering
    i.e [1, 2, 3, 4] not [2, 3, 6, 10]. Please call `remap_label` beforehand
    and `by_size` flag has no effect on the result.
    """
    if true.sum() == 0 and pred.sum() == 0:
        return 1.
    true = np.copy(true)  # ? do we need this
    pred = np.copy(pred)
    true_id_list = list(np.unique(true))
    pred_id_list = list(np.unique(pred))

    true_masks = [None, ]
    for t in true_id_list[1:]:
        t_mask = np.array(true == t, np.uint8)
        true_masks.append(t_mask)

    pred_masks = [None, ]
    for p in pred_id_list[1:]:
        p_mask = np.array(pred == p, np.uint8)
        pred_masks.append(p_mask)

    # prefill with value
    pairwise_inter = np.zeros(
        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
    )
    pairwise_union = np.zeros(
        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
    )

    # caching pairwise
    for true_id in true_id_list[1:]:  # 0-th is background
        t_mask = true_masks[true_id]
        pred_true_overlap = pred[t_mask > 0]
        pred_true_overlap_id = np.unique(pred_true_overlap)
        pred_true_overlap_id = list(pred_true_overlap_id)
        for pred_id in pred_true_overlap_id:
            if pred_id == 0:  # ignore
                continue  # overlapping background
            p_mask = pred_masks[pred_id]
            total = (t_mask + p_mask).sum()
            inter = (t_mask * p_mask).sum()
            pairwise_inter[true_id - 1, pred_id - 1] = inter
            pairwise_union[true_id - 1, pred_id - 1] = total - inter

    pairwise_iou = pairwise_inter / (pairwise_union + 1.0e-6)
    # pair of pred that gives highest iou for each true, don't care
    # about reusing a pred instance multiple times
    paired_pred = np.argmax(pairwise_iou, axis=1)
    pairwise_iou = np.max(pairwise_iou, axis=1)
    # exclude those that don't have an intersection
    paired_true = np.nonzero(pairwise_iou > 0.0)[0]
    paired_pred = paired_pred[paired_true]
    # print(paired_true.shape, paired_pred.shape)
    overall_inter = (pairwise_inter[paired_true, paired_pred]).sum()
    overall_union = (pairwise_union[paired_true, paired_pred]).sum()

    paired_true = list(paired_true + 1)  # index to instance ID
    paired_pred = list(paired_pred + 1)
    # add all unpaired GT and Prediction into the union
    unpaired_true = np.array(
        [idx for idx in true_id_list[1:] if idx not in paired_true]
    )
    unpaired_pred = np.array(
        [idx for idx in pred_id_list[1:] if idx not in paired_pred]
    )
    for true_id in unpaired_true:
        overall_union += true_masks[true_id].sum()
    for pred_id in unpaired_pred:
        overall_union += pred_masks[pred_id].sum()

    if overall_union == 0:
        aji_score = 0.
    else:
        aji_score = overall_inter / overall_union
    return aji_score


#####
def get_fast_aji_plus(true, pred):
    """AJI+, an AJI version with maximal unique pairing to obtain overall
    intersection. Every prediction instance is paired with at most 1 GT
    instance (1 to 1) mapping, unlike AJI where a prediction instance can
    be paired against many GT instances (1 to many).

    Remaining unpaired GT and Prediction instances will be added to the
    overall union. The 1 to 1 mapping prevents AJI's over-penalisation
    from happening.

    Fast computation requires instance IDs are in contiguous ordering
    i.e [1, 2, 3, 4] not [2, 3, 6, 10]. Please call `remap_label` beforehand
    and `by_size` flag has no effect on the result.
    """
    if true.sum() == 0 and pred.sum() == 0:
        return 1.
    true = np.copy(true)  # ? do we need this
    pred = np.copy(pred)
    true_id_list = list(np.unique(true))
    pred_id_list = list(np.unique(pred))

    true_masks = [None, ]
    for t in true_id_list[1:]:
        t_mask = np.array(true == t, np.uint8)
        true_masks.append(t_mask)

    pred_masks = [None, ]
    for p in pred_id_list[1:]:
        p_mask = np.array(pred == p, np.uint8)
        pred_masks.append(p_mask)

    # prefill with value
    pairwise_inter = np.zeros(
        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
    )
    pairwise_union = np.zeros(
        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
    )

    # caching pairwise
    for true_id in true_id_list[1:]:  # 0-th is background
        t_mask = true_masks[true_id]
        pred_true_overlap = pred[t_mask > 0]
        pred_true_overlap_id = np.unique(pred_true_overlap)
        pred_true_overlap_id = list(pred_true_overlap_id)
        for pred_id in pred_true_overlap_id:
            if pred_id == 0:  # ignore
                continue  # overlapping background
            p_mask = pred_masks[pred_id]
            total = (t_mask + p_mask).sum()
            inter = (t_mask * p_mask).sum()
            pairwise_inter[true_id - 1, pred_id - 1] = inter
            pairwise_union[true_id - 1, pred_id - 1] = total - inter
    #
    pairwise_iou = pairwise_inter / (pairwise_union + 1.0e-6)
    #### Munkres pairing to find maximal unique pairing
    paired_true, paired_pred = linear_sum_assignment(-pairwise_iou)
    ### extract the paired cost and remove invalid pair
    paired_iou = pairwise_iou[paired_true, paired_pred]
    # now select all those paired with iou != 0.0 i.e have intersection
    paired_true = paired_true[paired_iou > 0.0]
    paired_pred = paired_pred[paired_iou > 0.0]
    paired_inter = pairwise_inter[paired_true, paired_pred]
    paired_union = pairwise_union[paired_true, paired_pred]
    paired_true = list(paired_true + 1)  # index to instance ID
    paired_pred = list(paired_pred + 1)
    overall_inter = paired_inter.sum()
    overall_union = paired_union.sum()
    # add all unpaired GT and Prediction into the union
    unpaired_true = np.array(
        [idx for idx in true_id_list[1:] if idx not in paired_true]
    )
    unpaired_pred = np.array(
        [idx for idx in pred_id_list[1:] if idx not in paired_pred]
    )
    for true_id in unpaired_true:
        overall_union += true_masks[true_id].sum()
    for pred_id in unpaired_pred:
        overall_union += pred_masks[pred_id].sum()
    #
    if overall_union == 0:
        aji_score = 0.
    else:
        aji_score = overall_inter / overall_union
    return aji_score


#####
def get_fast_pq(true, pred, match_iou=0.5):
    """`match_iou` is the IoU threshold level to determine the pairing between
    GT instances `p` and prediction instances `g`. `p` and `g` is a pair
    if IoU > `match_iou`. However, pair of `p` and `g` must be unique
    (1 prediction instance to 1 GT instance mapping).

    If `match_iou` < 0.5, Munkres assignment (solving minimum weight matching
    in bipartite graphs) is calculated to find the maximal amount of unique
    pairing.

    If `match_iou` >= 0.5, all IoU(p,g) > 0.5 pairings are proven to be unique
    and the number of pairs is also maximal.

    Fast computation requires instance IDs are in contiguous ordering
    i.e [1, 2, 3, 4] not [2, 3, 6, 10]. Please call `remap_label` beforehand
    and `by_size` flag has no effect on the result.

    Returns:
        [dq, sq, pq]: measurement statistic

        [paired_true, paired_pred, unpaired_true, unpaired_pred]:
                      pairing information to perform measurement

    """
    assert match_iou >= 0.0, "Can't be negative"

    true = np.copy(true)
    pred = np.copy(pred)
    true_id_list = list(np.unique(true))
    pred_id_list = list(np.unique(pred))

    true_masks = [None, ]
    for t in true_id_list[1:]:
        t_mask = np.array(true == t, np.uint8)
        true_masks.append(t_mask)

    pred_masks = [None, ]
    for p in pred_id_list[1:]:
        p_mask = np.array(pred == p, np.uint8)
        pred_masks.append(p_mask)

    # prefill with value
    pairwise_iou = np.zeros(
        [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
    )

    # caching pairwise iou
    for true_id in true_id_list[1:]:  # 0-th is background
        t_mask = true_masks[true_id]
        pred_true_overlap = pred[t_mask > 0]
        pred_true_overlap_id = np.unique(pred_true_overlap)
        pred_true_overlap_id = list(pred_true_overlap_id)
        for pred_id in pred_true_overlap_id:
            if pred_id == 0:  # ignore
                continue  # overlapping background
            p_mask = pred_masks[pred_id]
            total = (t_mask + p_mask).sum()
            inter = (t_mask * p_mask).sum()
            iou = inter / (total - inter)
            pairwise_iou[true_id - 1, pred_id - 1] = iou
    #
    if match_iou >= 0.5:
        paired_iou = pairwise_iou[pairwise_iou > match_iou]
        pairwise_iou[pairwise_iou <= match_iou] = 0.0
        paired_true, paired_pred = np.nonzero(pairwise_iou)
        paired_iou = pairwise_iou[paired_true, paired_pred]
        paired_true += 1  # index is instance id - 1
        paired_pred += 1  # hence return back to original
    else:  # * Exhaustive maximal unique pairing
        #### Munkres pairing with scipy library
        # the algorithm returns (row indices, matched column indices);
        # if there are multiple identical costs in a row, the index of the
        # first occurrence is returned, thus the unique pairing is ensured
        # inverse pair to get high IoU as minimum
        paired_true, paired_pred = linear_sum_assignment(-pairwise_iou)
        ### extract the paired cost and remove invalid pair
        paired_iou = pairwise_iou[paired_true, paired_pred]
        # now select those above threshold level
        # paired with iou = 0.0 i.e no intersection => FP or FN
        paired_true = list(paired_true[paired_iou > match_iou] + 1)
        paired_pred = list(paired_pred[paired_iou > match_iou] + 1)
        paired_iou = paired_iou[paired_iou > match_iou]

    # get the actual FP and FN
    unpaired_true = [idx for idx in true_id_list[1:] if idx not in paired_true]
    unpaired_pred = [idx for idx in pred_id_list[1:] if idx not in paired_pred]
    # print(paired_iou.shape, paired_true.shape, len(unpaired_true), len(unpaired_pred))

    #
    tp = len(paired_true)
    fp = len(unpaired_pred)
    fn = len(unpaired_true)
    # get the F1-score i.e DQ
    dq = tp / (tp + 0.5 * fp + 0.5 * fn)
    # get the SQ, no paired pair has 0 iou so it has no impact
    sq = paired_iou.sum() / (tp + 1.0e-6)

    return [dq, sq, dq * sq], [paired_true, paired_pred, unpaired_true, unpaired_pred]


#####
def get_fast_dice_2(true, pred):
    """Ensemble dice."""
    true = np.copy(true)
    pred = np.copy(pred)
    true_id = list(np.unique(true))
    pred_id = list(np.unique(pred))

    overall_total = 0
    overall_inter = 0

    true_masks = [np.zeros(true.shape)]
    for t in true_id[1:]:
        t_mask = np.array(true == t, np.uint8)
        true_masks.append(t_mask)

    pred_masks = [np.zeros(true.shape)]
    for p in pred_id[1:]:
        p_mask = np.array(pred == p, np.uint8)
        pred_masks.append(p_mask)

    for true_idx in range(1, len(true_id)):
        t_mask = true_masks[true_idx]
        pred_true_overlap = pred[t_mask > 0]
        pred_true_overlap_id = np.unique(pred_true_overlap)
        pred_true_overlap_id = list(pred_true_overlap_id)
        try:  # blindly remove background
            pred_true_overlap_id.remove(0)
        except ValueError:
            pass  # just means there is no background
        for pred_idx in pred_true_overlap_id:
            p_mask = pred_masks[pred_idx]
            total = (t_mask + p_mask).sum()
            inter = (t_mask * p_mask).sum()
            overall_total += total
            overall_inter += inter

    return 2 * overall_inter / overall_total


#####--------------------------As pseudocode
def get_dice_1(true, pred):
    """Traditional dice."""
    # cast to binary 1st
    true = np.copy(true)
    pred = np.copy(pred)
    true[true > 0] = 1
    pred[pred > 0] = 1
    inter = true * pred
    denom = true + pred
    return 2.0 * np.sum(inter) / np.sum(denom)


####
def get_dice_2(true, pred):
    """Ensemble Dice as used in Computational Precision Medicine Challenge."""
    true = np.copy(true)
    pred = np.copy(pred)
    true_id = list(np.unique(true))
    pred_id = list(np.unique(pred))
    # remove background aka id 0
    true_id.remove(0)
    pred_id.remove(0)

    total_markup = 0
    total_intersect = 0
    for t in true_id:
        t_mask = np.array(true == t, np.uint8)
        for p in pred_id:
            p_mask = np.array(pred == p, np.uint8)
            intersect = p_mask * t_mask
            if intersect.sum() > 0:
                total_intersect += intersect.sum()
                total_markup += t_mask.sum() + p_mask.sum()
    return 2 * total_intersect / total_markup


#####
def remap_label(pred, by_size=False):
    """Rename all instance ids so that the ids are contiguous i.e [0, 1, 2, 3]
    not [0, 2, 4, 6]. The ordering of instances (which one comes first)
    is preserved unless by_size=True, in which case the instances will be
    reordered so that bigger nuclei get smaller IDs.

    Args:
        pred    : the 2d array containing instances where each instance is
                  marked by a non-zero integer
        by_size : renaming such that larger nuclei have smaller ids (on-top)
    """
    pred_id = list(np.unique(pred))
    pred_id.remove(0)
    if len(pred_id) == 0:
        return pred  # no label
    if by_size:
        pred_size = []
        for inst_id in pred_id:
            size = (pred == inst_id).sum()
            pred_size.append(size)
        # sort the id by size in descending order
        pair_list = zip(pred_id, pred_size)
        pair_list = sorted(pair_list, key=lambda x: x[1], reverse=True)
        pred_id, pred_size = zip(*pair_list)

    new_pred = np.zeros(pred.shape, np.int32)
    for idx, inst_id in enumerate(pred_id):
        new_pred[pred == inst_id] = idx + 1
    return new_pred


#####
def pair_coordinates(setA, setB, radius):
    """Use the Munkres or Kuhn-Munkres algorithm to find the most optimal
    unique pairing (largest possible match) when pairing points in set B
    against points in set A, using distance as the cost function.

    Args:
        setA, setB: np.array (float32) of size Nx2 containing the XY
                    coordinates of N different points
        radius: valid area around a point in setA to consider
                a given coordinate in setB a candidate for match

    Return:
        pairing: an array of index pairs where the point at index pairing[0]
                 in set A is paired with the point at index pairing[1] in set B
        unpairedA, unpairedB: remaining unpaired points in set A and set B
    """
    # * Euclidean distance as the cost matrix
    pair_distance = scipy.spatial.distance.cdist(setA, setB, metric='euclidean')

    # * Munkres pairing with scipy library
    # the algorithm returns (row indices, matched column indices);
    # if there are multiple identical costs in a row, the index of the first
    # occurrence is returned, thus the unique pairing is ensured
    indicesA, paired_indicesB = linear_sum_assignment(pair_distance)

    # extract the paired cost and remove instances
    # outside of designated radius
    pair_cost = pair_distance[indicesA, paired_indicesB]

    pairedA = indicesA[pair_cost <= radius]
    pairedB = paired_indicesB[pair_cost <= radius]

    pairing = np.concatenate([pairedA[:, None], pairedB[:, None]], axis=-1)
    unpairedA = np.delete(np.arange(setA.shape[0]), pairedA)
    unpairedB = np.delete(np.arange(setB.shape[0]), pairedB)
    return pairing, unpairedA, unpairedB
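# A minimal sanity-check sketch (not part of the original file): two 1x6
# label maps with two instances each; instance 2 of `pred` only partially
# overlaps instance 2 of `true` (IoU = 2/3), so at the default match_iou of
# 0.5 both instances pair up, giving DQ = 1 and SQ < 1.
if __name__ == '__main__':
    true = np.array([[1, 1, 0, 2, 2, 2]])
    pred = np.array([[1, 1, 0, 0, 2, 2]])
    [dq, sq, pq], _ = get_fast_pq(true, pred)
    print('DQ={:.3f} SQ={:.3f} PQ={:.3f}'.format(dq, sq, pq))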
nilq/baby-python
python
from ivy import ivy_module as im
from ivy.ivy_compiler import ivy_from_string
from ivy.tk_ui import new_ui
from ivy import ivy_utils as iu
from ivy import ivy_check as ick
from ivy import ivy_logic as il

prog = """#lang ivy1.5

type t
type p

relation sent(X:p,Y:t)
function pid(X:t):p

axiom X:t = X
axiom X:t < Y & Y < Z -> X < Z
axiom X:t = Y + 0
axiom X:t = Y + Z
axiom X = (Y:t + 1) if (Y = Z) else 0
axiom forall X,Y,Z. X:t < Y & Y < Z -> X < Z
axiom forall NO0:t,NO1:t. ~(pid(NO0:t) < pid(NO1:t) & sent(pid(NO0:t),NO0:t))
"""

with im.Module():
    iu.set_parameters({'mode': 'induction', 'show_compiled': 'true'})
    ivy_from_string(prog, create_isolate=False)

    # Note: ivy 1.5-era scripts are Python 2, hence the statement-form prints.
    for adecl in im.module.labeled_axioms:
        f = adecl.args[1]
        print
        print str(f)
        print il.to_str_with_var_sorts(f)
        print il.fmla_to_str_ambiguous(f)

    # main_ui.answer("OK")
    # ui.check_inductiveness()
    # # ui = ui.cti
    # cg = ui.current_concept_graph
    # cg.show_relation(cg.relation('link(X,Y)'),'+')
    # cg.gather()
    # main_ui.answer("OK")
    # cg.strengthen()
    # main_ui.answer("OK")
    # ui.check_inductiveness()
    # # cg.show_relation(cg.relation('semaphore'),'+')
    # cg.gather()
    # main_ui.answer("View")
    # cg.bmc_conjecture(bound=1)
    # # main_ui.mainloop()
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Ambry Bundle Library File
# Use this file for code that may be imported into other bundles
nilq/baby-python
python
import kydb

from portfolio_management.common.base_item import BaseItem, Factory
from portfolio_management.common.account_container_mixin import AccountContainerMixin
from portfolio_management.common.position_mixin import PositionMixin
from portfolio_management.portfolio.event import Event, EventType
from portfolio_management.portfolio.instrument import Instrument, InstrumentFx
import portfolio_management.utils.func_utils as fu


class Deal(BaseItem, AccountContainerMixin, PositionMixin, kydb.DbObj):
    """ Represents a buy / sell transaction for a financial instrument """

    @kydb.stored
    def state(self) -> str:
        return ''

    def state_obj(self) -> Event:
        return self.db[Factory.get_class_path('Event', self.state())]

    def positions(self) -> dict:
        return {}

    def __str__(self):
        return '{0}[{1}]'.format(self.id(), self.state())

    def apply_event(self, event_type, price=None, qty=None, ccy=None):
        if event_type == EventType.Amend:
            if price:
                self.price.setvalue(price)
            if qty:
                # Bug fix: the original assigned qty to price here.
                self.qty.setvalue(qty)
            if ccy:
                self.ccy.setvalue(ccy)

        self.events().add(self.state())
        event = Factory.create('Event', db=self.db, event_type=event_type)
        self.state.setvalue(event.id())

    @kydb.stored
    def instrument(self) -> str:
        return ''

    def instrument_obj(self) -> Instrument:
        return self.db[Factory.get_class_path('Instrument', self.instrument())]

    @kydb.stored
    def direction(self) -> str:
        return 'B'

    @kydb.stored
    def ccy(self) -> str:
        return ''

    @kydb.stored
    def qty(self) -> float:
        return 0.00

    @kydb.stored
    def events(self) -> set:
        return set()

    @kydb.stored
    def events_obj(self) -> list:
        return [self.db[Factory.get_class_path('Event', a)] for a in self.events()]


class DealEq(Deal):
    """ Equity type deal """

    @kydb.stored
    def price(self) -> float:
        return 0.00

    def positions(self) -> dict:
        positions = {
            self.ccy(): -self.qty() * self.price(),
            self.instrument_obj().symbol(): self.qty()
        }
        return positions

    @kydb.stored
    def notional(self) -> float:
        # Bug fix: the original chained conditional expressions
        # ("qty if qty else 0.00 * price if price else 0.00") which returned
        # qty alone; the intent is qty * price with missing values as 0.
        return (self.qty() or 0.00) * (self.price() or 0.00)


class DealFx(Deal):
    """ FX Spot type deal """

    @kydb.stored
    def rate(self) -> float:
        return 0.00

    @kydb.stored
    def ccy1(self) -> str:
        return ''

    @kydb.stored
    def ccy2(self) -> str:
        return ''

    @kydb.stored
    def ccy1_amount(self) -> float:
        return 0.00

    @kydb.stored
    def ccy2_amount(self) -> float:
        return 0.00

    def positions(self) -> dict:
        factor = 1 if self.direction() == 'B' else -1
        positions = {self.ccy1(): self.ccy1_amount() * factor,
                     self.ccy2(): self.ccy2_amount() * factor * -1}
        return positions

    def instrument_obj(self) -> InstrumentFx:
        return self.db[Factory.get_class_path('InstrumentFx', self.instrument())]


def main():
    pass


if __name__ == '__main__':
    main()
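# Worked example of the positions() arithmetic above (hedged: actual object
# construction goes through Factory/kydb and is omitted here). For a bought
# ('B') FX deal with ccy1='EUR', ccy2='USD', ccy1_amount=1000.0 and
# ccy2_amount=1100.0, DealFx.positions() yields:
#
#     {'EUR': 1000.0, 'USD': -1100.0}
#
# and the same deal sold ('S') flips both signs.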
nilq/baby-python
python
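# uctable: UTF-8 byte sequences (one inner list per character) for Unicode
# characters with numeric values -- ASCII digits, superscripts and vulgar
# fractions, and digit series from many scripts (Arabic-Indic, Devanagari,
# Thai, fullwidth forms, and others).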
uctable = [ [ 48 ], [ 49 ], [ 50 ], [ 51 ], [ 52 ], [ 53 ], [ 54 ], [ 55 ], [ 56 ], [ 57 ], [ 194, 178 ], [ 194, 179 ], [ 194, 185 ], [ 194, 188 ], [ 194, 189 ], [ 194, 190 ], [ 217, 160 ], [ 217, 161 ], [ 217, 162 ], [ 217, 163 ], [ 217, 164 ], [ 217, 165 ], [ 217, 166 ], [ 217, 167 ], [ 217, 168 ], [ 217, 169 ], [ 219, 176 ], [ 219, 177 ], [ 219, 178 ], [ 219, 179 ], [ 219, 180 ], [ 219, 181 ], [ 219, 182 ], [ 219, 183 ], [ 219, 184 ], [ 219, 185 ], [ 223, 128 ], [ 223, 129 ], [ 223, 130 ], [ 223, 131 ], [ 223, 132 ], [ 223, 133 ], [ 223, 134 ], [ 223, 135 ], [ 223, 136 ], [ 223, 137 ], [ 224, 165, 166 ], [ 224, 165, 167 ], [ 224, 165, 168 ], [ 224, 165, 169 ], [ 224, 165, 170 ], [ 224, 165, 171 ], [ 224, 165, 172 ], [ 224, 165, 173 ], [ 224, 165, 174 ], [ 224, 165, 175 ], [ 224, 167, 166 ], [ 224, 167, 167 ], [ 224, 167, 168 ], [ 224, 167, 169 ], [ 224, 167, 170 ], [ 224, 167, 171 ], [ 224, 167, 172 ], [ 224, 167, 173 ], [ 224, 167, 174 ], [ 224, 167, 175 ], [ 224, 167, 180 ], [ 224, 167, 181 ], [ 224, 167, 182 ], [ 224, 167, 183 ], [ 224, 167, 184 ], [ 224, 167, 185 ], [ 224, 169, 166 ], [ 224, 169, 167 ], [ 224, 169, 168 ], [ 224, 169, 169 ], [ 224, 169, 170 ], [ 224, 169, 171 ], [ 224, 169, 172 ], [ 224, 169, 173 ], [ 224, 169, 174 ], [ 224, 169, 175 ], [ 224, 171, 166 ], [ 224, 171, 167 ], [ 224, 171, 168 ], [ 224, 171, 169 ], [ 224, 171, 170 ], [ 224, 171, 171 ], [ 224, 171, 172 ], [ 224, 171, 173 ], [ 224, 171, 174 ], [ 224, 171, 175 ], [ 224, 173, 166 ], [ 224, 173, 167 ], [ 224, 173, 168 ], [ 224, 173, 169 ], [ 224, 173, 170 ], [ 224, 173, 171 ], [ 224, 173, 172 ], [ 224, 173, 173 ], [ 224, 173, 174 ], [ 224, 173, 175 ], [ 224, 173, 178 ], [ 224, 173, 179 ], [ 224, 173, 180 ], [ 224, 173, 181 ], [ 224, 173, 182 ], [ 224, 173, 183 ], [ 224, 175, 166 ], [ 224, 175, 167 ], [ 224, 175, 168 ], [ 224, 175, 169 ], [ 224, 175, 170 ], [ 224, 175, 171 ], [ 224, 175, 172 ], [ 224, 175, 173 ], [ 224, 175, 174 ], [ 224, 175, 175 ], [ 224, 175, 176 ], [ 224, 175, 177 ], [ 224, 175, 178 ], [ 224, 177, 166 ], [ 224, 177, 167 ], [ 224, 177, 168 ], [ 224, 177, 169 ], [ 224, 177, 170 ], [ 224, 177, 171 ], [ 224, 177, 172 ], [ 224, 177, 173 ], [ 224, 177, 174 ], [ 224, 177, 175 ], [ 224, 177, 184 ], [ 224, 177, 185 ], [ 224, 177, 186 ], [ 224, 177, 187 ], [ 224, 177, 188 ], [ 224, 177, 189 ], [ 224, 177, 190 ], [ 224, 179, 166 ], [ 224, 179, 167 ], [ 224, 179, 168 ], [ 224, 179, 169 ], [ 224, 179, 170 ], [ 224, 179, 171 ], [ 224, 179, 172 ], [ 224, 179, 173 ], [ 224, 179, 174 ], [ 224, 179, 175 ], [ 224, 181, 166 ], [ 224, 181, 167 ], [ 224, 181, 168 ], [ 224, 181, 169 ], [ 224, 181, 170 ], [ 224, 181, 171 ], [ 224, 181, 172 ], [ 224, 181, 173 ], [ 224, 181, 174 ], [ 224, 181, 175 ], [ 224, 181, 176 ], [ 224, 181, 177 ], [ 224, 181, 178 ], [ 224, 181, 179 ], [ 224, 181, 180 ], [ 224, 181, 181 ], [ 224, 183, 166 ], [ 224, 183, 167 ], [ 224, 183, 168 ], [ 224, 183, 169 ], [ 224, 183, 170 ], [ 224, 183, 171 ], [ 224, 183, 172 ], [ 224, 183, 173 ], [ 224, 183, 174 ], [ 224, 183, 175 ], [ 224, 185, 144 ], [ 224, 185, 145 ], [ 224, 185, 146 ], [ 224, 185, 147 ], [ 224, 185, 148 ], [ 224, 185, 149 ], [ 224, 185, 150 ], [ 224, 185, 151 ], [ 224, 185, 152 ], [ 224, 185, 153 ], [ 224, 187, 144 ], [ 224, 187, 145 ], [ 224, 187, 146 ], [ 224, 187, 147 ], [ 224, 187, 148 ], [ 224, 187, 149 ], [ 224, 187, 150 ], [ 224, 187, 151 ], [ 224, 187, 152 ], [ 224, 187, 153 ], [ 224, 188, 160 ], [ 224, 188, 161 ], [ 224, 188, 162 ], [ 224, 188, 163 ], [ 224, 188, 164 ], [ 224, 188, 165 ], [ 224, 188, 166 ], [ 224, 188, 
167 ], [ 224, 188, 168 ], [ 224, 188, 169 ], [ 224, 188, 170 ], [ 224, 188, 171 ], [ 224, 188, 172 ], [ 224, 188, 173 ], [ 224, 188, 174 ], [ 224, 188, 175 ], [ 224, 188, 176 ], [ 224, 188, 177 ], [ 224, 188, 178 ], [ 224, 188, 179 ], [ 225, 129, 128 ], [ 225, 129, 129 ], [ 225, 129, 130 ], [ 225, 129, 131 ], [ 225, 129, 132 ], [ 225, 129, 133 ], [ 225, 129, 134 ], [ 225, 129, 135 ], [ 225, 129, 136 ], [ 225, 129, 137 ], [ 225, 130, 144 ], [ 225, 130, 145 ], [ 225, 130, 146 ], [ 225, 130, 147 ], [ 225, 130, 148 ], [ 225, 130, 149 ], [ 225, 130, 150 ], [ 225, 130, 151 ], [ 225, 130, 152 ], [ 225, 130, 153 ], [ 225, 141, 169 ], [ 225, 141, 170 ], [ 225, 141, 171 ], [ 225, 141, 172 ], [ 225, 141, 173 ], [ 225, 141, 174 ], [ 225, 141, 175 ], [ 225, 141, 176 ], [ 225, 141, 177 ], [ 225, 141, 178 ], [ 225, 141, 179 ], [ 225, 141, 180 ], [ 225, 141, 181 ], [ 225, 141, 182 ], [ 225, 141, 183 ], [ 225, 141, 184 ], [ 225, 141, 185 ], [ 225, 141, 186 ], [ 225, 141, 187 ], [ 225, 141, 188 ], [ 225, 155, 174 ], [ 225, 155, 175 ], [ 225, 155, 176 ], [ 225, 159, 160 ], [ 225, 159, 161 ], [ 225, 159, 162 ], [ 225, 159, 163 ], [ 225, 159, 164 ], [ 225, 159, 165 ], [ 225, 159, 166 ], [ 225, 159, 167 ], [ 225, 159, 168 ], [ 225, 159, 169 ], [ 225, 159, 176 ], [ 225, 159, 177 ], [ 225, 159, 178 ], [ 225, 159, 179 ], [ 225, 159, 180 ], [ 225, 159, 181 ], [ 225, 159, 182 ], [ 225, 159, 183 ], [ 225, 159, 184 ], [ 225, 159, 185 ], [ 225, 160, 144 ], [ 225, 160, 145 ], [ 225, 160, 146 ], [ 225, 160, 147 ], [ 225, 160, 148 ], [ 225, 160, 149 ], [ 225, 160, 150 ], [ 225, 160, 151 ], [ 225, 160, 152 ], [ 225, 160, 153 ], [ 225, 165, 134 ], [ 225, 165, 135 ], [ 225, 165, 136 ], [ 225, 165, 137 ], [ 225, 165, 138 ], [ 225, 165, 139 ], [ 225, 165, 140 ], [ 225, 165, 141 ], [ 225, 165, 142 ], [ 225, 165, 143 ], [ 225, 167, 144 ], [ 225, 167, 145 ], [ 225, 167, 146 ], [ 225, 167, 147 ], [ 225, 167, 148 ], [ 225, 167, 149 ], [ 225, 167, 150 ], [ 225, 167, 151 ], [ 225, 167, 152 ], [ 225, 167, 153 ], [ 225, 167, 154 ], [ 225, 170, 128 ], [ 225, 170, 129 ], [ 225, 170, 130 ], [ 225, 170, 131 ], [ 225, 170, 132 ], [ 225, 170, 133 ], [ 225, 170, 134 ], [ 225, 170, 135 ], [ 225, 170, 136 ], [ 225, 170, 137 ], [ 225, 170, 144 ], [ 225, 170, 145 ], [ 225, 170, 146 ], [ 225, 170, 147 ], [ 225, 170, 148 ], [ 225, 170, 149 ], [ 225, 170, 150 ], [ 225, 170, 151 ], [ 225, 170, 152 ], [ 225, 170, 153 ], [ 225, 173, 144 ], [ 225, 173, 145 ], [ 225, 173, 146 ], [ 225, 173, 147 ], [ 225, 173, 148 ], [ 225, 173, 149 ], [ 225, 173, 150 ], [ 225, 173, 151 ], [ 225, 173, 152 ], [ 225, 173, 153 ], [ 225, 174, 176 ], [ 225, 174, 177 ], [ 225, 174, 178 ], [ 225, 174, 179 ], [ 225, 174, 180 ], [ 225, 174, 181 ], [ 225, 174, 182 ], [ 225, 174, 183 ], [ 225, 174, 184 ], [ 225, 174, 185 ], [ 225, 177, 128 ], [ 225, 177, 129 ], [ 225, 177, 130 ], [ 225, 177, 131 ], [ 225, 177, 132 ], [ 225, 177, 133 ], [ 225, 177, 134 ], [ 225, 177, 135 ], [ 225, 177, 136 ], [ 225, 177, 137 ], [ 225, 177, 144 ], [ 225, 177, 145 ], [ 225, 177, 146 ], [ 225, 177, 147 ], [ 225, 177, 148 ], [ 225, 177, 149 ], [ 225, 177, 150 ], [ 225, 177, 151 ], [ 225, 177, 152 ], [ 225, 177, 153 ], [ 226, 129, 176 ], [ 226, 129, 180 ], [ 226, 129, 181 ], [ 226, 129, 182 ], [ 226, 129, 183 ], [ 226, 129, 184 ], [ 226, 129, 185 ], [ 226, 130, 128 ], [ 226, 130, 129 ], [ 226, 130, 130 ], [ 226, 130, 131 ], [ 226, 130, 132 ], [ 226, 130, 133 ], [ 226, 130, 134 ], [ 226, 130, 135 ], [ 226, 130, 136 ], [ 226, 130, 137 ], [ 226, 133, 144 ], [ 226, 133, 145 ], [ 226, 133, 146 ], [ 226, 133, 
147 ], [ 226, 133, 148 ], [ 226, 133, 149 ], [ 226, 133, 150 ], [ 226, 133, 151 ], [ 226, 133, 152 ], [ 226, 133, 153 ], [ 226, 133, 154 ], [ 226, 133, 155 ], [ 226, 133, 156 ], [ 226, 133, 157 ], [ 226, 133, 158 ], [ 226, 133, 159 ], [ 226, 133, 160 ], [ 226, 133, 161 ], [ 226, 133, 162 ], [ 226, 133, 163 ], [ 226, 133, 164 ], [ 226, 133, 165 ], [ 226, 133, 166 ], [ 226, 133, 167 ], [ 226, 133, 168 ], [ 226, 133, 169 ], [ 226, 133, 170 ], [ 226, 133, 171 ], [ 226, 133, 172 ], [ 226, 133, 173 ], [ 226, 133, 174 ], [ 226, 133, 175 ], [ 226, 133, 176 ], [ 226, 133, 177 ], [ 226, 133, 178 ], [ 226, 133, 179 ], [ 226, 133, 180 ], [ 226, 133, 181 ], [ 226, 133, 182 ], [ 226, 133, 183 ], [ 226, 133, 184 ], [ 226, 133, 185 ], [ 226, 133, 186 ], [ 226, 133, 187 ], [ 226, 133, 188 ], [ 226, 133, 189 ], [ 226, 133, 190 ], [ 226, 133, 191 ], [ 226, 134, 128 ], [ 226, 134, 129 ], [ 226, 134, 130 ], [ 226, 134, 133 ], [ 226, 134, 134 ], [ 226, 134, 135 ], [ 226, 134, 136 ], [ 226, 134, 137 ], [ 226, 145, 160 ], [ 226, 145, 161 ], [ 226, 145, 162 ], [ 226, 145, 163 ], [ 226, 145, 164 ], [ 226, 145, 165 ], [ 226, 145, 166 ], [ 226, 145, 167 ], [ 226, 145, 168 ], [ 226, 145, 169 ], [ 226, 145, 170 ], [ 226, 145, 171 ], [ 226, 145, 172 ], [ 226, 145, 173 ], [ 226, 145, 174 ], [ 226, 145, 175 ], [ 226, 145, 176 ], [ 226, 145, 177 ], [ 226, 145, 178 ], [ 226, 145, 179 ], [ 226, 145, 180 ], [ 226, 145, 181 ], [ 226, 145, 182 ], [ 226, 145, 183 ], [ 226, 145, 184 ], [ 226, 145, 185 ], [ 226, 145, 186 ], [ 226, 145, 187 ], [ 226, 145, 188 ], [ 226, 145, 189 ], [ 226, 145, 190 ], [ 226, 145, 191 ], [ 226, 146, 128 ], [ 226, 146, 129 ], [ 226, 146, 130 ], [ 226, 146, 131 ], [ 226, 146, 132 ], [ 226, 146, 133 ], [ 226, 146, 134 ], [ 226, 146, 135 ], [ 226, 146, 136 ], [ 226, 146, 137 ], [ 226, 146, 138 ], [ 226, 146, 139 ], [ 226, 146, 140 ], [ 226, 146, 141 ], [ 226, 146, 142 ], [ 226, 146, 143 ], [ 226, 146, 144 ], [ 226, 146, 145 ], [ 226, 146, 146 ], [ 226, 146, 147 ], [ 226, 146, 148 ], [ 226, 146, 149 ], [ 226, 146, 150 ], [ 226, 146, 151 ], [ 226, 146, 152 ], [ 226, 146, 153 ], [ 226, 146, 154 ], [ 226, 146, 155 ], [ 226, 147, 170 ], [ 226, 147, 171 ], [ 226, 147, 172 ], [ 226, 147, 173 ], [ 226, 147, 174 ], [ 226, 147, 175 ], [ 226, 147, 176 ], [ 226, 147, 177 ], [ 226, 147, 178 ], [ 226, 147, 179 ], [ 226, 147, 180 ], [ 226, 147, 181 ], [ 226, 147, 182 ], [ 226, 147, 183 ], [ 226, 147, 184 ], [ 226, 147, 185 ], [ 226, 147, 186 ], [ 226, 147, 187 ], [ 226, 147, 188 ], [ 226, 147, 189 ], [ 226, 147, 190 ], [ 226, 147, 191 ], [ 226, 157, 182 ], [ 226, 157, 183 ], [ 226, 157, 184 ], [ 226, 157, 185 ], [ 226, 157, 186 ], [ 226, 157, 187 ], [ 226, 157, 188 ], [ 226, 157, 189 ], [ 226, 157, 190 ], [ 226, 157, 191 ], [ 226, 158, 128 ], [ 226, 158, 129 ], [ 226, 158, 130 ], [ 226, 158, 131 ], [ 226, 158, 132 ], [ 226, 158, 133 ], [ 226, 158, 134 ], [ 226, 158, 135 ], [ 226, 158, 136 ], [ 226, 158, 137 ], [ 226, 158, 138 ], [ 226, 158, 139 ], [ 226, 158, 140 ], [ 226, 158, 141 ], [ 226, 158, 142 ], [ 226, 158, 143 ], [ 226, 158, 144 ], [ 226, 158, 145 ], [ 226, 158, 146 ], [ 226, 158, 147 ], [ 226, 179, 189 ], [ 227, 128, 135 ], [ 227, 128, 161 ], [ 227, 128, 162 ], [ 227, 128, 163 ], [ 227, 128, 164 ], [ 227, 128, 165 ], [ 227, 128, 166 ], [ 227, 128, 167 ], [ 227, 128, 168 ], [ 227, 128, 169 ], [ 227, 128, 184 ], [ 227, 128, 185 ], [ 227, 128, 186 ], [ 227, 134, 146 ], [ 227, 134, 147 ], [ 227, 134, 148 ], [ 227, 134, 149 ], [ 227, 136, 160 ], [ 227, 136, 161 ], [ 227, 136, 162 ], [ 227, 136, 163 ], [ 227, 136, 
164 ], [ 227, 136, 165 ], [ 227, 136, 166 ], [ 227, 136, 167 ], [ 227, 136, 168 ], [ 227, 136, 169 ], [ 227, 137, 136 ], [ 227, 137, 137 ], [ 227, 137, 138 ], [ 227, 137, 139 ], [ 227, 137, 140 ], [ 227, 137, 141 ], [ 227, 137, 142 ], [ 227, 137, 143 ], [ 227, 137, 145 ], [ 227, 137, 146 ], [ 227, 137, 147 ], [ 227, 137, 148 ], [ 227, 137, 149 ], [ 227, 137, 150 ], [ 227, 137, 151 ], [ 227, 137, 152 ], [ 227, 137, 153 ], [ 227, 137, 154 ], [ 227, 137, 155 ], [ 227, 137, 156 ], [ 227, 137, 157 ], [ 227, 137, 158 ], [ 227, 137, 159 ], [ 227, 138, 128 ], [ 227, 138, 129 ], [ 227, 138, 130 ], [ 227, 138, 131 ], [ 227, 138, 132 ], [ 227, 138, 133 ], [ 227, 138, 134 ], [ 227, 138, 135 ], [ 227, 138, 136 ], [ 227, 138, 137 ], [ 227, 138, 177 ], [ 227, 138, 178 ], [ 227, 138, 179 ], [ 227, 138, 180 ], [ 227, 138, 181 ], [ 227, 138, 182 ], [ 227, 138, 183 ], [ 227, 138, 184 ], [ 227, 138, 185 ], [ 227, 138, 186 ], [ 227, 138, 187 ], [ 227, 138, 188 ], [ 227, 138, 189 ], [ 227, 138, 190 ], [ 227, 138, 191 ], [ 234, 152, 160 ], [ 234, 152, 161 ], [ 234, 152, 162 ], [ 234, 152, 163 ], [ 234, 152, 164 ], [ 234, 152, 165 ], [ 234, 152, 166 ], [ 234, 152, 167 ], [ 234, 152, 168 ], [ 234, 152, 169 ], [ 234, 155, 166 ], [ 234, 155, 167 ], [ 234, 155, 168 ], [ 234, 155, 169 ], [ 234, 155, 170 ], [ 234, 155, 171 ], [ 234, 155, 172 ], [ 234, 155, 173 ], [ 234, 155, 174 ], [ 234, 155, 175 ], [ 234, 160, 176 ], [ 234, 160, 177 ], [ 234, 160, 178 ], [ 234, 160, 179 ], [ 234, 160, 180 ], [ 234, 160, 181 ], [ 234, 163, 144 ], [ 234, 163, 145 ], [ 234, 163, 146 ], [ 234, 163, 147 ], [ 234, 163, 148 ], [ 234, 163, 149 ], [ 234, 163, 150 ], [ 234, 163, 151 ], [ 234, 163, 152 ], [ 234, 163, 153 ], [ 234, 164, 128 ], [ 234, 164, 129 ], [ 234, 164, 130 ], [ 234, 164, 131 ], [ 234, 164, 132 ], [ 234, 164, 133 ], [ 234, 164, 134 ], [ 234, 164, 135 ], [ 234, 164, 136 ], [ 234, 164, 137 ], [ 234, 167, 144 ], [ 234, 167, 145 ], [ 234, 167, 146 ], [ 234, 167, 147 ], [ 234, 167, 148 ], [ 234, 167, 149 ], [ 234, 167, 150 ], [ 234, 167, 151 ], [ 234, 167, 152 ], [ 234, 167, 153 ], [ 234, 167, 176 ], [ 234, 167, 177 ], [ 234, 167, 178 ], [ 234, 167, 179 ], [ 234, 167, 180 ], [ 234, 167, 181 ], [ 234, 167, 182 ], [ 234, 167, 183 ], [ 234, 167, 184 ], [ 234, 167, 185 ], [ 234, 169, 144 ], [ 234, 169, 145 ], [ 234, 169, 146 ], [ 234, 169, 147 ], [ 234, 169, 148 ], [ 234, 169, 149 ], [ 234, 169, 150 ], [ 234, 169, 151 ], [ 234, 169, 152 ], [ 234, 169, 153 ], [ 234, 175, 176 ], [ 234, 175, 177 ], [ 234, 175, 178 ], [ 234, 175, 179 ], [ 234, 175, 180 ], [ 234, 175, 181 ], [ 234, 175, 182 ], [ 234, 175, 183 ], [ 234, 175, 184 ], [ 234, 175, 185 ], [ 239, 188, 144 ], [ 239, 188, 145 ], [ 239, 188, 146 ], [ 239, 188, 147 ], [ 239, 188, 148 ], [ 239, 188, 149 ], [ 239, 188, 150 ], [ 239, 188, 151 ], [ 239, 188, 152 ], [ 239, 188, 153 ], [ 240, 144, 132, 135 ], [ 240, 144, 132, 136 ], [ 240, 144, 132, 137 ], [ 240, 144, 132, 138 ], [ 240, 144, 132, 139 ], [ 240, 144, 132, 140 ], [ 240, 144, 132, 141 ], [ 240, 144, 132, 142 ], [ 240, 144, 132, 143 ], [ 240, 144, 132, 144 ], [ 240, 144, 132, 145 ], [ 240, 144, 132, 146 ], [ 240, 144, 132, 147 ], [ 240, 144, 132, 148 ], [ 240, 144, 132, 149 ], [ 240, 144, 132, 150 ], [ 240, 144, 132, 151 ], [ 240, 144, 132, 152 ], [ 240, 144, 132, 153 ], [ 240, 144, 132, 154 ], [ 240, 144, 132, 155 ], [ 240, 144, 132, 156 ], [ 240, 144, 132, 157 ], [ 240, 144, 132, 158 ], [ 240, 144, 132, 159 ], [ 240, 144, 132, 160 ], [ 240, 144, 132, 161 ], [ 240, 144, 132, 162 ], [ 240, 144, 132, 163 ], [ 240, 144, 132, 164 
], [ 240, 144, 132, 165 ], [ 240, 144, 132, 166 ], [ 240, 144, 132, 167 ], [ 240, 144, 132, 168 ], [ 240, 144, 132, 169 ], [ 240, 144, 132, 170 ], [ 240, 144, 132, 171 ], [ 240, 144, 132, 172 ], [ 240, 144, 132, 173 ], [ 240, 144, 132, 174 ], [ 240, 144, 132, 175 ], [ 240, 144, 132, 176 ], [ 240, 144, 132, 177 ], [ 240, 144, 132, 178 ], [ 240, 144, 132, 179 ], [ 240, 144, 133, 128 ], [ 240, 144, 133, 129 ], [ 240, 144, 133, 130 ], [ 240, 144, 133, 131 ], [ 240, 144, 133, 132 ], [ 240, 144, 133, 133 ], [ 240, 144, 133, 134 ], [ 240, 144, 133, 135 ], [ 240, 144, 133, 136 ], [ 240, 144, 133, 137 ], [ 240, 144, 133, 138 ], [ 240, 144, 133, 139 ], [ 240, 144, 133, 140 ], [ 240, 144, 133, 141 ], [ 240, 144, 133, 142 ], [ 240, 144, 133, 143 ], [ 240, 144, 133, 144 ], [ 240, 144, 133, 145 ], [ 240, 144, 133, 146 ], [ 240, 144, 133, 147 ], [ 240, 144, 133, 148 ], [ 240, 144, 133, 149 ], [ 240, 144, 133, 150 ], [ 240, 144, 133, 151 ], [ 240, 144, 133, 152 ], [ 240, 144, 133, 153 ], [ 240, 144, 133, 154 ], [ 240, 144, 133, 155 ], [ 240, 144, 133, 156 ], [ 240, 144, 133, 157 ], [ 240, 144, 133, 158 ], [ 240, 144, 133, 159 ], [ 240, 144, 133, 160 ], [ 240, 144, 133, 161 ], [ 240, 144, 133, 162 ], [ 240, 144, 133, 163 ], [ 240, 144, 133, 164 ], [ 240, 144, 133, 165 ], [ 240, 144, 133, 166 ], [ 240, 144, 133, 167 ], [ 240, 144, 133, 168 ], [ 240, 144, 133, 169 ], [ 240, 144, 133, 170 ], [ 240, 144, 133, 171 ], [ 240, 144, 133, 172 ], [ 240, 144, 133, 173 ], [ 240, 144, 133, 174 ], [ 240, 144, 133, 175 ], [ 240, 144, 133, 176 ], [ 240, 144, 133, 177 ], [ 240, 144, 133, 178 ], [ 240, 144, 133, 179 ], [ 240, 144, 133, 180 ], [ 240, 144, 133, 181 ], [ 240, 144, 133, 182 ], [ 240, 144, 133, 183 ], [ 240, 144, 133, 184 ], [ 240, 144, 134, 138 ], [ 240, 144, 134, 139 ], [ 240, 144, 139, 161 ], [ 240, 144, 139, 162 ], [ 240, 144, 139, 163 ], [ 240, 144, 139, 164 ], [ 240, 144, 139, 165 ], [ 240, 144, 139, 166 ], [ 240, 144, 139, 167 ], [ 240, 144, 139, 168 ], [ 240, 144, 139, 169 ], [ 240, 144, 139, 170 ], [ 240, 144, 139, 171 ], [ 240, 144, 139, 172 ], [ 240, 144, 139, 173 ], [ 240, 144, 139, 174 ], [ 240, 144, 139, 175 ], [ 240, 144, 139, 176 ], [ 240, 144, 139, 177 ], [ 240, 144, 139, 178 ], [ 240, 144, 139, 179 ], [ 240, 144, 139, 180 ], [ 240, 144, 139, 181 ], [ 240, 144, 139, 182 ], [ 240, 144, 139, 183 ], [ 240, 144, 139, 184 ], [ 240, 144, 139, 185 ], [ 240, 144, 139, 186 ], [ 240, 144, 139, 187 ], [ 240, 144, 140, 160 ], [ 240, 144, 140, 161 ], [ 240, 144, 140, 162 ], [ 240, 144, 140, 163 ], [ 240, 144, 141, 129 ], [ 240, 144, 141, 138 ], [ 240, 144, 143, 145 ], [ 240, 144, 143, 146 ], [ 240, 144, 143, 147 ], [ 240, 144, 143, 148 ], [ 240, 144, 143, 149 ], [ 240, 144, 146, 160 ], [ 240, 144, 146, 161 ], [ 240, 144, 146, 162 ], [ 240, 144, 146, 163 ], [ 240, 144, 146, 164 ], [ 240, 144, 146, 165 ], [ 240, 144, 146, 166 ], [ 240, 144, 146, 167 ], [ 240, 144, 146, 168 ], [ 240, 144, 146, 169 ], [ 240, 144, 161, 152 ], [ 240, 144, 161, 153 ], [ 240, 144, 161, 154 ], [ 240, 144, 161, 155 ], [ 240, 144, 161, 156 ], [ 240, 144, 161, 157 ], [ 240, 144, 161, 158 ], [ 240, 144, 161, 159 ], [ 240, 144, 161, 185 ], [ 240, 144, 161, 186 ], [ 240, 144, 161, 187 ], [ 240, 144, 161, 188 ], [ 240, 144, 161, 189 ], [ 240, 144, 161, 190 ], [ 240, 144, 161, 191 ], [ 240, 144, 162, 167 ], [ 240, 144, 162, 168 ], [ 240, 144, 162, 169 ], [ 240, 144, 162, 170 ], [ 240, 144, 162, 171 ], [ 240, 144, 162, 172 ], [ 240, 144, 162, 173 ], [ 240, 144, 162, 174 ], [ 240, 144, 162, 175 ], [ 240, 144, 163, 187 ], [ 240, 144, 163, 188 ], 
[ 240, 144, 163, 189 ], [ 240, 144, 163, 190 ], [ 240, 144, 163, 191 ], [ 240, 144, 164, 150 ], [ 240, 144, 164, 151 ], [ 240, 144, 164, 152 ], [ 240, 144, 164, 153 ], [ 240, 144, 164, 154 ], [ 240, 144, 164, 155 ], [ 240, 144, 166, 188 ], [ 240, 144, 166, 189 ], [ 240, 144, 167, 128 ], [ 240, 144, 167, 129 ], [ 240, 144, 167, 130 ], [ 240, 144, 167, 131 ], [ 240, 144, 167, 132 ], [ 240, 144, 167, 133 ], [ 240, 144, 167, 134 ], [ 240, 144, 167, 135 ], [ 240, 144, 167, 136 ], [ 240, 144, 167, 137 ], [ 240, 144, 167, 138 ], [ 240, 144, 167, 139 ], [ 240, 144, 167, 140 ], [ 240, 144, 167, 141 ], [ 240, 144, 167, 142 ], [ 240, 144, 167, 143 ], [ 240, 144, 167, 146 ], [ 240, 144, 167, 147 ], [ 240, 144, 167, 148 ], [ 240, 144, 167, 149 ], [ 240, 144, 167, 150 ], [ 240, 144, 167, 151 ], [ 240, 144, 167, 152 ], [ 240, 144, 167, 153 ], [ 240, 144, 167, 154 ], [ 240, 144, 167, 155 ], [ 240, 144, 167, 156 ], [ 240, 144, 167, 157 ], [ 240, 144, 167, 158 ], [ 240, 144, 167, 159 ], [ 240, 144, 167, 160 ], [ 240, 144, 167, 161 ], [ 240, 144, 167, 162 ], [ 240, 144, 167, 163 ], [ 240, 144, 167, 164 ], [ 240, 144, 167, 165 ], [ 240, 144, 167, 166 ], [ 240, 144, 167, 167 ], [ 240, 144, 167, 168 ], [ 240, 144, 167, 169 ], [ 240, 144, 167, 170 ], [ 240, 144, 167, 171 ], [ 240, 144, 167, 172 ], [ 240, 144, 167, 173 ], [ 240, 144, 167, 174 ], [ 240, 144, 167, 175 ], [ 240, 144, 167, 176 ], [ 240, 144, 167, 177 ], [ 240, 144, 167, 178 ], [ 240, 144, 167, 179 ], [ 240, 144, 167, 180 ], [ 240, 144, 167, 181 ], [ 240, 144, 167, 182 ], [ 240, 144, 167, 183 ], [ 240, 144, 167, 184 ], [ 240, 144, 167, 185 ], [ 240, 144, 167, 186 ], [ 240, 144, 167, 187 ], [ 240, 144, 167, 188 ], [ 240, 144, 167, 189 ], [ 240, 144, 167, 190 ], [ 240, 144, 167, 191 ], [ 240, 144, 169, 128 ], [ 240, 144, 169, 129 ], [ 240, 144, 169, 130 ], [ 240, 144, 169, 131 ], [ 240, 144, 169, 132 ], [ 240, 144, 169, 133 ], [ 240, 144, 169, 134 ], [ 240, 144, 169, 135 ], [ 240, 144, 169, 189 ], [ 240, 144, 169, 190 ], [ 240, 144, 170, 157 ], [ 240, 144, 170, 158 ], [ 240, 144, 170, 159 ], [ 240, 144, 171, 171 ], [ 240, 144, 171, 172 ], [ 240, 144, 171, 173 ], [ 240, 144, 171, 174 ], [ 240, 144, 171, 175 ], [ 240, 144, 173, 152 ], [ 240, 144, 173, 153 ], [ 240, 144, 173, 154 ], [ 240, 144, 173, 155 ], [ 240, 144, 173, 156 ], [ 240, 144, 173, 157 ], [ 240, 144, 173, 158 ], [ 240, 144, 173, 159 ], [ 240, 144, 173, 184 ], [ 240, 144, 173, 185 ], [ 240, 144, 173, 186 ], [ 240, 144, 173, 187 ], [ 240, 144, 173, 188 ], [ 240, 144, 173, 189 ], [ 240, 144, 173, 190 ], [ 240, 144, 173, 191 ], [ 240, 144, 174, 169 ], [ 240, 144, 174, 170 ], [ 240, 144, 174, 171 ], [ 240, 144, 174, 172 ], [ 240, 144, 174, 173 ], [ 240, 144, 174, 174 ], [ 240, 144, 174, 175 ], [ 240, 144, 179, 186 ], [ 240, 144, 179, 187 ], [ 240, 144, 179, 188 ], [ 240, 144, 179, 189 ], [ 240, 144, 179, 190 ], [ 240, 144, 179, 191 ], [ 240, 144, 185, 160 ], [ 240, 144, 185, 161 ], [ 240, 144, 185, 162 ], [ 240, 144, 185, 163 ], [ 240, 144, 185, 164 ], [ 240, 144, 185, 165 ], [ 240, 144, 185, 166 ], [ 240, 144, 185, 167 ], [ 240, 144, 185, 168 ], [ 240, 144, 185, 169 ], [ 240, 144, 185, 170 ], [ 240, 144, 185, 171 ], [ 240, 144, 185, 172 ], [ 240, 144, 185, 173 ], [ 240, 144, 185, 174 ], [ 240, 144, 185, 175 ], [ 240, 144, 185, 176 ], [ 240, 144, 185, 177 ], [ 240, 144, 185, 178 ], [ 240, 144, 185, 179 ], [ 240, 144, 185, 180 ], [ 240, 144, 185, 181 ], [ 240, 144, 185, 182 ], [ 240, 144, 185, 183 ], [ 240, 144, 185, 184 ], [ 240, 144, 185, 185 ], [ 240, 144, 185, 186 ], [ 240, 144, 185, 187 ], [ 
240, 144, 185, 188 ], [ 240, 144, 185, 189 ], [ 240, 144, 185, 190 ], [ 240, 145, 129, 146 ], [ 240, 145, 129, 147 ], [ 240, 145, 129, 148 ], [ 240, 145, 129, 149 ], [ 240, 145, 129, 150 ], [ 240, 145, 129, 151 ], [ 240, 145, 129, 152 ], [ 240, 145, 129, 153 ], [ 240, 145, 129, 154 ], [ 240, 145, 129, 155 ], [ 240, 145, 129, 156 ], [ 240, 145, 129, 157 ], [ 240, 145, 129, 158 ], [ 240, 145, 129, 159 ], [ 240, 145, 129, 160 ], [ 240, 145, 129, 161 ], [ 240, 145, 129, 162 ], [ 240, 145, 129, 163 ], [ 240, 145, 129, 164 ], [ 240, 145, 129, 165 ], [ 240, 145, 129, 166 ], [ 240, 145, 129, 167 ], [ 240, 145, 129, 168 ], [ 240, 145, 129, 169 ], [ 240, 145, 129, 170 ], [ 240, 145, 129, 171 ], [ 240, 145, 129, 172 ], [ 240, 145, 129, 173 ], [ 240, 145, 129, 174 ], [ 240, 145, 129, 175 ], [ 240, 145, 131, 176 ], [ 240, 145, 131, 177 ], [ 240, 145, 131, 178 ], [ 240, 145, 131, 179 ], [ 240, 145, 131, 180 ], [ 240, 145, 131, 181 ], [ 240, 145, 131, 182 ], [ 240, 145, 131, 183 ], [ 240, 145, 131, 184 ], [ 240, 145, 131, 185 ], [ 240, 145, 132, 182 ], [ 240, 145, 132, 183 ], [ 240, 145, 132, 184 ], [ 240, 145, 132, 185 ], [ 240, 145, 132, 186 ], [ 240, 145, 132, 187 ], [ 240, 145, 132, 188 ], [ 240, 145, 132, 189 ], [ 240, 145, 132, 190 ], [ 240, 145, 132, 191 ], [ 240, 145, 135, 144 ], [ 240, 145, 135, 145 ], [ 240, 145, 135, 146 ], [ 240, 145, 135, 147 ], [ 240, 145, 135, 148 ], [ 240, 145, 135, 149 ], [ 240, 145, 135, 150 ], [ 240, 145, 135, 151 ], [ 240, 145, 135, 152 ], [ 240, 145, 135, 153 ], [ 240, 145, 135, 161 ], [ 240, 145, 135, 162 ], [ 240, 145, 135, 163 ], [ 240, 145, 135, 164 ], [ 240, 145, 135, 165 ], [ 240, 145, 135, 166 ], [ 240, 145, 135, 167 ], [ 240, 145, 135, 168 ], [ 240, 145, 135, 169 ], [ 240, 145, 135, 170 ], [ 240, 145, 135, 171 ], [ 240, 145, 135, 172 ], [ 240, 145, 135, 173 ], [ 240, 145, 135, 174 ], [ 240, 145, 135, 175 ], [ 240, 145, 135, 176 ], [ 240, 145, 135, 177 ], [ 240, 145, 135, 178 ], [ 240, 145, 135, 179 ], [ 240, 145, 135, 180 ], [ 240, 145, 139, 176 ], [ 240, 145, 139, 177 ], [ 240, 145, 139, 178 ], [ 240, 145, 139, 179 ], [ 240, 145, 139, 180 ], [ 240, 145, 139, 181 ], [ 240, 145, 139, 182 ], [ 240, 145, 139, 183 ], [ 240, 145, 139, 184 ], [ 240, 145, 139, 185 ], [ 240, 145, 147, 144 ], [ 240, 145, 147, 145 ], [ 240, 145, 147, 146 ], [ 240, 145, 147, 147 ], [ 240, 145, 147, 148 ], [ 240, 145, 147, 149 ], [ 240, 145, 147, 150 ], [ 240, 145, 147, 151 ], [ 240, 145, 147, 152 ], [ 240, 145, 147, 153 ], [ 240, 145, 153, 144 ], [ 240, 145, 153, 145 ], [ 240, 145, 153, 146 ], [ 240, 145, 153, 147 ], [ 240, 145, 153, 148 ], [ 240, 145, 153, 149 ], [ 240, 145, 153, 150 ], [ 240, 145, 153, 151 ], [ 240, 145, 153, 152 ], [ 240, 145, 153, 153 ], [ 240, 145, 155, 128 ], [ 240, 145, 155, 129 ], [ 240, 145, 155, 130 ], [ 240, 145, 155, 131 ], [ 240, 145, 155, 132 ], [ 240, 145, 155, 133 ], [ 240, 145, 155, 134 ], [ 240, 145, 155, 135 ], [ 240, 145, 155, 136 ], [ 240, 145, 155, 137 ], [ 240, 145, 156, 176 ], [ 240, 145, 156, 177 ], [ 240, 145, 156, 178 ], [ 240, 145, 156, 179 ], [ 240, 145, 156, 180 ], [ 240, 145, 156, 181 ], [ 240, 145, 156, 182 ], [ 240, 145, 156, 183 ], [ 240, 145, 156, 184 ], [ 240, 145, 156, 185 ], [ 240, 145, 156, 186 ], [ 240, 145, 156, 187 ], [ 240, 145, 163, 160 ], [ 240, 145, 163, 161 ], [ 240, 145, 163, 162 ], [ 240, 145, 163, 163 ], [ 240, 145, 163, 164 ], [ 240, 145, 163, 165 ], [ 240, 145, 163, 166 ], [ 240, 145, 163, 167 ], [ 240, 145, 163, 168 ], [ 240, 145, 163, 169 ], [ 240, 145, 163, 170 ], [ 240, 145, 163, 171 ], [ 240, 145, 163, 172 ], [ 
240, 145, 163, 173 ], [ 240, 145, 163, 174 ], [ 240, 145, 163, 175 ], [ 240, 145, 163, 176 ], [ 240, 145, 163, 177 ], [ 240, 145, 163, 178 ], [ 240, 146, 144, 128 ], [ 240, 146, 144, 129 ], [ 240, 146, 144, 130 ], [ 240, 146, 144, 131 ], [ 240, 146, 144, 132 ], [ 240, 146, 144, 133 ], [ 240, 146, 144, 134 ], [ 240, 146, 144, 135 ], [ 240, 146, 144, 136 ], [ 240, 146, 144, 137 ], [ 240, 146, 144, 138 ], [ 240, 146, 144, 139 ], [ 240, 146, 144, 140 ], [ 240, 146, 144, 141 ], [ 240, 146, 144, 142 ], [ 240, 146, 144, 143 ], [ 240, 146, 144, 144 ], [ 240, 146, 144, 145 ], [ 240, 146, 144, 146 ], [ 240, 146, 144, 147 ], [ 240, 146, 144, 148 ], [ 240, 146, 144, 149 ], [ 240, 146, 144, 150 ], [ 240, 146, 144, 151 ], [ 240, 146, 144, 152 ], [ 240, 146, 144, 153 ], [ 240, 146, 144, 154 ], [ 240, 146, 144, 155 ], [ 240, 146, 144, 156 ], [ 240, 146, 144, 157 ], [ 240, 146, 144, 158 ], [ 240, 146, 144, 159 ], [ 240, 146, 144, 160 ], [ 240, 146, 144, 161 ], [ 240, 146, 144, 162 ], [ 240, 146, 144, 163 ], [ 240, 146, 144, 164 ], [ 240, 146, 144, 165 ], [ 240, 146, 144, 166 ], [ 240, 146, 144, 167 ], [ 240, 146, 144, 168 ], [ 240, 146, 144, 169 ], [ 240, 146, 144, 170 ], [ 240, 146, 144, 171 ], [ 240, 146, 144, 172 ], [ 240, 146, 144, 173 ], [ 240, 146, 144, 174 ], [ 240, 146, 144, 175 ], [ 240, 146, 144, 176 ], [ 240, 146, 144, 177 ], [ 240, 146, 144, 178 ], [ 240, 146, 144, 179 ], [ 240, 146, 144, 180 ], [ 240, 146, 144, 181 ], [ 240, 146, 144, 182 ], [ 240, 146, 144, 183 ], [ 240, 146, 144, 184 ], [ 240, 146, 144, 185 ], [ 240, 146, 144, 186 ], [ 240, 146, 144, 187 ], [ 240, 146, 144, 188 ], [ 240, 146, 144, 189 ], [ 240, 146, 144, 190 ], [ 240, 146, 144, 191 ], [ 240, 146, 145, 128 ], [ 240, 146, 145, 129 ], [ 240, 146, 145, 130 ], [ 240, 146, 145, 131 ], [ 240, 146, 145, 132 ], [ 240, 146, 145, 133 ], [ 240, 146, 145, 134 ], [ 240, 146, 145, 135 ], [ 240, 146, 145, 136 ], [ 240, 146, 145, 137 ], [ 240, 146, 145, 138 ], [ 240, 146, 145, 139 ], [ 240, 146, 145, 140 ], [ 240, 146, 145, 141 ], [ 240, 146, 145, 142 ], [ 240, 146, 145, 143 ], [ 240, 146, 145, 144 ], [ 240, 146, 145, 145 ], [ 240, 146, 145, 146 ], [ 240, 146, 145, 147 ], [ 240, 146, 145, 148 ], [ 240, 146, 145, 149 ], [ 240, 146, 145, 150 ], [ 240, 146, 145, 151 ], [ 240, 146, 145, 152 ], [ 240, 146, 145, 153 ], [ 240, 146, 145, 154 ], [ 240, 146, 145, 155 ], [ 240, 146, 145, 156 ], [ 240, 146, 145, 157 ], [ 240, 146, 145, 158 ], [ 240, 146, 145, 159 ], [ 240, 146, 145, 160 ], [ 240, 146, 145, 161 ], [ 240, 146, 145, 162 ], [ 240, 146, 145, 163 ], [ 240, 146, 145, 164 ], [ 240, 146, 145, 165 ], [ 240, 146, 145, 166 ], [ 240, 146, 145, 167 ], [ 240, 146, 145, 168 ], [ 240, 146, 145, 169 ], [ 240, 146, 145, 170 ], [ 240, 146, 145, 171 ], [ 240, 146, 145, 172 ], [ 240, 146, 145, 173 ], [ 240, 146, 145, 174 ], [ 240, 150, 169, 160 ], [ 240, 150, 169, 161 ], [ 240, 150, 169, 162 ], [ 240, 150, 169, 163 ], [ 240, 150, 169, 164 ], [ 240, 150, 169, 165 ], [ 240, 150, 169, 166 ], [ 240, 150, 169, 167 ], [ 240, 150, 169, 168 ], [ 240, 150, 169, 169 ], [ 240, 150, 173, 144 ], [ 240, 150, 173, 145 ], [ 240, 150, 173, 146 ], [ 240, 150, 173, 147 ], [ 240, 150, 173, 148 ], [ 240, 150, 173, 149 ], [ 240, 150, 173, 150 ], [ 240, 150, 173, 151 ], [ 240, 150, 173, 152 ], [ 240, 150, 173, 153 ], [ 240, 150, 173, 155 ], [ 240, 150, 173, 156 ], [ 240, 150, 173, 157 ], [ 240, 150, 173, 158 ], [ 240, 150, 173, 159 ], [ 240, 150, 173, 160 ], [ 240, 150, 173, 161 ], [ 240, 157, 141, 160 ], [ 240, 157, 141, 161 ], [ 240, 157, 141, 162 ], [ 240, 157, 141, 163 ], [ 
240, 157, 141, 164 ], [ 240, 157, 141, 165 ], [ 240, 157, 141, 166 ], [ 240, 157, 141, 167 ], [ 240, 157, 141, 168 ], [ 240, 157, 141, 169 ], [ 240, 157, 141, 170 ], [ 240, 157, 141, 171 ], [ 240, 157, 141, 172 ], [ 240, 157, 141, 173 ], [ 240, 157, 141, 174 ], [ 240, 157, 141, 175 ], [ 240, 157, 141, 176 ], [ 240, 157, 141, 177 ], [ 240, 157, 159, 142 ], [ 240, 157, 159, 143 ], [ 240, 157, 159, 144 ], [ 240, 157, 159, 145 ], [ 240, 157, 159, 146 ], [ 240, 157, 159, 147 ], [ 240, 157, 159, 148 ], [ 240, 157, 159, 149 ], [ 240, 157, 159, 150 ], [ 240, 157, 159, 151 ], [ 240, 157, 159, 152 ], [ 240, 157, 159, 153 ], [ 240, 157, 159, 154 ], [ 240, 157, 159, 155 ], [ 240, 157, 159, 156 ], [ 240, 157, 159, 157 ], [ 240, 157, 159, 158 ], [ 240, 157, 159, 159 ], [ 240, 157, 159, 160 ], [ 240, 157, 159, 161 ], [ 240, 157, 159, 162 ], [ 240, 157, 159, 163 ], [ 240, 157, 159, 164 ], [ 240, 157, 159, 165 ], [ 240, 157, 159, 166 ], [ 240, 157, 159, 167 ], [ 240, 157, 159, 168 ], [ 240, 157, 159, 169 ], [ 240, 157, 159, 170 ], [ 240, 157, 159, 171 ], [ 240, 157, 159, 172 ], [ 240, 157, 159, 173 ], [ 240, 157, 159, 174 ], [ 240, 157, 159, 175 ], [ 240, 157, 159, 176 ], [ 240, 157, 159, 177 ], [ 240, 157, 159, 178 ], [ 240, 157, 159, 179 ], [ 240, 157, 159, 180 ], [ 240, 157, 159, 181 ], [ 240, 157, 159, 182 ], [ 240, 157, 159, 183 ], [ 240, 157, 159, 184 ], [ 240, 157, 159, 185 ], [ 240, 157, 159, 186 ], [ 240, 157, 159, 187 ], [ 240, 157, 159, 188 ], [ 240, 157, 159, 189 ], [ 240, 157, 159, 190 ], [ 240, 157, 159, 191 ], [ 240, 158, 163, 135 ], [ 240, 158, 163, 136 ], [ 240, 158, 163, 137 ], [ 240, 158, 163, 138 ], [ 240, 158, 163, 139 ], [ 240, 158, 163, 140 ], [ 240, 158, 163, 141 ], [ 240, 158, 163, 142 ], [ 240, 158, 163, 143 ], [ 240, 159, 132, 128 ], [ 240, 159, 132, 129 ], [ 240, 159, 132, 130 ], [ 240, 159, 132, 131 ], [ 240, 159, 132, 132 ], [ 240, 159, 132, 133 ], [ 240, 159, 132, 134 ], [ 240, 159, 132, 135 ], [ 240, 159, 132, 136 ], [ 240, 159, 132, 137 ], [ 240, 159, 132, 138 ], [ 240, 159, 132, 139 ], [ 240, 159, 132, 140 ] ]
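# A quick sanity check (not part of the original file): each entry decodes to
# a single numeric character. For example, [217, 160] is Arabic-Indic zero
# (U+0660) and [239, 188, 144] is fullwidth zero (U+FF10):
#
#     >>> bytes([217, 160]).decode('utf-8')
#     '٠'
#     >>> bytes([239, 188, 144]).decode('utf-8')
#     '0'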
nilq/baby-python
python
import functools
import time
import argparse
import sys
import asyncio

import cocrawler.burner as burner
import cocrawler.config as config


def burn(dt, data):
    # Busy-wait for dt seconds of CPU time.
    t0 = time.clock()
    end = t0 + dt
    while time.clock() < end:
        pass
    return 1,


async def work():
    while True:
        dt, data = await queue.get()
        partial = functools.partial(burn, dt, data)
        await b.burn(partial)
        queue.task_done()


async def crawl():
    # Honor --workers; the original hardcoded 100 and left the option unused.
    workers = [asyncio.Task(work(), loop=loop) for _ in range(args.workers)]
    await queue.join()
    for w in workers:
        if not w.done():
            w.cancel()


ARGS = argparse.ArgumentParser(description='bench_burn benchmark for burner thread overhead')
ARGS.add_argument('--threads', type=int, default=2)
ARGS.add_argument('--workers', type=int, default=100)
ARGS.add_argument('--datasize', type=int, default=10000)
ARGS.add_argument('--affinity', action='store_true')
ARGS.add_argument('--duration', type=float, default=0.010)
ARGS.add_argument('--count', type=int, default=10000)

args = ARGS.parse_args()

c = {'Multiprocess': {'BurnerThreads': args.threads, 'Affinity': args.affinity}}
config.set_config(c)

loop = asyncio.get_event_loop()
b = burner.Burner('parser')
queue = asyncio.Queue()

for _ in range(args.count):
    queue.put_nowait((args.duration, 'x' * args.datasize))

print('args are', args)
print('Processing {} items of size {} kbytes and {:.3f} seconds of burn using {} burner threads'.format(
    args.count, int(args.datasize / 1000), args.duration, args.threads))

t0 = time.time()
c0 = time.clock()

try:
    loop.run_until_complete(crawl())
except KeyboardInterrupt:
    sys.stderr.flush()
    print('\nInterrupt. Exiting.\n')
finally:
    loop.stop()
    loop.run_forever()
    loop.close()

elapsed = time.time() - t0
print('Elapsed time is {:.1f} seconds.'.format(elapsed))
expected = args.count * args.duration / args.threads
print('Expected is {:.1f} seconds.'.format(expected))
print('Burner-side overhead is {}% or {:.4f} seconds per call'.format(
    int((elapsed - expected) / expected * 100), (elapsed - expected) / args.count))
celapsed = time.clock() - c0
print('Main-thread overhead is {}%, {:.4f} seconds per call, {} calls per cpu-second'.format(
    int(celapsed / elapsed * 100), celapsed / args.count, int(args.count / celapsed)))
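# Example invocation (hypothetical file name; requires cocrawler installed):
#     python bench_burn.py --threads 4 --duration 0.005 --count 1000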
nilq/baby-python
python
## Script (Python) "updateProductionStage"
##parameters=sci
# Copy the object in development to review and production.
object = sci.object
st = object.portal_staging
st.updateStages(object, 'dev', ['review', 'prod'], sci.kwargs.get('comment', ''))
nilq/baby-python
python
""" Benchmark Sorted Dictionary Datatypes """ import warnings from .benchmark import * # Tests. @register_test def contains(func, size): for val in lists[size][::100]: assert func(val) @register_test def getitem(func, size): for val in lists[size][::100]: assert func(val) == -val @register_test def setitem(func, size): for val in lists[size][::100]: func(val, -val) @register_test def setitem_existing(func, size): for val in lists[size][::100]: func(val, -val) @register_test def delitem(func, size): for val in lists[size][::100]: func(val) @register_test def iter(func, size): assert all(idx == val for idx, val in enumerate(func())) # Setups. def do_nothing(obj, size): pass def fill_values(obj, size): if hasattr(obj, 'update'): obj.update({val: -val for val in range(size)}) else: for val in range(size): obj[val] = -val # Implementation imports. from .context import sortedcontainers from sortedcontainers import SortedDict kinds['SortedDict'] = SortedDict try: from rbtree import rbtree kinds['rbtree'] = rbtree except ImportError: warnings.warn('No module named rbtree', ImportWarning) try: from blist import sorteddict kinds['blist.sorteddict'] = sorteddict except ImportError: warnings.warn('No module named blist', ImportWarning) try: from treap import treap kinds['treap'] = treap except ImportError: warnings.warn('No module named treap', ImportWarning) try: from bintrees import FastAVLTree, FastRBTree kinds['FastAVLTree'] = FastAVLTree kinds['FastRBTree'] = FastRBTree except ImportError: warnings.warn('No module named bintrees', ImportWarning) try: from skiplistcollections import SkipListDict kinds['SkipListDict'] = SkipListDict except ImportError: warnings.warn('No module named skiplistcollections', ImportWarning) try: from banyan import SortedDict as BanyanSortedDict kinds['banyan.SortedDict'] = BanyanSortedDict except ImportError: warnings.warn('No module named banyan', ImportWarning) # Implementation configuration. for name in tests: impls[name] = OrderedDict() for name, kind in kinds.items(): impls['contains'][name] = { 'setup': fill_values, 'ctor': kind, 'func': '__contains__', 'limit': 1000000 } if 'treap' in impls['contains']: del impls['contains']['treap'] for name, kind in kinds.items(): impls['getitem'][name] = { 'setup': fill_values, 'ctor': kind, 'func': '__getitem__', 'limit': 1000000 } for name, kind in kinds.items(): impls['setitem'][name] = { 'setup': do_nothing, 'ctor': kind, 'func': '__setitem__', 'limit': 1000000 } for name, kind in kinds.items(): impls['setitem_existing'][name] = { 'setup': fill_values, 'ctor': kind, 'func': '__setitem__', 'limit': 1000000 } for name, kind in kinds.items(): impls['delitem'][name] = { 'setup': fill_values, 'ctor': kind, 'func': '__delitem__', 'limit': 1000000 } for name, kind in kinds.items(): impls['iter'][name] = { 'setup': fill_values, 'ctor': kind, 'func': '__iter__', 'limit': 1000000 } if __name__ == '__main__': main('SortedDict')
nilq/baby-python
python
import sys

import salt.client.ssh
import salt.utils.parsers


class SaltSSH(salt.utils.parsers.SaltSSHOptionParser):
    """
    Used to execute the salt ssh routine
    """

    def run(self):
        if "-H" in sys.argv or "--hosts" in sys.argv:
            # Hack: pass two mandatory options that won't be used
            # anyways with -H or --hosts
            sys.argv += ["x", "x"]
        self.parse_args()
        ssh = salt.client.ssh.SSH(self.config)
        ssh.run()
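# --- Hedged usage sketch (added; not part of Salt itself) ---
# This parser/runner is normally driven by the `salt-ssh` console script; a
# minimal manual entry point, assuming Salt is installed and configured,
# would be:
#
# if __name__ == "__main__":
#     SaltSSH().run()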
nilq/baby-python
python
import os
import importlib

import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick2_ohlc
# from matplotlib.finance import volume_overlay
import matplotlib.ticker as ticker

from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.exchange_bcolz import BcolzExchangeBarReader
from catalyst.exchange.bundle_utils import get_df_from_arrays, get_bcolz_chunk
from catalyst.exchange.factory import get_exchange

EXCHANGE_NAMES = ['bitfinex', 'bittrex', 'poloniex']
exchanges = dict(
    (e, getattr(importlib.import_module(
        'catalyst.exchange.{0}.{0}'.format(e)), e.capitalize()))
    for e in EXCHANGE_NAMES
)


class ValidateChunks(object):
    def __init__(self):
        self.columns = ['open', 'high', 'low', 'close', 'volume']

    def chunk_to_df(self, exchange_name, symbol, data_frequency, period):
        exchange = get_exchange(exchange_name)
        asset = exchange.get_asset(symbol)

        filename = get_bcolz_chunk(
            exchange_name=exchange_name,
            symbol=symbol,
            data_frequency=data_frequency,
            period=period
        )

        reader = BcolzExchangeBarReader(rootdir=filename,
                                        data_frequency=data_frequency)

        # metadata = BcolzMinuteBarMetadata.read(filename)

        start = reader.first_trading_day
        end = reader.last_available_dt
        if data_frequency == 'daily':
            end = end - pd.Timedelta(hours=23, minutes=59)
        print(start, end, data_frequency)

        arrays = reader.load_raw_arrays(self.columns, start, end, [asset.sid, ])

        bundle = ExchangeBundle(exchange_name)
        periods = bundle.get_calendar_periods_range(
            start, end, data_frequency
        )

        return get_df_from_arrays(arrays, periods)

    def plot_ohlcv(self, df):
        fig, ax = plt.subplots()

        # Plot the candlestick
        candlestick2_ohlc(ax, df['open'], df['high'], df['low'], df['close'],
                          width=1, colorup='g', colordown='r', alpha=0.5)

        # shift y-limits of the candlestick plot so that there is space
        # at the bottom for the volume bar chart
        pad = 0.25
        yl = ax.get_ylim()
        ax.set_ylim(yl[0] - (yl[1] - yl[0]) * pad, yl[1])

        # Add a seconds axis for the volume overlay
        ax2 = ax.twinx()
        ax2.set_position(
            matplotlib.transforms.Bbox([[0.125, 0.1], [0.9, 0.26]]))

        # Plot the volume overlay
        # bc = volume_overlay(ax2, df['open'], df['close'], df['volume'],
        #                     colorup='g', alpha=0.5, width=1)

        ax.xaxis.set_major_locator(ticker.MaxNLocator(6))

        def mydate(x, pos):
            try:
                return df.index[int(x)]
            except IndexError:
                return ''

        ax.xaxis.set_major_formatter(ticker.FuncFormatter(mydate))

        plt.margins(0)
        plt.show()

    # NOTE: chunk_to_df() now takes exchange_name/symbol/data_frequency/period,
    # so these two single-filename helpers are stale and would need updating
    # before use.
    def plot(self, filename):
        df = self.chunk_to_df(filename)
        self.plot_ohlcv(df)

    def to_csv(self, filename):
        df = self.chunk_to_df(filename)
        df.to_csv(os.path.basename(filename).split('.')[0] + '.csv')


v = ValidateChunks()
df = v.chunk_to_df(
    exchange_name='bitfinex',
    symbol='eth_btc',
    data_frequency='daily',
    period='2016'
)
print(df.tail())
v.plot_ohlcv(df)

# v.plot(
#     ex
# )
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Atmosphere container base class definitions

Created on Wed Nov 16 16:37:03 2016

@author: maxwell
"""

# MutableMapping moved to collections.abc (required on Python 3.10+)
from collections.abc import MutableMapping

import numpy as np

from . import constants
from .readprof import readprof, readprof_ozone


class Atmosphere(MutableMapping):
    """
    Dict-like container for atmosphere array variables.

    Keeps temperature, pressure, height, ozone and humidity.
    """

    # minimum allowable water vapor mixing ratio
    _qmin = 3.0e-6
    # bounds for where to look for tropopause
    _ttl_pmax = 400.0
    _ttl_pmin = 5.0
    _tmin = 100
    _tmax = 375
    _mcclathcydir = 'atmosphere/profiles/mcclatchy'

    def __init__(self, gridstagger=None, plev=None, **kwargs):
        """
        Define available keys and set defaults.

        Note that we require to know if the grid is staggered, and if we
        should treat RH or q as static. None of the other variables will be
        checked for consistency, so it is up to the user to make sure that
        all of the variables have the right dimensionality and units. It is
        recommended that all vectors be numpy arrays.

        vars:
            plev (hPa)       pressure
            tlev (K)         temperature
            qlev (g/g)       water vapor mixing ratio
            rhlev (0<=x<=1)  relative humidity
            o3lev (g/g)      ozone mass mixing ratio
        """
        print("Initializing Atmosphere object")

        # check that the grid is well defined
        if gridstagger is None:
            estr = "{} class requires (bool) gridstagger input."
            raise ValueError(estr.format(self.__class__.__name__))
        if plev is None:
            estr = "{} class requires ndarray plev input."
            raise ValueError(estr.format(self.__class__.__name__))

        self.gridstagger = gridstagger
        self.plev = plev
        self.nlev = len(plev)
        self.nlay = self.nlev - 1

        # we need at least some kind of moisture
        self.qlev = kwargs.get('qlev', None)
        self.rhlev = kwargs.get('rhlev', None)
        self.holdrh = kwargs.get('holdrh', None)

        # optional, we will provide defaults
        self.tlev = kwargs.get('tlev', None)
        self.o3lev = kwargs.get('o3lev', None)

        # T defaults to isothermal
        if (self.tlev is None):
            self.tlev = 288.0*np.ones(len(self.plev))
            print("WARNING: T not provided, default to mean isothermal value.")

        # assign moisture vars with sanity checks
        if (self.qlev is None and self.rhlev is None):
            self.qlev = np.zeros(len(self.plev))
            self.rhlev = np.zeros(len(self.plev))
            print("WARNING: moisture not provided, default to 0.")
            userh = False
        elif (self.qlev is None and self.rhlev is not None):
            print("WARNING: q not provided. Setting based on RH.")
            # fixed typo in the original: self._enforece_rh_range
            self.rhlev = self._enforce_rh_range(self.rhlev)
            self.qlev = self._enforce_q_gradient(
                self._rh2q(self.plev, self.tlev, self.rhlev)
            )
            userh = True
        elif (self.qlev is not None and self.rhlev is None):
            print("WARNING: RH not provided. Setting based on q.")
            self.qlev = self._enforce_q_gradient(self.qlev)
            self.rhlev = self._q2rh(self.plev, self.tlev, self.qlev)
            userh = False
        else:
            userh = True
            self.qlev = self._enforce_q_gradient(self.qlev)
            self.rhlev = self._enforce_rh_range(self.rhlev)

        if (self.holdrh is None):
            self.holdrh = userh
            print("WARNING: holdrh not provided, setting to {0}".format(userh))

        # o3 defaults to 0
        if (self.o3lev is None):
            self.o3lev = np.zeros(len(self.plev))
            print("WARNING: ozone not provided, default to 0.")

        # define layer tags
        self.play = 0.5*(plev[:-1] + plev[1:])
        self.tlay = self._lev2lay(self.plev, self.tlev, self.play)
        self.o3lay = self._lev2lay(self.plev, self.o3lev, self.play)
        self.qlay = self._lev2lay(self.plev, self.qlev, self.play)
        self.rhlay = self._lev2lay(self.plev, self.rhlev, self.play)

        tsfc = kwargs.get('tsfc', None)
        if (tsfc is None):
            print("WARNING: tsfc not provided. Using tlev[-1].")
            tsfc = self.tlev[-1]
        self.tsfc = tsfc

        self._p_z()
        self._updatecoldpoint()
        self._updatewarmpoint()
        self._updatewv()

    # %% dict-like behavior
    def __setitem__(self, key, value=None):
        if (key == 'plev' or key == 'play' or key == 'p'
                or key == 'zlev' or key == 'zlay' or key == 'z'):
            err = "{cls} object doesn't support changing value of {key}."
            raise TypeError(
                err.format(cls=self.__class__.__name__, key=key)
            )
        if self.__contains__(key):
            self.__dict__[key] = value
        else:
            raise KeyError("'{0}' not found in collection".format(key))

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        raise TypeError("{cls} object doesn't support item deletion"
                        .format(cls=self.__class__.__name__))

    def __iter__(self):
        return self.__dict__.__iter__()

    def __len__(self):
        if (self.gridstagger):
            return self.nlay
        else:
            return self.nlev

    def __contains__(self, key):
        return self.__dict__.__contains__(key)

    # %% alternate constructors
    @classmethod
    def mcclatchy(cls, prof, gridstagger=None, p=None, holdrh=None, **kwargs):
        """
        Alternate constructor using a McClatchy standard profile.
        """
        keys = ['plev', 'tlev', 'qlev', 'o3lev']
        prof = ''.join(['/'.join([cls._mcclathcydir, prof]), '.lay'])

        # if any column vars are provided, we will use them as defaults here
        t = kwargs.pop('tlev', None)
        q = kwargs.pop('qlev', None)
        o3 = kwargs.pop('o3lev', None)

        print("Initializing {cls} object from file {f}.".format(
            cls=cls.__name__, f=prof))
        try:
            (ps, ts, qs, o3s) = readprof(prof)
        except FileNotFoundError:
            raise FileNotFoundError(
                "File {prof} does not exist".format(prof=prof)
            )

        if (p is None):
            print("WARNING: pressure levels not provided. "
                  "Using default McClatchy values.")
            p = ps
        else:
            print("{}: interpolating to provided grid".format(cls.__name__))
        if t is None:
            t = cls.interp(ps, ts, p)
        if q is None:
            q = cls.interp(ps, qs, p)
        if o3 is None:
            o3 = cls.interp(ps, o3s, p)
        defaults = dict(zip(keys, [p, t, q, o3]))

        if gridstagger is None:
            print("WARNING: gridstagger not provided for Atmosphere(). "
                  "Setting to True")
            gridstagger = True

        return cls(gridstagger=gridstagger, holdrh=holdrh,
                   **defaults, **kwargs)

    @classmethod
    def fromfile(cls, fname, gridstagger=None, p=None, holdrh=None, **kwargs):
        """
        Alternate constructor using any standard profile from file.
        """
        keys = ['plev', 'tlev', 'qlev', 'o3lev']

        # if any column vars are provided, we will use them as defaults here
        t = kwargs.pop('tlev', None)
        q = kwargs.pop('qlev', None)
        o3 = kwargs.pop('o3lev', None)

        print("Initializing {cls} object from file {f}.".format(
            cls=cls.__name__, f=fname))
        try:
            (ps, ts, qs, o3s) = readprof(fname)
        except FileNotFoundError:
            raise FileNotFoundError(
                "File {prof} does not exist".format(prof=fname)
            )

        if (p is None):
            print("WARNING: pressure levels not provided. "
                  "Using values from the file.")
            p = ps
        else:
            print("{}: interpolating to provided grid".format(cls.__name__))
        if t is None:
            t = cls.interp(ps, ts, p)
        if q is None:
            q = cls.interp(ps, qs, p)
        if o3 is None:
            o3 = cls.interp(ps, o3s, p)
        defaults = dict(zip(keys, [p, t, q, o3]))

        if gridstagger is None:
            print("WARNING: gridstagger not provided for Atmosphere(). "
                  "Setting to True")
            gridstagger = True

        return cls(gridstagger=gridstagger, holdrh=holdrh,
                   **defaults, **kwargs)

    def ozone_fromfile(self, fname):
        """
        Setter method to set ozone from a text file directly.
        """
        print('Attempt to read ozone profile from file:{f}'.format(f=fname))
        try:
            ps, o3s = readprof_ozone(fname)
        except FileNotFoundError:
            raise FileNotFoundError(
                "File {prof} does not exist".format(prof=fname)
            )
        self.o3 = np.maximum(self.interp(ps, o3s, self.p), 0)

    # %% interpolation
    @staticmethod
    def _findvalue(x, xq):
        """
        Find equal or larger value in array with at least one index to left.

        Array lookup should be sorted in ascending order.
        """
        L, R = int(1), int(len(x) - 1)
        while L < R:
            M = int((L + R)/2)
            if xq > x[M]:
                L = M + 1
            elif xq < x[M]:
                R = M
            else:
                return M
        return R

    @classmethod
    def interp(cls, x, y, xq):
        """
        Interpolate linearly with extrapolation when out of range.

        Expects xq to be a vector (i.e., numpy 1D array).
        """
        yq = np.empty(len(xq))  # yq needs to be same type as xq
        for iout, target in enumerate(xq):
            idx = cls._findvalue(x, target)
            f = (target - x[idx-1])/(x[idx] - x[idx-1])
            yq[iout] = (1 - f)*y[idx-1] + f*y[idx]
        return yq

    @classmethod
    def _lev2lay(cls, plev, xlev, play):
        """Move level vars to layers"""
        return cls.interp(plev, xlev, play)

    # remake so that it takes generic arguments and returns a vector
    @classmethod
    def _lay2lev(cls, play, xlay, plev):
        """Move layer vars to levels"""
        return cls.interp(play, xlay, plev)

    def updategrid(self):
        """
        Interpolate temperature to all levels/layers. Spread WV variables too.
        """
        if (self.gridstagger):
            self['tlev'] = self._lay2lev(
                self['play'], self['tlay'], self['plev'])
            self['qlev'] = self._lay2lev(
                self['play'], self['qlay'], self['plev'])
            self['rhlev'] = self._lay2lev(
                self['play'], self['rhlay'], self['plev'])
            self['o3lev'] = self._lay2lev(
                self['play'], self['o3lay'], self['plev'])
        else:
            self['tlay'] = self._lev2lay(
                self['plev'], self['tlev'], self['play'])
            self['qlay'] = self._lev2lay(
                self['plev'], self['qlev'], self['play'])
            self['rhlay'] = self._lev2lay(
                self['plev'], self['rhlev'], self['play'])
            # fixed: the original called _lay2lev here, but o3 is being moved
            # from levels to layers
            self['o3lay'] = self._lev2lay(
                self['plev'], self['o3lev'], self['play'])

        self._updatewv()
        self._p_z()
        self._updatecoldpoint()
        self._updatewarmpoint()

    # %% moisture
    @staticmethod
    def satvap(temp):
        """
        Saturation vapor pressure (Goff and Gratch, 1946).

        Temp is the temperature in Kelvins and may be a numpy array.
        """
        ttrans = 0  # 253.15
        tsteam = 373.16
        tice = 273.16

        # choose water or ice saturation
        loge = np.where(
            temp >= ttrans,
            (-7.90298*(tsteam/temp - 1)
             + 5.02808*np.log10(tsteam/temp)
             - 1.3816e-7*(10**(11.344*(1 - temp/tsteam)) - 1)
             + 8.1328e-3*(10**(-3.49149*(tsteam/temp - 1)) - 1)
             + np.log10(1013.25)),
            (-9.09718*(tice/temp - 1)
             - 3.56654*np.log10(tice/temp)
             - 0.876793*(1 - temp/tice)
             + np.log10(6.1173))
        )
        return 10**loge

    @classmethod
    def satmixrat(cls, pres, temp):
        """Saturation mixing ratio"""
        return constants.eps*cls.satvap(temp)/pres

    @classmethod
    def _q2rh(cls, p, t, q):
        """
        Convert mixing ratio to relative humidity.

        Assumes that the grid pressure is equivalent to the dry pressure.
        """
        return q/cls.satmixrat(p, t)

    @classmethod
    def _rh2q(cls, p, t, rh):
        """
        Convert RH to mixing ratio.

        Minimum values for q and the vertical gradient of q (q can never
        increase with height) are enforced separately by _enforce_q_gradient.
        """
        return rh*cls.satmixrat(p, t)

    @classmethod
    def _enforce_q_gradient(cls, q):
        """Ensure q decreases with height"""
        q[-1] = np.maximum(q[-1], cls._qmin)
        for i in np.arange(len(q) - 1, 0, -1):
            q[i-1] = np.maximum(np.minimum(q[i], q[i-1]), cls._qmin)
        return q

    @classmethod
    def _enforce_rh_range(cls, rh):
        rh = np.maximum(0.0, np.minimum(1.0, rh))
        return rh

    def _updatewv(self):
        """Spread water vapor from rh to q or vice-versa as grid specifies."""
        if (self.holdrh):
            self.qlev = self._enforce_q_gradient(
                self._rh2q(self.plev, self.tlev, self.rhlev)
            )
            self.qlay = self._enforce_q_gradient(
                self._rh2q(self.play, self.tlay, self.rhlay)
            )
        else:
            self.rhlev = self._enforce_rh_range(
                self._q2rh(self.plev, self.tlev, self.qlev)
            )
            self.rhlay = self._enforce_rh_range(
                self._q2rh(self.play, self.tlay, self.qlay)
            )

    # %% height-related methods
    def _p_z(self):
        tv = self.tlay*(1 + (1 - 1/constants.eps)*self.qlay)
        dz = ((constants.Rd/constants.grav)
              * np.log(self.plev[1:]/self.plev[:-1])
              * tv)
        zlev = np.zeros(len(self.plev))
        zlev[1:] = np.cumsum(dz[::-1])
        self.zlev = zlev[::-1]
        self.zlay = self._lev2lay(self.plev, self.zlev, self.play)

    # %% get cold point and conv. top
    def _updatecoldpoint(self):
        mask = np.logical_and(
            self.p <= self._ttl_pmax, self.p >= self._ttl_pmin)
        icold_point = np.argmin(np.where(mask, self.t, 99999))
        self._icold_point = icold_point

    def _updatewarmpoint(self):
        mask = self.p >= self._ttl_pmax
        iwarm_point = np.argmax(np.where(mask, self.t, -99999))
        self._iwarm_point = iwarm_point

    # %% property variables for more obvious getting and setting
    def _checkvar(self, value):
        if len(self) != len(value):
            raise ValueError(
                "Length of array provided does not match the target dimension"
            )

    @property
    def t(self):
        if (self.gridstagger):
            return self.tlay
        else:
            return self.tlev

    @t.setter
    def t(self, value):
        self._checkvar(value)
        if (self.gridstagger):
            self.tlay = np.minimum(np.maximum(value, self._tmin), self._tmax)
        else:
            self.tlev = np.minimum(np.maximum(value, self._tmin), self._tmax)
        self.updategrid()

    @property
    def tsfc(self):
        return self._tsfc

    @tsfc.setter
    def tsfc(self, value):
        self._tsfc = np.minimum(np.maximum(value, self._tmin), self._tmax)

    @property
    def q(self):
        if (self.gridstagger):
            return self.qlay
        else:
            return self.qlev

    @q.setter
    def q(self, value):
        self._checkvar(value)
        if (self.holdrh):
            print(
                "WARNING: holdrh set to True, but setting q directly. "
                "Value will be overwritten."
            )
        if (self.gridstagger):
            self.qlay = value
        else:
            self.qlev = value
        self.updategrid()

    @property
    def rh(self):
        if (self.gridstagger):
            return self.rhlay
        else:
            return self.rhlev

    @rh.setter
    def rh(self, value):
        self._checkvar(value)
        if (not self.holdrh):
            print(
                "WARNING: holdrh set to False, but setting RH directly. "
                "Value will be overwritten."
            )
        if (self.gridstagger):
            self.rhlay = value
        else:
            self.rhlev = value
        self.updategrid()

    @property
    def o3(self):
        if (self.gridstagger):
            return self.o3lay
        else:
            return self.o3lev

    @o3.setter
    def o3(self, value):
        self._checkvar(value)
        if (self.gridstagger):
            self.o3lay = value
        else:
            self.o3lev = value
        self.updategrid()

    @property
    def p(self):
        if (self.gridstagger):
            return self.play
        else:
            return self.plev

    @property
    def z(self):
        if (self.gridstagger):
            return self.zlay
        else:
            return self.zlev

    @property
    def tcold(self):
        return self.t[self.icold]

    @property
    def pcold(self):
        return self.p[self.icold]

    @property
    def zcold(self):
        return self.z[self.icold]

    @property
    def icold(self):
        return self._icold_point

    @property
    def tconv(self):
        return self.t[self.iconv]

    @property
    def pconv(self):
        return self.p[self.iconv]

    @property
    def zconv(self):
        return self.z[self.iconv]

    # although icold is calculated internally, not all atmospheres will need a
    # convective top (iconv). Provide a setter method so that external objects
    # can include this functionality if desired.
    @property
    def iconv(self):
        try:
            return self._iconv_top
        except AttributeError:
            msg = ("WARNING: attempt to access undefined _iconv_top. Using "
                   "cold point 'icold' instead")
            print(msg)
            return self.icold

    @iconv.setter
    def iconv(self, value):
        self._iconv_top = value

    @property
    def twarm(self):
        return self.t[self.iwarm]

    @property
    def pwarm(self):
        return self.p[self.iwarm]

    @property
    def iwarm(self):
        return self._iwarm_point
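# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal construction on a coarse 11-level grid. The numbers are made-up
# illustrative values, not a validated sounding, and the package's
# `constants` and `readprof` modules must be importable for the class to
# load at all.
#
# import numpy as np
# plev = np.linspace(10.0, 1000.0, 11)   # hPa, top of atmosphere to surface
# tlev = np.linspace(200.0, 290.0, 11)   # K
# atm = Atmosphere(gridstagger=False, plev=plev, tlev=tlev, holdrh=False)
# print(atm.tcold, atm.pcold)            # cold-point diagnostics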
nilq/baby-python
python
# -*- coding:utf-8 -*-
# author -> manoel vilela
# generating plots comparing the efficiency of the bubble, merge and
# personal sorting algorithms

import pylab


def extract_data(file_name):
    # read the whole file, then split records on '='
    # (the original called .split() on the text first, which broke the
    # later .split("=") by turning the data into a list)
    data = open(file_name, "r").read()
    data = [element.split() for element in data.split("=")]
    vector, values = data
nilq/baby-python
python
import graphene
from django.db.models import F, Q

from tabletop.models import DurationType, Game
from tabletop.schema import GameNode
from tabletop.utils.graphene import optimize_queryset


class Query(object):
    games = graphene.List(
        GameNode,
        id=graphene.UUID(),
        query=graphene.String(),
        players=graphene.Int(),
        entity=graphene.UUID(),
        max_duration=graphene.Int(),
        parent=graphene.UUID(),
    )

    def resolve_games(
        self,
        info,
        id: str = None,
        query: str = None,
        parent: str = None,
        players: int = None,
        entity: str = None,
        max_duration: int = None,
    ):
        # TODO(dcramer): fix optimize_queryset so it handles the OneToOne
        # join automatically
        qs = Game.objects.select_related("image").distinct()
        if id:
            qs = qs.filter(id=id)
        if parent:
            qs = qs.filter(parent=parent)
        if query:
            qs = qs.filter(name__istartswith=query)
        if entity:
            qs = qs.filter(entities=entity)
        if players:
            qs = qs.filter(min_players__lte=players, max_players__gte=players)
        if max_duration:
            qs = qs.filter(
                Q(duration__lte=max_duration, duration_type=DurationType.total)
                | Q(
                    duration__lte=max_duration
                    / (players if players else F("max_players")),
                    duration_type=DurationType.per_player,
                )
            )
        qs = optimize_queryset(qs, info, "games", GameNode.fix_queryset)
        return qs.order_by("name")
nilq/baby-python
python
# General imports ====================================
from html.parser import HTMLParser
from urllib import parse


# Finds all anchor <a> tags in a website ============
class LinkFinder(HTMLParser):

    def __init__(self, base_url, page_url):
        super().__init__()
        self.base_url = base_url
        self.page_url = page_url
        self.links = set()

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for (attribute, value) in attrs:
                if attribute == 'href':
                    url = parse.urljoin(self.base_url, value)
                    self.links.add(url)

    def page_links(self):
        return self.links

    def error(self, message):
        pass
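# --- Hedged usage sketch (added; not part of the original module) ---
# The example.com URLs are placeholders. feed() drives handle_starttag(),
# and urljoin() resolves the relative href against base_url.
if __name__ == '__main__':
    finder = LinkFinder('https://example.com', 'https://example.com/index.html')
    finder.feed('<a href="/about">About</a> <a href="https://other.org/">x</a>')
    print(finder.page_links())
    # expected: {'https://example.com/about', 'https://other.org/'}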
nilq/baby-python
python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017 Ganggao Zhu - Grupo de Sistemas Inteligentes
# gzhu[at]dit.upm.es
# DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from gensim import corpora, models, similarities, matutils


class TextAnalysis:
    """This implements a wrapper for gensim LSA and tf-idf analysis of a text collection"""

    def __init__(self, text_process, model, dictionary, tfidf, tfidf_index,
                 lsa, lsa_index):
        self._text_process = text_process
        self._model = model
        self._dictionary = dictionary
        self._tfidf = tfidf
        self._tfidf_index = tfidf_index
        self._lsa = lsa
        self._lsa_index = lsa_index

    def text2model(self, text):
        t = self._text_process(text)
        bow = self._dictionary.doc2bow(t)
        tfidf = self._tfidf[bow]
        if self._model == 'tfidf':
            return tfidf
        else:
            return self._lsa[tfidf]

    def text_similarity(self, t1, t2):
        # the original had identical tfidf/lsa branches here; text2model
        # already dispatches on self._model, so one path suffices
        t1_vec = matutils.any2sparse(self.text2model(t1))
        t2_vec = matutils.any2sparse(self.text2model(t2))
        return matutils.cossim(t1_vec, t2_vec)

    def search(self, text):
        query = self.text2model(text)
        if self._model == 'tfidf':
            return self._tfidf_index[query]
        else:
            return self._lsa_index[query]

    @classmethod
    def load(cls, text_process, model='tfidf', top_N=100, save_dir='data/'):
        import logging
        logging.basicConfig(
            format='%(asctime)s : %(levelname)s : %(message)s',
            level=logging.INFO)

        dictionary = corpora.Dictionary.load(save_dir + 'dictionary')
        tfidf = models.TfidfModel.load(save_dir + 'tfidf.model')
        tfidf_index = similarities.Similarity.load(
            save_dir + 'tfidf_index/tfidf.index')
        tfidf_index.num_best = top_N

        if model == 'lsa':
            lsa = models.LsiModel.load(save_dir + 'lsa.model')
            lsa_index = similarities.Similarity.load(
                save_dir + 'lsa_index/lsa.index')
            lsa_index.num_best = top_N
            return cls(text_process, model, dictionary, tfidf, tfidf_index,
                       lsa, lsa_index)
        return cls(text_process, model, dictionary, tfidf, tfidf_index,
                   None, None)

    @classmethod
    def train(cls, texts, text_process, model='lsa', topic_n=100, top_N=100,
              save_dir='data/'):
        import logging
        logging.basicConfig(
            format='%(asctime)s : %(levelname)s : %(message)s',
            level=logging.INFO)

        corpus = [text_process(t) for t in texts]
        dictionary = corpora.Dictionary(corpus)
        print(dictionary)
        dictionary.save(save_dir + 'dictionary')

        bow = [dictionary.doc2bow(t) for t in corpus]
        corpora.MmCorpus.serialize(save_dir + 'bow', bow)
        bow_corpus = corpora.MmCorpus(save_dir + 'bow')

        tfidf = models.TfidfModel(bow_corpus, id2word=dictionary)
        corpora.MmCorpus.serialize(save_dir + 'tfidf', tfidf[bow_corpus])
        tfidf.save(save_dir + 'tfidf.model')
        tfidf_corpus = corpora.MmCorpus(save_dir + 'tfidf')

        tfidf_index = similarities.Similarity(
            save_dir + 'tfidf_index/shard', tfidf_corpus,
            num_features=tfidf_corpus.num_terms)
        tfidf_index.num_best = top_N
        tfidf_index.save(save_dir + 'tfidf_index/tfidf.index')

        if model == 'lsa':
            lsa = models.LsiModel(tfidf_corpus, id2word=dictionary,
                                  num_topics=topic_n)
            lsa.save(save_dir + 'lsa.model')
            lsa_index = similarities.Similarity(
                save_dir + 'lsa_index/shard', lsa[tfidf_corpus],
                num_features=topic_n)
            lsa_index.num_best = top_N
            lsa_index.save(save_dir + 'lsa_index/lsa.index')
            return cls(text_process, model, dictionary, tfidf, tfidf_index,
                       lsa, lsa_index)
        return cls(text_process, model, dictionary, tfidf, tfidf_index,
                   None, None)
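# --- Hedged usage sketch (added; not part of the original module) ---
# `docs` and the whitespace tokenizer are illustrative stand-ins; train()
# writes its artifacts under save_dir, and the tfidf_index/ and lsa_index/
# subdirectories must exist before the Similarity shards are created.
#
# docs = ["the cat sat", "the dog ran", "cats and dogs"]
# ta = TextAnalysis.train(docs, lambda t: t.lower().split(),
#                         model='lsa', topic_n=2, save_dir='data/')
# print(ta.text_similarity("a cat", "the cat sat"))
# print(ta.search("dog"))  # top-N (document index, score) pairs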
nilq/baby-python
python
""" MIT License Copyright (c) 2020-2021 Hyeonki Hong <hhk7734@gmail.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np import tensorflow as tf import tensorflow.keras.backend as K from tensorflow.keras.layers import Input from tensorflow.keras.optimizers import Adam from .dataset.keras_sequence import YOLODataset # for exporting from .model import YOLOv4Model from .training.callbacks import ( SaveWeightsCallback, # for exporting YOLOCallbackAtEachStep, ) from .training.yolo_loss import YOLOv4Loss from .utils.mAP import create_mAP_input_files # for exporting from .utils.tflite import save_as_tflite # for expoerting from .utils.weights import ( load_weights as _load_weights, save_weights as _save_weights, ) from ..common.base_class import BaseClass physical_devices = tf.config.experimental.list_physical_devices("GPU") if len(physical_devices) > 0: tf.config.experimental.set_memory_growth(physical_devices[0], True) print("Call tf.config.experimental.set_memory_growth(GPU0, True)") class YOLOv4(BaseClass): @property def model(self) -> YOLOv4Model: return self._model def make_model(self): K.clear_session() _input = Input(self.config.net.input_shape) self._model = YOLOv4Model(config=self.config) self._model(_input) def load_weights(self, weights_path: str, weights_type: str = "tf"): """ Usage: yolo.load_weights("checkpoints") yolo.load_weights("yolov4.weights", weights_type="yolo") """ if weights_type == "yolo": _load_weights(self._model, weights_path) elif weights_type == "tf": self._model.load_weights(weights_path) def save_weights( self, weights_path: str, weights_type: str = "tf", to: int = 0 ): """ Usage: yolo.save_weights("checkpoints") yolo.save_weights("yolov4.weights", weights_type="yolo") yolo.save_weights("yolov4.conv.137", weights_type="yolo", to=137) """ to_layer = "" if to > 0: to_layer = self.config.metalayers[to - 1].name if weights_type == "yolo": _save_weights(self._model, weights_path, to=to_layer) elif weights_type == "tf": self._model.save_weights(weights_path) def summary(self, line_length=90, summary_type: str = "tf", **kwargs): if summary_type == "tf": self._model.summary(line_length=line_length, **kwargs) else: self.config.summary() ############# # Inference # ############# @tf.function def _predict(self, x): yolos = self._model(x, training=False) # [yolo0, yolo1, ...] 
# yolo == Dim(batch, height, width, channels) batch = yolos[0].shape[0] candidates = [] stride = 5 + self.config.yolo_0.classes for yolo in yolos: candidates.append(K.reshape(yolo, shape=(batch, -1, stride))) return K.concatenate(candidates, axis=1) def predict(self, frame: np.ndarray): """ Predict one frame @param frame: Dim(height, width, channels) @return pred_bboxes Dim(-1, (x,y,w,h,o, cls_id0, prob0, cls_id1, prob1)) """ # image_data == Dim(1, input_size[1], input_size[0], channels) height, width, _ = frame.shape image_data = self.resize_image(frame) image_data = image_data / 255.0 image_data = image_data[np.newaxis, ...].astype(np.float32) candidates = self._predict(image_data)[0].numpy() # Select 0 pred_bboxes = self.yolo_diou_nms( candidates=candidates, beta_nms=self.config.yolo_0.beta_nms ) self.fit_to_original(pred_bboxes, height, width) return pred_bboxes ############ # Training # ############ def compile( self, optimizer=None, loss=None, **kwargs, ): if optimizer is None: optimizer = Adam(learning_rate=self.config.net.learning_rate) if loss is None: loss = YOLOv4Loss(config=self.config, model=self.model) return self._model.compile( optimizer=optimizer, loss=loss, **kwargs, ) def fit( self, dataset, callbacks=None, validation_data=None, validation_steps=None, verbose: int = 3, **kwargs, ): """ verbose=3 is one line per step """ callbacks = callbacks or [] callbacks.append( YOLOCallbackAtEachStep(config=self.config, verbose=verbose) ) epochs = self.config.net.max_batches // len(dataset) + 1 return self._model.fit( dataset, epochs=epochs, verbose=verbose if verbose < 3 else 0, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, **kwargs, )
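# --- Hedged usage sketch (added; not part of the original module) ---
# File names are placeholders, and the config-parsing and pre/post-processing
# helpers (parse_names, parse_cfg, resize_image, yolo_diou_nms, ...) live in
# the BaseClass defined elsewhere in this package, so this is only the
# expected call order, not a verified script.
#
# yolo = YOLOv4()
# yolo.config.parse_names("coco.names")
# yolo.config.parse_cfg("yolov4.cfg")
# yolo.make_model()
# yolo.load_weights("yolov4.weights", weights_type="yolo")
# bboxes = yolo.predict(frame)  # frame: np.ndarray of shape (H, W, 3)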
nilq/baby-python
python